From de38a5bb9559a4acb14317a9038ce8267c432c03 Mon Sep 17 00:00:00 2001 From: "Hughes, J.D" Date: Thu, 11 Jun 2020 11:12:28 -0400 Subject: [PATCH] Introduce end-of-line normalization (#913) --- .gitattributes | 18 + autotest/runtests.bat | 8 +- autotest/t505_test.py | 3576 ++++++------- docs/mf6_dev_guide.md | 96 +- examples/Testing/flopy3_Oahu_02_02b.py | 370 +- examples/scripts/flopy_swi2_ex1.py | 414 +- flopy/__init__.py | 76 +- flopy/datbase.py | 164 +- flopy/discretization/__init__.py | 4 +- flopy/discretization/grid.py | 1144 ++-- flopy/discretization/modeltime.py | 88 +- flopy/discretization/structuredgrid.py | 2574 ++++----- flopy/discretization/unstructuredgrid.py | 582 +- flopy/discretization/vertexgrid.py | 720 +-- flopy/export/__init__.py | 14 +- flopy/export/shapefile_utils.py | 1848 +++---- flopy/export/utils.py | 3288 ++++++------ flopy/mbase.py | 3444 ++++++------ flopy/mf6/data/dfn/utl-tas.dfn | 230 +- flopy/mf6/data/dfn/utl-ts.dfn | 346 +- flopy/mf6/data/mfdata.py | 994 ++-- flopy/mf6/data/mfdataarray.py | 2520 ++++----- flopy/mf6/data/mfdatalist.py | 2514 ++++----- flopy/mf6/data/mfdatascalar.py | 1374 ++--- flopy/mf6/data/mfdatastorage.py | 4066 +++++++------- flopy/mf6/data/mfdatautil.py | 1426 ++--- flopy/mf6/data/mffileaccess.py | 3340 ++++++------ flopy/mf6/data/mfstructure.py | 4470 ++++++++-------- flopy/mf6/mfbase.py | 1338 ++--- flopy/mf6/mfmodel.py | 2356 ++++---- flopy/mf6/mfpackage.py | 3954 +++++++------- flopy/mf6/modflow/mfgnc.py | 286 +- flopy/mf6/modflow/mfgwfchd.py | 392 +- flopy/mf6/modflow/mfgwfcsub.py | 1158 ++-- flopy/mf6/modflow/mfgwfdisu.py | 594 +-- flopy/mf6/modflow/mfgwfdisv.py | 388 +- flopy/mf6/modflow/mfgwfdrn.py | 430 +- flopy/mf6/modflow/mfgwfevt.py | 508 +- flopy/mf6/modflow/mfgwfghb.py | 434 +- flopy/mf6/modflow/mfgwfgnc.py | 288 +- flopy/mf6/modflow/mfgwfgwf.py | 560 +- flopy/mf6/modflow/mfgwfhfb.py | 206 +- flopy/mf6/modflow/mfgwflak.py | 1394 ++--- flopy/mf6/modflow/mfgwfmaw.py | 1122 ++-- flopy/mf6/modflow/mfgwfmvr.py | 372 +- flopy/mf6/modflow/mfgwfnpf.py | 600 +-- flopy/mf6/modflow/mfgwfrch.py | 414 +- flopy/mf6/modflow/mfgwfriv.py | 442 +- flopy/mf6/modflow/mfgwfsfr.py | 1218 ++--- flopy/mf6/modflow/mfgwfuzf.py | 860 +-- flopy/mf6/modflow/mfgwfwel.py | 446 +- flopy/mf6/modflow/mfmvr.py | 368 +- flopy/mf6/modflow/mfutlobs.py | 274 +- flopy/mf6/utils/binaryfile_utils.py | 764 +-- flopy/mf6/utils/createpackages.py | 1262 ++--- flopy/mf6/utils/mfenums.py | 24 +- flopy/mf6/utils/mfobservation.py | 960 ++-- flopy/mf6/utils/reference.py | 1736 +++--- flopy/mf6/utils/testutils.py | 426 +- flopy/modflow/__init__.py | 104 +- flopy/modflow/mf.py | 1754 +++--- flopy/modflow/mfaddoutsidefile.py | 38 +- flopy/modflow/mfbas.py | 726 +-- flopy/modflow/mfbcf.py | 898 ++-- flopy/modflow/mfchd.py | 494 +- flopy/modflow/mfde4.py | 610 +-- flopy/modflow/mfdis.py | 1982 +++---- flopy/modflow/mfdisu.py | 2060 +++---- flopy/modflow/mfdrn.py | 590 +- flopy/modflow/mfdrt.py | 564 +- flopy/modflow/mfevt.py | 760 +-- flopy/modflow/mffhb.py | 1406 ++--- flopy/modflow/mfflwob.py | 1160 ++-- flopy/modflow/mfgage.py | 754 +-- flopy/modflow/mfghb.py | 566 +- flopy/modflow/mfgmg.py | 786 +-- flopy/modflow/mfhfb.py | 784 +-- flopy/modflow/mfhob.py | 1356 ++--- flopy/modflow/mfhyd.py | 702 +-- flopy/modflow/mflak.py | 1676 +++--- flopy/modflow/mflmt.py | 508 +- flopy/modflow/mflpf.py | 1262 ++--- flopy/modflow/mfmlt.py | 536 +- flopy/modflow/mfmnw1.py | 984 ++-- flopy/modflow/mfmnw2.py | 3876 +++++++------- flopy/modflow/mfmnwi.py | 688 +-- flopy/modflow/mfnwt.py | 
980 ++-- flopy/modflow/mfoc.py | 2132 ++++---- flopy/modflow/mfpar.py | 680 +-- flopy/modflow/mfparbc.py | 538 +- flopy/modflow/mfpbc.py | 218 +- flopy/modflow/mfpcg.py | 670 +-- flopy/modflow/mfpcgn.py | 1052 ++-- flopy/modflow/mfpks.py | 530 +- flopy/modflow/mfpval.py | 432 +- flopy/modflow/mfrch.py | 950 ++-- flopy/modflow/mfriv.py | 702 +-- flopy/modflow/mfsfr2.py | 6220 +++++++++++----------- flopy/modflow/mfsip.py | 510 +- flopy/modflow/mfsms.py | 1132 ++-- flopy/modflow/mfsor.py | 408 +- flopy/modflow/mfstr.py | 1778 +++---- flopy/modflow/mfsub.py | 1512 +++--- flopy/modflow/mfswi2.py | 1432 ++--- flopy/modflow/mfswr1.py | 374 +- flopy/modflow/mfswt.py | 1510 +++--- flopy/modflow/mfupw.py | 1062 ++-- flopy/modflow/mfuzf1.py | 2014 +++---- flopy/modflow/mfwel.py | 772 +-- flopy/modflow/mfzon.py | 440 +- flopy/modflowlgr/__init__.py | 4 +- flopy/modflowlgr/mflgr.py | 1206 ++--- flopy/modpath/__init__.py | 22 +- flopy/modpath/mp.py | 778 +-- flopy/modpath/mp7.py | 926 ++-- flopy/modpath/mp7bas.py | 278 +- flopy/modpath/mp7sim.py | 1260 ++--- flopy/modpath/mpbas.py | 302 +- flopy/modpath/mpsim.py | 846 +-- flopy/mt3d/__init__.py | 22 +- flopy/mt3d/mt.py | 1768 +++--- flopy/mt3d/mtadv.py | 816 +-- flopy/mt3d/mtbtn.py | 1982 +++---- flopy/mt3d/mtdsp.py | 732 +-- flopy/mt3d/mtgcg.py | 472 +- flopy/mt3d/mtphc.py | 220 +- flopy/mt3d/mtrct.py | 1224 ++--- flopy/mt3d/mtssm.py | 1472 ++--- flopy/mt3d/mttob.py | 224 +- flopy/plot/__init__.py | 52 +- flopy/plot/crosssection.py | 1766 +++--- flopy/plot/map.py | 3220 +++++------ flopy/plot/plotbase.py | 1932 +++---- flopy/plot/plotutil.py | 5590 +++++++++---------- flopy/plot/vcrosssection.py | 1478 ++--- flopy/seawat/__init__.py | 8 +- flopy/seawat/swt.py | 918 ++-- flopy/seawat/swtvdf.py | 1014 ++-- flopy/utils/__init__.py | 96 +- flopy/utils/binaryfile.py | 3690 ++++++------- flopy/utils/datafile.py | 1116 ++-- flopy/utils/datautil.py | 1406 ++--- flopy/utils/flopy_io.py | 972 ++-- flopy/utils/formattedfile.py | 780 +-- flopy/utils/gridintersect.py | 2954 +++++----- flopy/utils/mfreadnam.py | 560 +- flopy/utils/observationfile.py | 1076 ++-- flopy/utils/optionblock.py | 980 ++-- flopy/utils/rasters.py | 1704 +++--- flopy/utils/reference.py | 4430 +++++++-------- flopy/utils/swroutputfile.py | 1618 +++--- flopy/utils/util_array.py | 5730 ++++++++++---------- flopy/utils/util_list.py | 2362 ++++---- release/make-release.py | 890 ++-- setup.py | 78 +- 155 files changed, 90973 insertions(+), 90955 deletions(-) create mode 100644 .gitattributes diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000000..7ee4b11f38 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,18 @@ +############################################################################### +# Set default behavior to automatically normalize line endings. +############################################################################### +* text=auto + +############################################################################### +# Set the merge driver for windows files +# +*.bat text eol=crlf + +# Denote all files that are truly binary and should not be modified. 
+*.png binary +*.jpg binary +*.pdf binary + +# Do not modify the model data in various directories +examples/data/** binary +examples/groundwater_paper/uspb/** binary diff --git a/autotest/runtests.bat b/autotest/runtests.bat index 26b77ff99c..4df8a25fdc 100644 --- a/autotest/runtests.bat +++ b/autotest/runtests.bat @@ -1,4 +1,4 @@ -rmdir /S /Q __pycache__ -nosetests -v -rmdir /S /Q __pycache__ -pause +rmdir /S /Q __pycache__ +nosetests -v +rmdir /S /Q __pycache__ +pause diff --git a/autotest/t505_test.py b/autotest/t505_test.py index 3e179fb03e..e3114e2755 100644 --- a/autotest/t505_test.py +++ b/autotest/t505_test.py @@ -1,1788 +1,1788 @@ -import os - -import numpy as np - -import flopy -import flopy.utils.binaryfile as bf -from flopy.mf6.data.mfdatastorage import DataStorageType -from flopy.utils.datautil import PyListUtil -from flopy.mf6.mfbase import FlopyException -from flopy.mf6.modflow.mfgwf import ModflowGwf -from flopy.mf6.modflow.mfgwfchd import ModflowGwfchd -from flopy.mf6.modflow.mfgwfdis import ModflowGwfdis -from flopy.mf6.modflow.mfgwfdisv import ModflowGwfdisv -from flopy.mf6.modflow.mfgwfdrn import ModflowGwfdrn -from flopy.mf6.modflow.mfgwfevt import ModflowGwfevt -from flopy.mf6.modflow.mfgwfevta import ModflowGwfevta -from flopy.mf6.modflow.mfgwfghb import ModflowGwfghb -from flopy.mf6.modflow.mfgwfgnc import ModflowGwfgnc -from flopy.mf6.modflow.mfgwfgwf import ModflowGwfgwf -from flopy.mf6.modflow.mfgwfhfb import ModflowGwfhfb -from flopy.mf6.modflow.mfgwfic import ModflowGwfic -from flopy.mf6.modflow.mfgwfnpf import ModflowGwfnpf -from flopy.mf6.modflow.mfgwfoc import ModflowGwfoc -from flopy.mf6.modflow.mfgwfrch import ModflowGwfrch -from flopy.mf6.modflow.mfgwfrcha import ModflowGwfrcha -from flopy.mf6.modflow.mfgwfriv import ModflowGwfriv -from flopy.mf6.modflow.mfgwfsfr import ModflowGwfsfr -from flopy.mf6.modflow.mfgwfsto import ModflowGwfsto -from flopy.mf6.modflow.mfgwfwel import ModflowGwfwel -from flopy.mf6.modflow.mfims import ModflowIms -from flopy.mf6.modflow.mfsimulation import MFSimulation -from flopy.mf6.modflow.mftdis import ModflowTdis -from flopy.mf6.modflow.mfutlobs import ModflowUtlobs -from flopy.mf6.modflow.mfutlts import ModflowUtlts -from flopy.mf6.utils import testutils -from flopy.mf6.mfbase import MFDataException - - -try: - import pymake -except: - print('could not import pymake') - -exe_name = 'mf6' -v = flopy.which(exe_name) - -run = True -if v is None: - run = False - -cpth = os.path.join('temp', 't505') -# make the directory if it does not exist -if not os.path.isdir(cpth): - os.makedirs(cpth) - - -def np001(): - # init paths - test_ex_name = 'np001' - model_name = 'np001_mod' - - pth = os.path.join('..', 'examples', 'data', 'mf6', 'create_tests', - test_ex_name) - run_folder = os.path.join(cpth, test_ex_name) - if not os.path.isdir(run_folder): - os.makedirs(run_folder) - - expected_output_folder = os.path.join(pth, 'expected_output') - expected_head_file = os.path.join(expected_output_folder, 'np001_mod.hds') - expected_cbc_file = os.path.join(expected_output_folder, 'np001_mod.cbc') - - # model tests - test_sim = MFSimulation(sim_name=test_ex_name, version='mf6', - exe_name=exe_name, sim_ws=run_folder, - continue_=True, memory_print_option='summary') - name = test_sim.name_file - assert name.continue_.get_data() - assert name.nocheck.get_data() is None - assert name.memory_print_option.get_data() == 'summary' - - kwargs = {} - kwargs['bad_kwarg'] = 20 - try: - ex = False - bad_model = ModflowGwf(test_sim, modelname=model_name, - 
model_nam_file='{}.nam'.format(model_name), - **kwargs) - except FlopyException: - ex = True - assert (ex == True) - - kwargs = {} - kwargs['xul'] = 20.5 - good_model = ModflowGwf(test_sim, modelname=model_name, - model_nam_file='{}.nam'.format(model_name), - model_rel_path='model_folder', - **kwargs) - - # create simulation - sim = MFSimulation(sim_name=test_ex_name, version='mf6', exe_name=exe_name, - sim_ws=pth) - tdis_rc = [(6.0, 2, 1.0), (6.0, 3, 1.0)] - tdis_package = ModflowTdis(sim, time_units='DAYS', nper=1, - perioddata=[(2.0, 1, 1.0)]) - # specifying the tdis package twice should remove the old tdis package - tdis_package = ModflowTdis(sim, time_units='DAYS', nper=2, - perioddata=tdis_rc) - # first ims file to be replaced - ims_package = ModflowIms(sim, pname='my_ims_file', filename='old_name.ims', - print_option='ALL', complexity='SIMPLE', - outer_hclose=0.00001, - outer_maximum=10, under_relaxation='NONE', - inner_maximum=10, - inner_hclose=0.001, linear_acceleration='CG', - preconditioner_levels=2, - preconditioner_drop_tolerance=0.00001, - number_orthogonalizations=5) - # replace with real ims file - ims_package = ModflowIms(sim, pname='my_ims_file', - filename='{}.ims'.format(test_ex_name), - print_option='ALL', complexity='SIMPLE', - outer_hclose=0.00001, - outer_maximum=50, under_relaxation='NONE', - inner_maximum=30, - inner_hclose=0.00001, linear_acceleration='CG', - preconditioner_levels=7, - preconditioner_drop_tolerance=0.01, - number_orthogonalizations=2) - - model = ModflowGwf(sim, modelname=model_name, - model_nam_file='{}.nam'.format(model_name)) - # test case insensitive lookup - assert(sim.get_model(model_name.upper()) is not None) - - # test getting model using attribute - model = sim.np001_mod - assert(model is not None and model.name == 'np001_mod') - tdis = sim.tdis - assert(tdis is not None and tdis.package_type == 'tdis') - - dis_package = flopy.mf6.ModflowGwfdis(model, length_units='FEET', nlay=1, - nrow=1, ncol=1, delr=100.0, - delc=100.0, - top=60.0, botm=50.0, - filename='{}.dis'.format(model_name), - pname='mydispkg') - # specifying dis package twice with the same name should automatically - # remove the old dis package - top = {'filename': 'top.bin', 'data': 100.0, 'binary': True} - botm = {'filename': 'botm.bin', 'data': 50.0, 'binary': True} - dis_package = flopy.mf6.ModflowGwfdis(model, length_units='FEET', nlay=1, - nrow=1, ncol=10, delr=500.0, - delc=500.0, - top=top, botm=botm, - filename='{}.dis'.format(model_name), - pname='mydispkg') - top_data = dis_package.top.get_data() - assert top_data[0,0] == 100.0 - ic_package = flopy.mf6.ModflowGwfic(model, strt='initial_heads.txt', - filename='{}.ic'.format(model_name)) - npf_package = ModflowGwfnpf(model, pname='npf_1', save_flows=True, - alternative_cell_averaging='logarithmic', - icelltype=1, k=5.0) - - # remove package test using .remove_package(name) - assert (model.get_package(npf_package.package_name) is not None) - model.remove_package(npf_package.package_name) - assert (model.get_package(npf_package.package_name) is None) - # remove package test using .remove() - npf_package = ModflowGwfnpf(model, pname='npf_1', save_flows=True, - alternative_cell_averaging='logarithmic', - icelltype=1, k=5.0) - npf_package.remove() - assert (model.get_package(npf_package.package_name) is None) - - npf_package = ModflowGwfnpf(model, save_flows=True, - alternative_cell_averaging='logarithmic', - icelltype=1, k=5.0) - - oc_package = ModflowGwfoc(model, budget_filerecord=[('np001_mod.cbc',)], - 
head_filerecord=[('np001_mod.hds',)], - saverecord={0: [('HEAD', 'ALL'), - ('BUDGET', 'ALL')], - 1: [('HEAD', 'ALL'), - ('BUDGET', 'ALL')]}, - printrecord=[('HEAD', 'ALL'), ('BUDGET', 'ALL')]) - oc_package.printrecord.add_transient_key(1) - oc_package.printrecord.set_data([('HEAD', 'ALL'), ('BUDGET', 'ALL')], 1) - - sto_package = ModflowGwfsto(model, save_flows=True, iconvert=1, - ss=0.000001, sy=0.15) - - # test saving a binary file with list data - well_spd = {0: {'filename': 'wel0.bin', 'binary': True, - 'data': [((0, 0, 4), -2000.0), ((0, 0, 7), -2.0)]}, - 1: None} - wel_package = ModflowGwfwel(model, print_input=True, print_flows=True, - save_flows=True, maxbound=2, - stress_period_data=well_spd) - wel_package.stress_period_data.add_transient_key(1) - wel_package.stress_period_data.set_data( - {1: {'filename': 'wel.txt', 'factor': 1.0}}) - - # test getting data from a binary file - well_data = wel_package.stress_period_data.get_data(0) - assert well_data[0][0] == (0, 0, 4) - assert well_data[0][1] == -2000.0 - - drn_package = ModflowGwfdrn(model, print_input=True, print_flows=True, - save_flows=True, maxbound=1, - timeseries=[(0.0, 60.0), (100000.0, 60.0)], - stress_period_data=[((0, 0, 0), 80, 'drn_1')]) - drn_package.ts.time_series_namerecord = 'drn_1' - drn_package.ts.interpolation_methodrecord = 'linearend' - - riv_spd = {0: {'filename': 'riv.txt', 'data':[((0, 0, 9), 110, 90.0, - 100.0, 1.0, 2.0, 3.0)]}} - riv_package = ModflowGwfriv(model, print_input=True, print_flows=True, - save_flows=True, maxbound=1, - auxiliary=['var1', 'var2', 'var3'], - stress_period_data=riv_spd) - riv_data = riv_package.stress_period_data.get_data(0) - assert riv_data[0][0] == (0, 0, 9) - assert riv_data[0][1] == 110 - assert riv_data[0][2] == 90.0 - assert riv_data[0][3] == 100.0 - assert riv_data[0][4] == 1.0 - assert riv_data[0][5] == 2.0 - assert riv_data[0][6] == 3.0 - - # verify package look-up - pkgs = model.get_package() - assert (len(pkgs) == 9) - pkg = model.get_package('oc') - assert isinstance(pkg, ModflowGwfoc) - pkg = sim.get_package('tdis') - assert isinstance(pkg, ModflowTdis) - pkg = model.get_package('mydispkg') - assert isinstance(pkg, - flopy.mf6.ModflowGwfdis) and \ - pkg.package_name == 'mydispkg' - pkg = model.mydispkg - assert isinstance(pkg, - flopy.mf6.ModflowGwfdis) and \ - pkg.package_name == 'mydispkg' - - - # verify external file contents - array_util = PyListUtil() - ic_data = ic_package.strt - ic_array = ic_data.get_data() - assert array_util.array_comp(ic_array, [[[100.0, 100.0, 100.0, 100.0, - 100.0, 100.0, 100.0, 100.0, - 100.0, 100.0]]]) - - # make folder to save simulation - sim.simulation_data.mfpath.set_sim_path(run_folder) - - # write simulation to new location - sim.set_all_data_external() - sim.write_simulation() - - # run simulation - if run: - sim.run_simulation() - - # get expected results - budget_file = os.path.join(os.getcwd(), expected_cbc_file) - budget_obj = bf.CellBudgetFile(budget_file, precision='double') - budget_frf_valid = np.array( - budget_obj.get_data(text='FLOW-JA-FACE', full3D=True)) - - # compare output to expected results - head_file = os.path.join(os.getcwd(), expected_head_file) - head_new = os.path.join(run_folder, 'np001_mod.hds') - outfile = os.path.join(run_folder, 'head_compare.dat') - assert pymake.compare_heads(None, None, files1=head_file, files2=head_new, - outfile=outfile) - - budget_frf = sim.simulation_data.mfdata[ - (model_name, 'CBC', 'FLOW-JA-FACE')] - assert array_util.array_comp(budget_frf_valid, budget_frf) - - # clean 
up - sim.delete_output_files() - - try: - error_occurred = False - well_spd = {0: {'filename': 'wel0.bin', 'binary': True, - 'data': [((0, 0, 4), -2000.0), ((0, 0, 7), -2.0)]}} - wel_package = ModflowGwfwel(model, boundnames=True, - print_input=True, print_flows=True, - save_flows=True, maxbound=2, - stress_period_data=well_spd) - except MFDataException: - error_occurred = True - assert error_occurred - - # test error checking - drn_package = ModflowGwfdrn(model, print_input=True, print_flows=True, - save_flows=True, maxbound=1, - timeseries=[(0.0, 60.0), (100000.0, 60.0)], - stress_period_data=[((100, 0, 0), np.nan, - 'drn_1'), ((0, 0, 0), - 10.0, 'drn_2')]) - npf_package = ModflowGwfnpf(model, save_flows=True, - alternative_cell_averaging='logarithmic', - icelltype=1, k=100001.0, k33=1e-12) - chk = sim.check() - summary = '.'.join(chk[0].summary_array.desc) - assert 'drn_1 package: invalid BC index' in summary - assert 'npf package: vertical hydraulic conductivity values below ' \ - 'checker threshold of 1e-11' in summary - assert 'npf package: horizontal hydraulic conductivity values above ' \ - 'checker threshold of 100000.0' in summary - data_invalid = False - try: - drn_package = ModflowGwfdrn(model, print_input=True, print_flows=True, - save_flows=True, maxbound=1, - timeseries=[(0.0, 60.0), (100000.0, 60.0)], - stress_period_data=[((0, 0, 0), 10.0)]) - except MFDataException: - data_invalid = True - assert data_invalid - - return - - -def np002(): - # init paths - test_ex_name = 'np002' - model_name = 'np002_mod' - - pth = os.path.join('..', 'examples', 'data', 'mf6', 'create_tests', - test_ex_name) - pth_for_mf = os.path.join('..', '..', '..', pth) - run_folder = os.path.join(cpth, test_ex_name) - if not os.path.isdir(run_folder): - os.makedirs(run_folder) - - expected_output_folder = os.path.join(pth, 'expected_output') - expected_head_file = os.path.join(expected_output_folder, 'np002_mod.hds') - expected_cbc_file = os.path.join(expected_output_folder, 'np002_mod.cbc') - - # create simulation - sim = MFSimulation(sim_name=test_ex_name, version='mf6', exe_name=exe_name, - sim_ws=run_folder, nocheck=True) - name = sim.name_file - assert name.continue_.get_data() == None - assert name.nocheck.get_data() == True - assert name.memory_print_option.get_data() == None - - tdis_rc = [(6.0, 2, 1.0), (6.0, 3, 1.0)] - tdis_package = ModflowTdis(sim, time_units='DAYS', nper=2, - perioddata=tdis_rc) - model = ModflowGwf(sim, modelname=model_name, - model_nam_file='{}.nam'.format(model_name)) - ims_package = ModflowIms(sim, print_option='ALL', complexity='SIMPLE', - outer_hclose=0.00001, - outer_maximum=50, under_relaxation='NONE', - inner_maximum=30, - inner_hclose=0.00001, linear_acceleration='CG', - preconditioner_levels=7, - preconditioner_drop_tolerance=0.01, - number_orthogonalizations=2) - sim.register_ims_package(ims_package, [model.name]) - - # get rid of top_data.txt so that a later test does not automatically pass - top_data_file = os.path.join(run_folder, 'top_data.txt') - if os.path.isfile(top_data_file): - os.remove(top_data_file) - # test loading data to be stored in a file and loading data from a file - # using the "dictionary" input format - top = {'filename': 'top_data.txt', 'factor': 1.0, - 'data': [100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, - 100.0, 100.0]} - botm_file = os.path.join(pth_for_mf, 'botm.txt') - botm = {'filename': botm_file, 'factor': 1.0} - dis_package = ModflowGwfdis(model, length_units='FEET', nlay=1, nrow=1, - ncol=10, delr=500.0, delc=500.0, 
- top=top, botm=botm, - filename='{}.dis'.format(model_name)) - ic_vals = [100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, - 100.0] - ic_package = ModflowGwfic(model, strt=ic_vals, - filename='{}.ic'.format(model_name)) - ic_package.strt.store_as_external_file('initial_heads.txt') - npf_package = ModflowGwfnpf(model, save_flows=True, icelltype=1, k=100.0) - npf_package.k.store_as_external_file('k.bin', binary=True) - oc_package = ModflowGwfoc(model, budget_filerecord=[('np002_mod.cbc',)], - head_filerecord=[('np002_mod.hds',)], - saverecord=[('HEAD', 'ALL'), ('BUDGET', 'ALL')], - printrecord=[('HEAD', 'ALL'), ('BUDGET', 'ALL')]) - oc_package.saverecord.add_transient_key(1) - oc_package.saverecord.set_data([('HEAD', 'ALL'), ('BUDGET', 'ALL')], 1) - oc_package.printrecord.add_transient_key(1) - oc_package.printrecord.set_data([('HEAD', 'ALL'), ('BUDGET', 'ALL')], 1) - - sto_package = ModflowGwfsto(model, save_flows=True, iconvert=1, - ss=0.000001, sy=0.15) - - hfb_package = ModflowGwfhfb(model, print_input=True, maxhfb=1, - stress_period_data=[((0, 0, 3), (0, 0, 4), - 0.00001)]) - chd_package = ModflowGwfchd(model, print_input=True, print_flows=True, - maxbound=1, stress_period_data=[((0, 0, 0), - 65.0)]) - ghb_package = ModflowGwfghb(model, print_input=True, print_flows=True, - maxbound=1, stress_period_data=[((0, 0, 9), - 125.0, 60.0)]) - rch_package = ModflowGwfrch(model, print_input=True, print_flows=True, - maxbound=2, - stress_period_data=[((0, 0, 3), 0.02), - ((0, 0, 6), 0.1)]) - - # write simulation to new location - sim.write_simulation() - - assert(os.path.isfile(top_data_file)) - - if run: - # run simulation - sim.run_simulation() - - sim2 = MFSimulation.load(sim_ws=run_folder) - model_ = sim2.get_model(model_name) - npf_package = model_.get_package('npf') - k = npf_package.k.array - - # get expected results - budget_file = os.path.join(os.getcwd(), expected_cbc_file) - budget_obj = bf.CellBudgetFile(budget_file, precision='double') - budget_frf_valid = np.array( - budget_obj.get_data(text='FLOW JA FACE ', full3D=True)) - - # compare output to expected results - head_file = os.path.join(os.getcwd(), expected_head_file) - head_new = os.path.join(run_folder, 'np002_mod.hds') - outfile = os.path.join(run_folder, 'head_compare.dat') - assert pymake.compare_heads(None, None, files1=head_file, - files2=head_new, outfile=outfile) - - array_util = PyListUtil() - budget_frf = sim.simulation_data.mfdata[ - (model_name, 'CBC', 'FLOW-JA-FACE')] - assert array_util.array_comp(budget_frf_valid, budget_frf) - - # verify external text file was written correctly - ext_file_path = os.path.join(run_folder, 'initial_heads.txt') - fd = open(ext_file_path, 'r') - line = fd.readline() - line_array = line.split() - assert len(ic_vals) == len(line_array) - for index in range(0, len(ic_vals)): - assert ic_vals[index] == float(line_array[index]) - fd.close() - - # clean up - sim.delete_output_files() - - # test error checking - sto_package = ModflowGwfsto(model, save_flows=True, iconvert=1, - ss=0.00000001, sy=0.6) - chd_package = ModflowGwfchd(model, print_input=True, print_flows=True, - maxbound=1, stress_period_data=[((0, 0, 0), - np.nan)]) - chk = sim.check() - summary = '.'.join(chk[0].summary_array.desc) - assert 'sto package: specific storage values below ' \ - 'checker threshold of 1e-06' in summary - assert 'sto package: specific yield values above ' \ - 'checker threshold of 0.5' in summary - assert 'Not a number' in summary - - return - - -def test021_twri(): - # init paths - 
test_ex_name = 'test021_twri' - model_name = 'twri' - - pth = os.path.join('..', 'examples', 'data', 'mf6', 'create_tests', - test_ex_name) - run_folder = os.path.join(cpth, test_ex_name) - if not os.path.isdir(run_folder): - os.makedirs(run_folder) - - expected_output_folder = os.path.join(pth, 'expected_output') - expected_head_file = os.path.join(expected_output_folder, 'twri.hds') - - # create simulation - sim = MFSimulation(sim_name=test_ex_name, version='mf6', exe_name=exe_name, - sim_ws=pth) - tdis_rc = [(86400.0, 1, 1.0)] - tdis_package = ModflowTdis(sim, time_units='SECONDS', nper=1, - perioddata=tdis_rc) - model = ModflowGwf(sim, modelname=model_name, - model_nam_file='{}.nam'.format(model_name)) - ims_package = ModflowIms(sim, print_option='SUMMARY', outer_hclose=0.0001, - outer_maximum=500, under_relaxation='NONE', - inner_maximum=100, - inner_hclose=0.0001, rcloserecord=0.001, - linear_acceleration='CG', - scaling_method='NONE', reordering_method='NONE', - relaxation_factor=0.97) - sim.register_ims_package(ims_package, [model.name]) - dis_package = flopy.mf6.ModflowGwfdis(model, nlay=3, nrow=15, ncol=15, - delr=5000.0, delc=5000.0, - top=200.0, botm=[-200, -300, -450], - filename='{}.dis'.format(model_name)) - strt = [{'filename': 'strt.txt', 'factor': 1.0, 'data': 0.0}, - {'filename': 'strt2.bin', 'factor': 1.0, 'data': 1.0, - 'binary': 'True'}, 2.0] - ic_package = ModflowGwfic(model, strt=strt, - filename='{}.ic'.format(model_name)) - npf_package = ModflowGwfnpf(model, save_flows=True, perched=True, - cvoptions='dewatered', - icelltype=[1, 0, 0], k=[0.001, 0.0001, 0.0002], - k33=0.00000002) - oc_package = ModflowGwfoc(model, budget_filerecord='twri.cbc', - head_filerecord='twri.hds', - saverecord=[('HEAD', 'ALL'), ('BUDGET', 'ALL')], - printrecord=[('HEAD', 'ALL')]) - - # build stress_period_data for chd package - stress_period_data = [] - for layer in range(0, 2): - for row in range(0, 15): - stress_period_data.append(((layer, row, 0), 0.0)) - chd_package = ModflowGwfchd(model, print_input=True, print_flows=True, - save_flows=True, maxbound=100, - stress_period_data=stress_period_data) - - # build stress_period_data for drn package - conc = np.ones((15, 15), dtype=np.float) * 35. 
- auxdata = {0: [6, conc]} - - stress_period_data = [] - drn_heads = [0.0, 0.0, 10.0, 20.0, 30.0, 50.0, 70.0, 90.0, 100.0] - for col, head in zip(range(1, 10), drn_heads): - stress_period_data.append(((0, 7, col), head, 1.0, - 'name_{}'.format(col))) - drn_package = ModflowGwfdrn(model, print_input=True, print_flows=True, - save_flows=True, maxbound=9, boundnames=True, - stress_period_data=stress_period_data) - rch_package = ModflowGwfrcha(model, readasarrays=True, fixed_cell=True, - recharge={0: 0.00000003}, - auxiliary=[('iface', 'conc')], aux=auxdata) - - aux = rch_package.aux.get_data() - - stress_period_data = [] - layers = [2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] - rows = [4, 3, 5, 8, 8, 8, 8, 10, 10, 10, 10, 12, 12, 12, 12] - cols = [10, 5, 11, 7, 9, 11, 13, 7, 9, 11, 13, 7, 9, 11, 13] - for layer, row, col in zip(layers, rows, cols): - stress_period_data.append(((layer, row, col), -5.0)) - wel_package = ModflowGwfwel(model, print_input=True, print_flows=True, - save_flows=True, maxbound=15, - stress_period_data=stress_period_data) - - # change folder to save simulation - sim.simulation_data.mfpath.set_sim_path(run_folder) - - # write simulation to new location - sim.write_simulation() - - # run simulation - sim.run_simulation() - - sim2 = MFSimulation.load(sim_ws=run_folder) - model2 = sim2.get_model() - ic2 = model2.get_package('ic') - strt2 = ic2.strt.get_data() - drn2 = model2.get_package('drn') - drn_spd = drn2.stress_period_data.get_data() - assert(strt2[0,0,0] == 0.0) - assert(strt2[1,0,0] == 1.0) - assert(strt2[2,0,0] == 2.0) - assert(drn_spd[0][1][3] == 'name_2') - - # compare output to expected results - head_file = os.path.join(os.getcwd(), expected_head_file) - head_new = os.path.join(run_folder, 'twri.hds') - outfile = os.path.join(run_folder, 'head_compare.dat') - assert pymake.compare_heads(None, None, files1=head_file, files2=head_new, - outfile=outfile) - - # clean up - sim.delete_output_files() - - return - - -def test005_advgw_tidal(): - # init paths - test_ex_name = 'test005_advgw_tidal' - model_name = 'AdvGW_tidal' - - pth = os.path.join('..', 'examples', 'data', 'mf6', 'create_tests', - test_ex_name) - run_folder = os.path.join(cpth, test_ex_name) - if not os.path.isdir(run_folder): - os.makedirs(run_folder) - - expected_output_folder = os.path.join(pth, 'expected_output') - expected_head_file = os.path.join(expected_output_folder, - 'AdvGW_tidal.hds') - - # create simulation - sim = MFSimulation(sim_name=test_ex_name, version='mf6', exe_name=exe_name, - sim_ws=pth) - # test tdis package deletion - tdis_package = ModflowTdis(sim, time_units='DAYS', nper=1, - perioddata=[(2.0, 2, 1.0)]) - sim.remove_package(tdis_package.package_type) - - tdis_rc = [(1.0, 1, 1.0), (10.0, 120, 1.0), (10.0, 120, 1.0), - (10.0, 120, 1.0)] - tdis_package = ModflowTdis(sim, time_units='DAYS', nper=4, - perioddata=tdis_rc) - model = ModflowGwf(sim, modelname=model_name, - model_nam_file='{}.nam'.format(model_name)) - ims_package = ModflowIms(sim, print_option='SUMMARY', complexity='SIMPLE', - outer_hclose=0.0001, - outer_maximum=500, under_relaxation='NONE', - inner_maximum=100, - inner_hclose=0.0001, rcloserecord=0.001, - linear_acceleration='CG', - scaling_method='NONE', reordering_method='NONE', - relaxation_factor=0.97) - sim.register_ims_package(ims_package, [model.name]) - bot_data = [-100 for x in range(150)] - dis_package = ModflowGwfdis(model, nlay=3, nrow=15, ncol=10, delr=500.0, - delc=500.0, - top=50.0, botm=[5.0, -10.0, {'factor': 1.0, - 'data': bot_data}], - 
filename='{}.dis'.format(model_name)) - ic_package = ModflowGwfic(model, strt=50.0, - filename='{}.ic'.format(model_name)) - npf_package = ModflowGwfnpf(model, save_flows=True, icelltype=[1, 0, 0], - k=[5.0, 0.1, 4.0], - k33=[0.5, 0.005, 0.1]) - oc_package = ModflowGwfoc(model, budget_filerecord='AdvGW_tidal.cbc', - head_filerecord='AdvGW_tidal.hds', - headprintrecord=[('COLUMNS', 10, 'WIDTH', 15, - 'DIGITS', 6, 'GENERAL')], - saverecord=[('HEAD', 'ALL'), ('BUDGET', 'ALL')], - printrecord=[('HEAD', 'FIRST'), ('HEAD', 'LAST'), - ('BUDGET', 'LAST')]) - # test empty - sy_template = ModflowGwfsto.sy.empty(model, True) - for layer in range(0, 3): - sy_template[layer]['data'] = 0.2 - layer_storage_types = [DataStorageType.internal_array, - DataStorageType.internal_constant, - DataStorageType.internal_array] - ss_template = ModflowGwfsto.ss.empty(model, True, layer_storage_types, - 0.000001) - sto_package = ModflowGwfsto(model, save_flows=True, iconvert=1, - ss=ss_template, sy=sy_template, - steady_state={0: True}, - transient={1: True}) - - # wel, evt, ghb, obs, riv, rch, ts - # well package - # test empty with aux vars, bound names, and time series - period_two = ModflowGwfwel.stress_period_data.empty(model, maxbound=3, - aux_vars=['var1', - 'var2', - 'var3'], - boundnames=True, - timeseries=True) - period_two[0][0] = ((0, 11, 2), -50.0, -1, -2, -3, None) - period_two[0][1] = ((2, 4, 7), 'well_1_rate', 1, 2, 3, 'well_1') - period_two[0][2] = ((2, 3, 2), 'well_2_rate', 4, 5, 6, 'well_2') - period_three = ModflowGwfwel.stress_period_data.empty(model, maxbound=2, - aux_vars=['var1', - 'var2', - 'var3'], - boundnames=True, - timeseries=True) - period_three[0][0] = ((2, 3, 2), 'well_2_rate', 1, 2, 3, 'well_2') - period_three[0][1] = ((2, 4, 7), 'well_1_rate', 4, 5, 6, 'well_1') - period_four = ModflowGwfwel.stress_period_data.empty(model, maxbound=5, - aux_vars=['var1', - 'var2', - 'var3'], - boundnames=True, - timeseries=True) - period_four[0][0] = ((2, 4, 7), 'well_1_rate', 1, 2, 3, 'well_1') - period_four[0][1] = ((2, 3, 2), 'well_2_rate', 4, 5, 6, 'well_2') - period_four[0][2] = ((0, 11, 2), -10.0, 7, 8, 9, None) - period_four[0][3] = ((0, 2, 4), -20.0, 17, 18, 19, None) - period_four[0][4] = ((0, 13, 5), -40.0, 27, 28, 29, None) - stress_period_data = {} - stress_period_data[1] = period_two[0] - stress_period_data[2] = period_three[0] - stress_period_data[3] = period_four[0] - # well ts package - timeseries = [(0.0, 0.0, 0.0, 0.0), - (1.0, -200.0, 0.0, -100.0), - (11.0, -1800.0, -500.0, -200.0), - (21.0, -200.0, -400.0, -300.0), - (31.0, 0.0, -600.0, -400.0)] - ts_dict = {'filename': 'well-rates.ts', 'timeseries': timeseries, - 'time_series_namerecord': [('well_1_rate', 'well_2_rate', - 'well_3_rate')], - 'interpolation_methodrecord': [('stepwise', 'stepwise', - 'stepwise')]} - # test removing package with child packages - wel_package = ModflowGwfwel(model, print_input=True, print_flows=True, - auxiliary=[('var1', 'var2', 'var3')], - maxbound=5, - stress_period_data=stress_period_data, - boundnames=True, save_flows=True, - timeseries=ts_dict) - wel_package.remove() - wel_package = ModflowGwfwel(model, print_input=True, print_flows=True, - auxiliary=[('var1', 'var2', 'var3')], - maxbound=5, - stress_period_data=stress_period_data, - boundnames=True, save_flows=True, - timeseries=ts_dict) - - # test empty - evt_period = ModflowGwfevt.stress_period_data.empty(model, 150, nseg=3) - for col in range(0, 10): - for row in range(0, 15): - evt_period[0][col * 15 + row] = ( - ((0, row, col), 50.0, 
0.0004, 10.0, 0.2, 0.5, 0.3, 0.1, None)) - evt_package = ModflowGwfevt(model, print_input=True, print_flows=True, - save_flows=True, maxbound=150, - nseg=3, stress_period_data=evt_period) - - # build ghb - ghb_period = {} - ghb_period_array = [] - for layer, cond in zip(range(1, 3), [15.0, 1500.0]): - for row in range(0, 15): - ghb_period_array.append( - ((layer, row, 9), 'tides', cond, 'Estuary-L2')) - ghb_period[0] = ghb_period_array - - # build ts ghb - ts_recarray = [] - fd = open(os.path.join(pth, 'tides.txt'), 'r') - for line in fd: - line_list = line.strip().split(',') - ts_recarray.append((float(line_list[0]), float(line_list[1]))) - ts_package_dict = {'filename':'tides.ts', - 'timeseries':ts_recarray, - 'time_series_namerecord':'tides', - 'interpolation_methodrecord':'linear'} - - obs_dict = {('ghb_obs.csv', 'binary'): [('ghb-2-6-10', 'GHB', (1, 5, 9)), - ('ghb-3-6-10', 'GHB', (2, 5, 9))], - 'ghb_flows.csv': [('Estuary2', 'GHB', 'Estuary-L2'), - ('Estuary3', 'GHB', 'Estuary-L3')], - 'filename': 'AdvGW_tidal.ghb.obs', 'digits': 10, - 'print_input': True} - - ghb_package = ModflowGwfghb(model, print_input=True, print_flows=True, - save_flows=True, boundnames=True, - timeseries=ts_package_dict, - observations=obs_dict, - maxbound=30, stress_period_data=ghb_period) - - riv_period = {} - riv_period_array = [((0, 2, 0), 'river_stage_1', 1001.0, 35.9, None), - ((0, 3, 1), 'river_stage_1', 1002.0, 35.8, None), - ((0, 4, 2), 'river_stage_1', 1003.0, 35.7, None), - ((0, 4, 3), 'river_stage_1', 1004.0, 35.6, None), - ((0, 5, 4), 'river_stage_1', 1005.0, 35.5, None), - ((0, 5, 5), 'river_stage_1', 1006.0, 35.4, 'riv1_c6'), - ((0, 5, 6), 'river_stage_1', 1007.0, 35.3, 'riv1_c7'), - ((0, 4, 7), 'river_stage_1', 1008.0, 35.2, None), - ((0, 4, 8), 'river_stage_1', 1009.0, 35.1, None), - ((0, 4, 9), 'river_stage_1', 1010.0, 35.0, None), - ((0, 9, 0), 'river_stage_2', 1001.0, 36.9, - 'riv2_upper'), - ((0, 8, 1), 'river_stage_2', 1002.0, 36.8, - 'riv2_upper'), - ((0, 7, 2), 'river_stage_2', 1003.0, 36.7, - 'riv2_upper'), - ((0, 6, 3), 'river_stage_2', 1004.0, 36.6, None), - ((0, 6, 4), 'river_stage_2', 1005.0, 36.5, None), - ((0, 5, 5), 'river_stage_2', 1006.0, 36.4, 'riv2_c6'), - ((0, 5, 6), 'river_stage_2', 1007.0, 36.3, 'riv2_c7'), - ((0, 6, 7), 'river_stage_2', 1008.0, 36.2, None), - ((0, 6, 8), 'river_stage_2', 1009.0, 36.1), - ((0, 6, 9), 'river_stage_2', 1010.0, 36.0)] - - riv_period[0] = riv_period_array - # riv time series - ts_data = [(0.0, 40.0, 41.0), (1.0, 41.0, 41.5), (2.0, 43.0, 42.0), - (3.0, 45.0, 42.8), (4.0, 44.0, 43.0), - (6.0, 43.0, 43.1), (9.0, 42.0, 42.4), (11.0, 41.0, 41.5), - (31.0, 40.0, 41.0)] - ts_dict = {'filename': 'river_stages.ts', 'timeseries': ts_data, - 'time_series_namerecord': [('river_stage_1', 'river_stage_2')], - 'interpolation_methodrecord': [('linear', 'stepwise')]} - # riv obs - obs_dict = {'riv_obs.csv': [('rv1-3-1', 'RIV', (0, 2, 0)), - ('rv1-4-2', 'RIV', (0, 3, 1)), - ('rv1-5-3', 'RIV', (0, 4, 2)), - ('rv1-5-4', 'RIV', (0, 4, 3)), - ('rv1-6-5', 'RIV', (0, 5, 4)), - ('rv1-c6', 'RIV', 'riv1_c6'), - ('rv1-c7', 'RIV', 'riv1_c7'), - ('rv2-upper', 'RIV', 'riv2_upper'), - ('rv-2-7-4', 'RIV', (0, 6, 3)), - ('rv2-8-5', 'RIV', (0, 6, 4)), - ('rv-2-9-6', 'RIV', (0, 5, 5,))], - 'riv_flowsA.csv': [('riv1-3-1', 'RIV', (0, 2, 0)), - ('riv1-4-2', 'RIV', (0, 3, 1)), - ('riv1-5-3', 'RIV', (0, 4, 2))], - 'riv_flowsB.csv': [('riv2-10-1', 'RIV', (0, 9, 0)), - ('riv-2-9-2', 'RIV', (0, 8, 1)), - ('riv2-8-3', 'RIV', (0, 7, 2))], - 'filename': 'AdvGW_tidal.riv.obs', 'digits': 
10, - 'print_input': True} - - riv_package = ModflowGwfriv(model, print_input=True, print_flows=True, - save_flows=True, - boundnames=True, - timeseries=ts_dict, - maxbound=20, stress_period_data=riv_period, - observations=obs_dict) - - rch1_period = {} - rch1_period_array = [] - col_range = {0: 3, 1: 4, 2: 5} - for row in range(0, 15): - if row in col_range: - col_max = col_range[row] - else: - col_max = 6 - for col in range(0, col_max): - if (row == 3 and col == 5) or (row == 2 and col == 4) or ( - row == 1 and col == 3) or (row == 0 and col == 2): - mult = 0.5 - else: - mult = 1.0 - if row == 0 and col == 0: - bnd = 'rch-1-1' - elif row == 0 and col == 1: - bnd = 'rch-1-2' - elif row == 1 and col == 2: - bnd = 'rch-2-3' - else: - bnd = None - rch1_period_array.append(((0, row, col), 'rch_1', mult, bnd)) - rch1_period[0] = rch1_period_array - rch1_package = ModflowGwfrch(model, filename='AdvGW_tidal_1.rch', - pname='rch_1', fixed_cell=True, - auxiliary='MULTIPLIER', - auxmultname='MULTIPLIER', - print_input=True, print_flows=True, - save_flows=True, boundnames=True, - maxbound=84, stress_period_data=rch1_period) - ts_data = [(0.0, 0.0015), (1.0, 0.0010), (11.0, 0.0015), - (21.0, 0.0025), (31.0, 0.0015)] - rch1_package.ts.initialize(timeseries=ts_data, - filename='recharge_rates_1.ts', - time_series_namerecord='rch_1', - interpolation_methodrecord='stepwise') - - rch2_period = {} - rch2_period_array = [((0, 0, 2), 'rch_2', 0.5), ((0, 0, 3), 'rch_2', 1.0), - ((0, 0, 4), 'rch_2', 1.0), - ((0, 0, 5), 'rch_2', 1.0), ((0, 0, 6), 'rch_2', 1.0), - ((0, 0, 7), 'rch_2', 1.0), - ((0, 0, 8), 'rch_2', 1.0), ((0, 0, 9), 'rch_2', 0.5), - ((0, 1, 3), 'rch_2', 0.5), - ((0, 1, 4), 'rch_2', 1.0), ((0, 1, 5), 'rch_2', 1.0), - ((0, 1, 6), 'rch_2', 1.0), - ((0, 1, 7), 'rch_2', 1.0), ((0, 1, 8), 'rch_2', 0.5), - ((0, 2, 4), 'rch_2', 0.5), - ((0, 2, 5), 'rch_2', 1.0), ((0, 2, 6), 'rch_2', 1.0), - ((0, 2, 7), 'rch_2', 0.5), - ((0, 3, 5), 'rch_2', 0.5), ((0, 3, 6), 'rch_2', 0.5)] - rch2_period[0] = rch2_period_array - rch2_package = ModflowGwfrch(model, filename='AdvGW_tidal_2.rch', - pname='rch_2', fixed_cell=True, - auxiliary='MULTIPLIER', - auxmultname='MULTIPLIER', - print_input=True, print_flows=True, - save_flows=True, - maxbound=20, stress_period_data=rch2_period) - ts_data = [(0.0, 0.0016), (1.0, 0.0018), (11.0, 0.0019), - (21.0, 0.0016), (31.0, 0.0018)] - rch2_package.ts.initialize(timeseries=ts_data, - filename='recharge_rates_2.ts', - time_series_namerecord='rch_2', - interpolation_methodrecord='linear') - - rch3_period = {} - rch3_period_array = [] - col_range = {0: 9, 1: 8, 2: 7} - for row in range(0, 15): - if row in col_range: - col_min = col_range[row] - else: - col_min = 6 - for col in range(col_min, 10): - if (row == 0 and col == 9) or (row == 1 and col == 8) or ( - row == 2 and col == 7) or (row == 3 and col == 6): - mult = 0.5 - else: - mult = 1.0 - rch3_period_array.append(((0, row, col), 'rch_3', mult)) - rch3_period[0] = rch3_period_array - rch3_package = ModflowGwfrch(model, filename='AdvGW_tidal_3.rch', - pname='rch_3', fixed_cell=True, - auxiliary='MULTIPLIER', - auxmultname='MULTIPLIER', - print_input=True, print_flows=True, - save_flows=True, - maxbound=54, - stress_period_data=rch3_period) - ts_data = [(0.0, 0.0017), (1.0, 0.0020), (11.0, 0.0017), - (21.0, 0.0018), (31.0, 0.0020)] - rch3_package.ts.initialize(timeseries=ts_data, - filename='recharge_rates_3.ts', - time_series_namerecord='rch_3', - interpolation_methodrecord='linear') - - # change folder to save simulation - 
sim.simulation_data.mfpath.set_sim_path(run_folder) - - # write simulation to new location - sim.set_all_data_external() - sim.write_simulation() - - # run simulation - sim.run_simulation() - - # compare output to expected results - head_file = os.path.join(os.getcwd(), expected_head_file) - head_new = os.path.join(run_folder, 'AdvGW_tidal.hds') - outfile = os.path.join(run_folder, 'head_compare.dat') - assert pymake.compare_heads(None, None, files1=head_file, files2=head_new, - outfile=outfile) - - # test rename all - model.rename_all_packages('new_name') - assert model.name_file.filename == 'new_name.nam' - package_type_dict = {} - for package in model.packagelist: - if not package.package_type in package_type_dict: - assert package.filename == 'new_name.{}'.format(package.package_type) - package_type_dict[package.package_type] = 1 - sim.write_simulation() - name_file = os.path.join(run_folder, 'new_name.nam') - assert os.path.exists(name_file) - dis_file = os.path.join(run_folder, 'new_name.dis') - assert os.path.exists(dis_file) - - sim.rename_all_packages('all_files_same_name') - package_type_dict = {} - for package in model.packagelist: - if not package.package_type in package_type_dict: - assert package.filename == \ - 'all_files_same_name.{}'.format(package.package_type) - package_type_dict[package.package_type] = 1 - assert sim._tdis_file.filename == 'all_files_same_name.tdis' - for ims_file in sim._ims_files.values(): - assert ims_file.filename == 'all_files_same_name.ims' - sim.write_simulation() - name_file = os.path.join(run_folder, 'all_files_same_name.nam') - assert os.path.exists(name_file) - dis_file = os.path.join(run_folder, 'all_files_same_name.dis') - assert os.path.exists(dis_file) - tdis_file = os.path.join(run_folder, 'all_files_same_name.tdis') - assert os.path.exists(tdis_file) - - # load simulation - sim_load = MFSimulation.load(sim.name, 'mf6', exe_name, - sim.simulation_data.mfpath.get_sim_path(), - verbosity_level=0) - model = sim_load.get_model() - # confirm ghb obs data has two blocks with correct file names - ghb = model.get_package('ghb') - obs = ghb.obs - obs_data = obs.continuous.get_data() - found_flows = False - found_obs = False - for key, value in obs_data.items(): - if key.lower() == 'ghb_flows.csv': - # there should be only one - assert not found_flows - found_flows = True - if key.lower() == 'ghb_obs.csv': - # there should be only one - assert not found_obs - found_obs = True - assert found_flows and found_obs - - # clean up - sim.delete_output_files() - - # check packages - chk = sim.check() - summary = '.'.join(chk[0].summary_array.desc) - assert summary == '' - - return - - -def test004_bcfss(): - # init paths - test_ex_name = 'test004_bcfss' - model_name = 'bcf2ss' - - pth = os.path.join('..', 'examples', 'data', 'mf6', 'create_tests', - test_ex_name) - run_folder = os.path.join(cpth, test_ex_name) - if not os.path.isdir(run_folder): - os.makedirs(run_folder) - - expected_output_folder = os.path.join(pth, 'expected_output') - expected_head_file = os.path.join(expected_output_folder, 'bcf2ss.hds') - - # create simulation - sim = MFSimulation(sim_name=model_name, version='mf6', exe_name=exe_name, - sim_ws=pth) - tdis_rc = [(1.0, 1, 1.0), (1.0, 1, 1.0)] - tdis_package = ModflowTdis(sim, time_units='DAYS', nper=2, - perioddata=tdis_rc) - model = ModflowGwf(sim, modelname=model_name, - model_nam_file='{}.nam'.format(model_name)) - ims_package = ModflowIms(sim, print_option='ALL', - csv_output_filerecord='bcf2ss.ims.csv', - complexity='SIMPLE', - 
outer_hclose=0.000001, outer_maximum=500, - under_relaxation='NONE', inner_maximum=100, - inner_hclose=0.000001, rcloserecord=0.001, - linear_acceleration='CG', - scaling_method='NONE', reordering_method='NONE', - relaxation_factor=0.97) - sim.register_ims_package(ims_package, [model.name]) - dis_package = ModflowGwfdis(model, nlay=2, nrow=10, ncol=15, delr=500.0, - delc=500.0, - top=150.0, botm=[50.0, -50.0], - filename='{}.dis'.format(model_name)) - ic_package = ModflowGwfic(model, strt=0.0, - filename='{}.ic'.format(model_name)) - wetdry_data = [] - for row in range(0, 10): - if row == 2 or row == 7: - wetdry_data += [2.0, 2.0, 2.0, -2.0, 2.0, 2.0, 2.0, 2.0] - else: - wetdry_data += [2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0] - wetdry_data += [-2.0, -2.0, -2.0, -2.0, -2.0, -2.0, -2.0] - for row in range(0, 10): - for col in range(0, 15): - wetdry_data.append(0.0) - npf_package = ModflowGwfnpf(model, rewet_record=[ - ('WETFCT', 1.0, 'IWETIT', 1, 'IHDWET', 0)], - save_flows=True, icelltype=[1, 0], - wetdry=wetdry_data, k=[10.0, 5.0], - k33=0.1) - oc_package = ModflowGwfoc(model, budget_filerecord='bcf2ss.cbb', - head_filerecord='bcf2ss.hds', - headprintrecord=[('COLUMNS', 15, 'WIDTH', 12, - 'DIGITS', 2, 'GENERAL')], - saverecord=[('HEAD', 'ALL'), ('BUDGET', 'ALL')], - printrecord=[('HEAD', 'ALL'), ('BUDGET', 'ALL')]) - aux = {0: [[50.0], [1.3]], 1: [[200.0], [1.5]]} - # aux = {0: [[100.0], [2.3]]} - rch_package = ModflowGwfrcha(model, readasarrays=True, save_flows=True, - auxiliary=[('var1', 'var2')], - recharge={0: 0.004}, aux=aux) # *** test if aux works *** - - # aux tests - aux_out = rch_package.aux.get_data() - assert(aux_out[0][0][0,0] == 50.) - assert(aux_out[0][1][0,0] == 1.3) - assert(aux_out[1][0][0,0] == 200.0) - assert(aux_out[1][1][0,0] == 1.5) - - riv_period = {} - riv_period_array = [] - for row in range(0, 10): - riv_period_array.append(((1, row, 14), 0.0, 10000.0, -5.0)) - riv_period[0] = riv_period_array - riv_package = ModflowGwfriv(model, save_flows='bcf2ss.cbb', maxbound=10, - stress_period_data=riv_period) - - wel_period = {} - stress_period_data = [((1, 2, 3), -35000.0, 1, 2, 3), - ((1, 7, 3), -35000.0, 4, 5, 6)] - wel_period[1] = stress_period_data - wel_package = ModflowGwfwel(model, print_input=True, print_flows=True, - save_flows=True, - auxiliary=[('var1', 'var2', 'var3')], - maxbound=2, - stress_period_data=wel_period) - - # change folder to save simulation - sim.simulation_data.mfpath.set_sim_path(run_folder) - - # write simulation to new location - sim.set_all_data_external() - sim.write_simulation() - - # run simulation - if run: - sim.run_simulation() - - # compare output to expected results - head_file = os.path.join(os.getcwd(), expected_head_file) - head_new = os.path.join(run_folder, 'bcf2ss.hds') - outfile = os.path.join(run_folder, 'head_compare.dat') - assert pymake.compare_heads(None, None, files1=head_file, files2=head_new, - outfile=outfile) - - # clean up - sim.delete_output_files() - - return - - -def test035_fhb(): - # init paths - test_ex_name = 'test035_fhb' - model_name = 'fhb2015' - - pth = os.path.join('..', 'examples', 'data', 'mf6', 'create_tests', - test_ex_name) - run_folder = os.path.join(cpth, test_ex_name) - if not os.path.isdir(run_folder): - os.makedirs(run_folder) - - expected_output_folder = os.path.join(pth, 'expected_output') - expected_head_file = os.path.join(expected_output_folder, - 'fhb2015_fhb.hds') - - # create simulation - sim = MFSimulation(sim_name=model_name, version='mf6', exe_name=exe_name, - sim_ws=pth) - tdis_rc = 
[(400.0, 10, 1.0), (200.0, 4, 1.0), (400.0, 6, 1.1)] - tdis_package = ModflowTdis(sim, time_units='DAYS', nper=3, - perioddata=tdis_rc) - model = ModflowGwf(sim, modelname=model_name, - model_nam_file='{}.nam'.format(model_name)) - ims_package = ModflowIms(sim, print_option='SUMMARY', complexity='SIMPLE', - outer_hclose=0.001, - outer_maximum=120, under_relaxation='NONE', - inner_maximum=100, inner_hclose=0.0001, - rcloserecord=0.1, linear_acceleration='CG', - preconditioner_levels=7, - preconditioner_drop_tolerance=0.001, - number_orthogonalizations=2) - sim.register_ims_package(ims_package, [model.name]) - dis_package = ModflowGwfdis(model, length_units='UNDEFINED', nlay=1, - nrow=3, ncol=10, delr=1000.0, - delc=1000.0, top=50.0, botm=-200.0, - filename='{}.dis'.format(model_name)) - ic_package = ModflowGwfic(model, strt=0.0, - filename='{}.ic'.format(model_name)) - npf_package = ModflowGwfnpf(model, perched=True, icelltype=0, k=20.0, - k33=1.0) - oc_package = ModflowGwfoc(model, head_filerecord='fhb2015_fhb.hds', - headprintrecord=[('COLUMNS', 20, 'WIDTH', 5, - 'DIGITS', 2, 'FIXED')], - saverecord={0: [('HEAD', 'ALL')], - 2: [('HEAD', 'ALL')]}, - printrecord={ - 0: [('HEAD', 'ALL'), ('BUDGET', 'ALL')], - 2: [('HEAD', 'ALL'), ('BUDGET', 'ALL')]}) - sto_package = ModflowGwfsto(model, storagecoefficient=True, iconvert=0, - ss=0.01, sy=0.0) - time = model.modeltime - assert (time.steady_state[0] == False and time.steady_state[1] == False - and time.steady_state[2] == False) - wel_period = {0: [((0, 1, 0), 'flow')]} - wel_package = ModflowGwfwel(model, print_input=True, print_flows=True, - save_flows=True, - maxbound=1, stress_period_data=wel_period) - well_ts = [(0.0, 2000.0), (307.0, 6000.0), (791.0, 5000.0), - (1000.0, 9000.0)] - wel_package.ts.initialize(filename='fhb_flow.ts', timeseries=well_ts, - time_series_namerecord='flow', - interpolation_methodrecord='linear') - - chd_period = { - 0: [((0, 0, 9), 'head'), ((0, 1, 9), 'head'), ((0, 2, 9), 'head')]} - chd_package = ModflowGwfchd(model, print_input=True, print_flows=True, - save_flows=True, maxbound=3, - stress_period_data=chd_period) - chd_ts = [(0.0, 0.0), (307.0, 1.0), (791.0, 5.0), (1000.0, 2.0)] - chd_package.ts.initialize(filename='fhb_head.ts', timeseries=chd_ts, - time_series_namerecord='head', - interpolation_methodrecord='linearend') - - # change folder to save simulation - sim.simulation_data.mfpath.set_sim_path(run_folder) - - # write simulation to new location - sim.set_all_data_external() - sim.write_simulation() - - # run simulation - if run: - sim.run_simulation() - - # compare output to expected results - head_file = os.path.join(os.getcwd(), expected_head_file) - head_new = os.path.join(run_folder, 'fhb2015_fhb.hds') - outfile = os.path.join(run_folder, 'head_compare.dat') - assert pymake.compare_heads(None, None, files1=head_file, files2=head_new, - outfile=outfile) - - # clean up - sim.delete_output_files() - - return - - -def test006_gwf3_disv(): - # init paths - test_ex_name = 'test006_gwf3_disv' - model_name = 'flow' - - pth = os.path.join('..', 'examples', 'data', 'mf6', 'create_tests', - test_ex_name) - run_folder = os.path.join(cpth, test_ex_name) - if not os.path.isdir(run_folder): - os.makedirs(run_folder) - - expected_output_folder = os.path.join(pth, 'expected_output') - expected_head_file = os.path.join(expected_output_folder, 'flow.hds') - - # create simulation - sim = MFSimulation(sim_name=test_ex_name, version='mf6', exe_name=exe_name, - sim_ws=pth) - tdis_rc = [(1.0, 1, 1.0)] - tdis_package = 
ModflowTdis(sim, time_units='DAYS', nper=1, - perioddata=tdis_rc) - model = ModflowGwf(sim, modelname=model_name, - model_nam_file='{}.nam'.format(model_name)) - ims_package = ModflowIms(sim, print_option='SUMMARY', - outer_hclose=0.00000001, - outer_maximum=1000, under_relaxation='NONE', - inner_maximum=1000, - inner_hclose=0.00000001, rcloserecord=0.01, - linear_acceleration='BICGSTAB', - scaling_method='NONE', reordering_method='NONE', - relaxation_factor=0.97) - sim.register_ims_package(ims_package, [model.name]) - vertices = testutils.read_vertices(os.path.join(pth, 'vertices.txt')) - c2drecarray = testutils.read_cell2d(os.path.join(pth, 'cell2d.txt')) - disv_package = ModflowGwfdisv(model, ncpl=121, nlay=1, nvert=148, top=0.0, - botm=-100.0, idomain=1, - vertices=vertices, cell2d=c2drecarray, - filename='{}.disv'.format(model_name)) - strt_list = [1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, - 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0] - ic_package = ModflowGwfic(model, strt=strt_list, - filename='{}.ic'.format(model_name)) - k = {'filename': 'k.bin', 'factor': 1.0, 'data': 1.0, 'binary': 'True'} - npf_package = ModflowGwfnpf(model, save_flows=True, icelltype=0, k=k, - k33=1.0) - k_data = npf_package.k.get_data() - assert(k_data[0,0] == 1.0) - - oc_package = ModflowGwfoc(model, budget_filerecord='flow.cbc', - head_filerecord='flow.hds', - saverecord=[('HEAD', 'ALL'), ('BUDGET', 'ALL')], - printrecord=[('HEAD', 'ALL'), ('BUDGET', 'ALL')]) - - # build stress_period_data for chd package - set_1 = [0, 7, 14, 18, 22, 26, 33] - set_2 = [6, 13, 17, 21, 25, 32, 39] - stress_period_data = [] - for value in set_1: - stress_period_data.append(((0, value), 1.0)) - for value in set_2: - stress_period_data.append(((0, value), 0.0)) - chd_package = ModflowGwfchd(model, print_input=True, print_flows=True, - save_flows=True, maxbound=14, - stress_period_data=stress_period_data) - - period_rch = {} - rch_array = [] - for val in range(0, 10): - rch_array.append(((0, val), 0.0)) - period_rch[0] = rch_array - rch_package = ModflowGwfrch(model, fixed_cell=True, maxbound=10, - stress_period_data=period_rch) - - gncrecarray = [((0, 9), (0, 40), (0, 8), 0.333333333333), - ((0, 9), (0, 42), (0, 10), 0.333333333333), - ((0, 10), (0, 43), (0, 9), 0.333333333333), - ((0, 10), (0, 45), (0, 11), 0.333333333333), - ((0, 11), (0, 46), (0, 10), 0.333333333333), - ((0, 11), (0, 48), (0, 12), 0.333333333333), - ((0, 15), (0, 40), (0, 8), 0.333333333333), - ((0, 15), (0, 58), (0, 19), 0.333333333333), - ((0, 16), (0, 48), (0, 12), 0.333333333333), - ((0, 16), (0, 66), (0, 20), 0.333333333333), - ((0, 19), (0, 67), (0, 15), 0.333333333333), - ((0, 19), (0, 85), (0, 23), 0.333333333333), - ((0, 20), (0, 75), (0, 16), 0.333333333333), - ((0, 20), (0, 93), (0, 24), 0.333333333333), - ((0, 23), (0, 94), (0, 19), 0.333333333333), - ((0, 23), (0, 112), (0, 27), 0.333333333333), - ((0, 24), (0, 102), (0, 20), 0.333333333333), - ((0, 24), (0, 120), (0, 31), 0.333333333333), - ((0, 28), (0, 112), (0, 27), 0.333333333333), - ((0, 28), (0, 114), (0, 29), 0.333333333333), - ((0, 29), (0, 115), (0, 28), 0.333333333333), - ((0, 29), (0, 117), (0, 30), 0.333333333333), - ((0, 30), (0, 118), (0, 29), 0.333333333333), - ((0, 30), (0, 120), (0, 31), 
0.333333333333)] - gnc_package = ModflowGwfgnc(model, print_input=True, print_flows=True, - numgnc=24, numalphaj=1, - gncdata=gncrecarray) - - # change folder to save simulation - sim.simulation_data.mfpath.set_sim_path(run_folder) - - # write simulation to new location - sim.write_simulation() - - # run simulation - if run: - sim.run_simulation() - - # compare output to expected results - head_file = os.path.join(os.getcwd(), expected_head_file) - head_new = os.path.join(run_folder, 'flow.hds') - outfile = os.path.join(run_folder, 'head_compare.dat') - assert pymake.compare_heads(None, None, files1=head_file, files2=head_new, - outfile=outfile) - - # export to netcdf - temporarily disabled - #model.export(os.path.join(run_folder, "test006_gwf3.nc")) - # export to shape file - model.export(os.path.join(run_folder, "test006_gwf3.shp")) - - # clean up - sim.delete_output_files() - - return - - -def test006_2models_gnc(): - # init paths - test_ex_name = 'test006_2models_gnc' - model_name_1 = 'model1' - model_name_2 = 'model2' - - pth = os.path.join('..', 'examples', 'data', 'mf6', 'create_tests', - test_ex_name) - run_folder = os.path.join(cpth, test_ex_name) - if not os.path.isdir(run_folder): - os.makedirs(run_folder) - - expected_output_folder = os.path.join(pth, 'expected_output') - expected_head_file_1 = os.path.join(expected_output_folder, 'model1.hds') - expected_head_file_2 = os.path.join(expected_output_folder, 'model2.hds') - - # create simulation - sim = MFSimulation(sim_name=test_ex_name, version='mf6', exe_name=exe_name, - sim_ws=pth) - tdis_rc = [(1.0, 1, 1.0)] - tdis_package = ModflowTdis(sim, time_units='DAYS', nper=1, - perioddata=tdis_rc) - model_1 = ModflowGwf(sim, modelname=model_name_1, - model_nam_file='{}.nam'.format(model_name_1)) - model_2 = ModflowGwf(sim, modelname=model_name_2, - model_nam_file='{}.nam'.format(model_name_2)) - ims_package = ModflowIms(sim, print_option='SUMMARY', - outer_hclose=0.00000001, - outer_maximum=1000, under_relaxation='NONE', - inner_maximum=1000, - inner_hclose=0.00000001, rcloserecord=0.01, - linear_acceleration='BICGSTAB', - scaling_method='NONE', reordering_method='NONE', - relaxation_factor=0.97) - sim.register_ims_package(ims_package, [model_1.name, model_2.name]) - idom = [1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, - 1, 1, 0, 0, 0, 1, 1, - 1, 1, 0, 0, 0, 1, 1, - 1, 1, 0, 0, 0, 1, 1, - 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, ] - dis_package_1 = ModflowGwfdis(model_1, length_units='METERS', nlay=1, - nrow=7, ncol=7, idomain=idom, - delr=100.0, delc=100.0, top=0.0, botm=-100.0, - filename='{}.dis'.format(model_name_1)) - dis_package_2 = ModflowGwfdis(model_2, length_units='METERS', nlay=1, - nrow=9, ncol=9, delr=33.33, - delc=33.33, top=0.0, botm=-100.0, - filename='{}.dis'.format(model_name_2)) - - strt_list = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, - 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, ] - ic_package_1 = ModflowGwfic(model_1, strt=strt_list, - filename='{}.ic'.format(model_name_1)) - ic_package_2 = ModflowGwfic(model_2, strt=1.0, - filename='{}.ic'.format(model_name_2)) - npf_package_1 = ModflowGwfnpf(model_1, save_flows=True, perched=True, - icelltype=0, k=1.0, k33=1.0) - npf_package_2 = ModflowGwfnpf(model_2, save_flows=True, perched=True, - icelltype=0, k=1.0, k33=1.0) - oc_package_1 = ModflowGwfoc(model_1, budget_filerecord='model1.cbc', - 
head_filerecord='model1.hds', - saverecord=[('HEAD', 'ALL'), - ('BUDGET', 'ALL')], - printrecord=[('HEAD', 'ALL'), - ('BUDGET', 'ALL')]) - oc_package_2 = ModflowGwfoc(model_2, budget_filerecord='model2.cbc', - head_filerecord='model2.hds', - saverecord=[('HEAD', 'ALL'), - ('BUDGET', 'ALL')], - printrecord=[('HEAD', 'ALL'), - ('BUDGET', 'ALL')]) - - # build periodrecarray for chd package - set_1 = [0, 7, 14, 18, 22, 26, 33] - set_2 = [6, 13, 17, 21, 25, 32, 39] - stress_period_data = [] - for value in range(0, 7): - stress_period_data.append(((0, value, 0), 1.0)) - for value in range(0, 7): - stress_period_data.append(((0, value, 6), 0.0)) - chd_package = ModflowGwfchd(model_1, print_input=True, print_flows=True, - save_flows=True, maxbound=30, - stress_period_data=stress_period_data) - - gncrecarray = testutils.read_gncrecarray(os.path.join(pth, 'gnc.txt')) - # test gnc delete - new_gncrecarray = gncrecarray[10:] - gnc_package = ModflowGwfgnc(sim, print_input=True, print_flows=True, - numgnc=26, numalphaj=1, - gncdata=new_gncrecarray) - sim.remove_package(gnc_package.package_type) - - gnc_package = ModflowGwfgnc(sim, print_input=True, print_flows=True, - numgnc=36, numalphaj=1, - gncdata=gncrecarray) - - exgrecarray = testutils.read_exchangedata(os.path.join(pth, 'exg.txt')) - - # build obs dictionary - gwf_obs = {('gwfgwf_obs.csv'): [('gwf-1-3-2_1-1-1', 'flow-ja-face', - (0, 2, 1), (0, 0, 0)), - ('gwf-1-3-2_1-2-1', 'flow-ja-face', - (0, 2, 1), (0, 1, 0))]} - - # test exg delete - newexgrecarray = exgrecarray[10:] - exg_package = ModflowGwfgwf(sim, print_input=True, print_flows=True, - save_flows=True, auxiliary='testaux', - gnc_filerecord='test006_2models_gnc.gnc', - nexg=26, exchangedata=newexgrecarray, - exgtype='gwf6-gwf6', exgmnamea=model_name_1, - exgmnameb=model_name_2) - sim.remove_package(exg_package.package_type) - - exg_package = ModflowGwfgwf(sim, print_input=True, print_flows=True, - save_flows=True, auxiliary='testaux', - gnc_filerecord='test006_2models_gnc.gnc', - nexg=36, exchangedata=exgrecarray, - exgtype='gwf6-gwf6', exgmnamea=model_name_1, - exgmnameb=model_name_2, observations=gwf_obs) - - # change folder to save simulation - sim.simulation_data.mfpath.set_sim_path(run_folder) - - # write simulation to new location - sim.set_all_data_external() - sim.write_simulation() - - # run simulation - if run: - sim.run_simulation() - - # compare output to expected results - head_file = os.path.join(os.getcwd(), expected_head_file_1) - head_new = os.path.join(run_folder, 'model1.hds') - outfile = os.path.join(run_folder, 'head_compare.dat') - assert pymake.compare_heads(None, None, files1=head_file, files2=head_new, - outfile=outfile) - - # compare output to expected results - head_file = os.path.join(os.getcwd(), expected_head_file_2) - head_new = os.path.join(run_folder, 'model2.hds') - outfile = os.path.join(run_folder, 'head_compare.dat') - assert pymake.compare_heads(None, None, files1=head_file, files2=head_new, - outfile=outfile) - - # clean up - sim.delete_output_files() - - return - - -def test050_circle_island(): - # init paths - test_ex_name = 'test050_circle_island' - model_name = 'ci' - - pth = os.path.join('..', 'examples', 'data', 'mf6', 'create_tests', - test_ex_name) - run_folder = os.path.join(cpth, test_ex_name) - if not os.path.isdir(run_folder): - os.makedirs(run_folder) - - expected_output_folder = os.path.join(pth, 'expected_output') - expected_head_file = os.path.join(expected_output_folder, 'ci.output.hds') - - # create simulation - sim = 
MFSimulation(sim_name=test_ex_name, version='mf6', exe_name=exe_name, - sim_ws=pth) - tdis_rc = [(1.0, 1, 1.0)] - tdis_package = ModflowTdis(sim, time_units='DAYS', nper=1, - perioddata=tdis_rc) - model = ModflowGwf(sim, modelname=model_name, - model_nam_file='{}.nam'.format(model_name)) - ims_package = ModflowIms(sim, print_option='SUMMARY', - outer_hclose=0.000001, - outer_maximum=500, under_relaxation='NONE', - inner_maximum=1000, - inner_hclose=0.000001, rcloserecord=0.000001, - linear_acceleration='BICGSTAB', - relaxation_factor=0.0) - sim.register_ims_package(ims_package, [model.name]) - vertices = testutils.read_vertices(os.path.join(pth, 'vertices.txt')) - c2drecarray = testutils.read_cell2d(os.path.join(pth, 'cell2d.txt')) - disv_package = ModflowGwfdisv(model, ncpl=5240, nlay=2, nvert=2778, - top=0.0, botm=[-20.0, -40.0], - idomain=1, vertices=vertices, - cell2d=c2drecarray, - filename='{}.disv'.format(model_name)) - ic_package = ModflowGwfic(model, strt=0.0, - filename='{}.ic'.format(model_name)) - npf_package = ModflowGwfnpf(model, save_flows=True, icelltype=0, k=10.0, - k33=0.2) - oc_package = ModflowGwfoc(model, budget_filerecord='ci.output.cbc', - head_filerecord='ci.output.hds', - saverecord=[('HEAD', 'ALL'), ('BUDGET', 'ALL')], - printrecord=[('HEAD', 'ALL'), ('BUDGET', 'ALL')]) - - stress_period_data = testutils.read_ghbrecarray( - os.path.join(pth, 'ghb.txt'), 2) - ghb_package = ModflowGwfghb(model, maxbound=3173, - stress_period_data=stress_period_data) - - rch_data = ['OPEN/CLOSE', 'rech.dat', 'FACTOR', 1.0, 'IPRN', 0] - rch_package = ModflowGwfrcha(model, readasarrays=True, - save_flows=True, recharge=rch_data) - - # change folder to save simulation - sim.simulation_data.mfpath.set_sim_path(run_folder) - - # write simulation to new location - sim.set_all_data_external() - sim.write_simulation() - - # run simulation - if run: - sim.run_simulation() - - # compare output to expected results - head_file = os.path.join(os.getcwd(), expected_head_file) - head_new = os.path.join(run_folder, 'ci.output.hds') - outfile = os.path.join(run_folder, 'head_compare.dat') - assert pymake.compare_heads(None, None, files1=head_file, files2=head_new, - outfile=outfile) - - # clean up - sim.delete_output_files() - - return - - -def test028_sfr(): - # init paths - test_ex_name = 'test028_sfr' - model_name = 'test1tr' - - pth = os.path.join('..', 'examples', 'data', 'mf6', 'create_tests', - test_ex_name) - run_folder = os.path.join(cpth, test_ex_name) - if not os.path.isdir(run_folder): - os.makedirs(run_folder) - - expected_output_folder = os.path.join(pth, 'expected_output') - expected_head_file = os.path.join(expected_output_folder, 'test1tr.hds') - - # create simulation - sim = MFSimulation(sim_name=test_ex_name, version='mf6', exe_name=exe_name, - sim_ws=pth) - sim.name_file.continue_.set_data(True) - tdis_rc = [(1577889000, 50, 1.1), (1577889000, 50, 1.1)] - tdis_package = ModflowTdis(sim, time_units='SECONDS', nper=2, - perioddata=tdis_rc, filename='simulation.tdis') - model = ModflowGwf(sim, modelname=model_name, - model_nam_file='{}.nam'.format(model_name)) - model.name_file.save_flows.set_data(True) - ims_package = ModflowIms(sim, print_option='SUMMARY', outer_hclose=0.00001, - outer_maximum=100, under_relaxation='DBD', - under_relaxation_theta=0.85, - under_relaxation_kappa=0.0001, - under_relaxation_gamma=0.0, - under_relaxation_momentum=0.1, - backtracking_number=0, backtracking_tolerance=1.1, - backtracking_reduction_factor=0.7, - backtracking_residual_limit=1.0, - 
inner_hclose=0.00001, rcloserecord=0.1, - inner_maximum=100, linear_acceleration='CG', - scaling_method='NONE', reordering_method='NONE', - relaxation_factor=0.99, - filename='model.ims') - sim.register_ims_package(ims_package, [model.name]) - top = testutils.read_std_array(os.path.join(pth, 'top.txt'), 'float') - botm = testutils.read_std_array(os.path.join(pth, 'botm.txt'), 'float') - idomain = testutils.read_std_array(os.path.join(pth, 'idomain.txt'), 'int') - dis_package = ModflowGwfdis(model, length_units='FEET', nlay=1, nrow=15, - ncol=10, delr=5000.0, delc=5000.0, - top=top, botm=botm, idomain=idomain, - filename='{}.dis'.format(model_name)) - strt = testutils.read_std_array(os.path.join(pth, 'strt.txt'), 'float') - strt_int = ['internal', 'factor', 1.0, 'iprn', 0, strt] - ic_package = ModflowGwfic(model, strt=strt_int, - filename='{}.ic'.format(model_name)) - - k_vals = testutils.read_std_array(os.path.join(pth, 'k.txt'), 'float') - k = ['internal', 'factor', 3.000E-03, 'iprn', 0, k_vals] - npf_package = ModflowGwfnpf(model, icelltype=1, k=k, k33=1.0) - npf_package.k.factor = 2.000E-04 - - oc_package = ModflowGwfoc(model, budget_filerecord='test1tr.cbc', - head_filerecord='test1tr.hds', - saverecord={0: [('HEAD', 'FREQUENCY', 5), - ('BUDGET', 'FREQUENCY', 5)]}, - printrecord={0: [('HEAD', 'FREQUENCY', 5), - ('BUDGET', 'FREQUENCY', 5)]}) - - sy_vals = testutils.read_std_array(os.path.join(pth, 'sy.txt'), 'float') - sy = {'factor': 0.2, 'iprn': 0, 'data': sy_vals} - sto_package = ModflowGwfsto(model, iconvert=1, ss=1.0E-6, sy=sy) - - surf = testutils.read_std_array(os.path.join(pth, 'surface.txt'), 'float') - surf_data = ['internal', 'factor', 1.0, 'iprn', -1, surf] - - # build time array series - tas = {0.0: 9.5E-08, 6.0E09: 9.5E-08, - 'filename': 'test028_sfr.evt.tas', - 'time_series_namerecord': 'evtarray_1', - 'interpolation_methodrecord': 'LINEAR'} - - evt_package = ModflowGwfevta(model, readasarrays=True, timearrayseries=tas, - surface=surf_data, depth=15.0, - rate='TIMEARRAYSERIES evtarray_1', - filename='test1tr.evt') - # attach obs package to evt - obs_dict = {'test028_sfr.evt.csv': [('obs-1', 'EVT', (0, 1, 5)), - ('obs-2', 'EVT', (0, 2, 3))]} - evt_package.obs.initialize(filename='test028_sfr.evt.obs', print_input=True, - continuous=obs_dict) - - stress_period_data = { - 0: [((0, 12, 0), 988.0, 0.038), ((0, 13, 8), 1045.0, 0.038)]} - ghb_package = ModflowGwfghb(model, maxbound=2, - stress_period_data=stress_period_data) - - rch = testutils.read_std_array(os.path.join(pth, 'recharge.txt'), 'float') - # test empty - rch_data = ModflowGwfrcha.recharge.empty(model) - rch_data[0]['data'] = rch - rch_data[0]['factor'] = 5.000E-10 - rch_data[0]['iprn'] = -1 - rch_package = ModflowGwfrcha(model, readasarrays=True, recharge=rch_data, - filename='test1tr.rch') - - sfr_rec = testutils.read_sfr_rec(os.path.join(pth, 'sfr_rec.txt'), 3) - reach_con_rec = testutils.read_reach_con_rec( - os.path.join(pth, 'sfr_reach_con_rec.txt')) - reach_div_rec = testutils.read_reach_div_rec( - os.path.join(pth, 'sfr_reach_div_rec.txt')) - reach_per_rec = testutils.read_reach_per_rec( - os.path.join(pth, 'sfr_reach_per_rec.txt')) - # test zero based indexes - reach_con_rec[0] = (0, -0.0) - sfr_package = ModflowGwfsfr(model, unit_conversion=1.486, - stage_filerecord='test1tr.sfr.stage.bin', - budget_filerecord='test1tr.sfr.cbc', - nreaches=36, packagedata=sfr_rec, - connectiondata=reach_con_rec, - diversions=reach_div_rec, - perioddata={0: reach_per_rec}) - assert 
(sfr_package.connectiondata.get_data()[0][1] == -0.0) - assert (sfr_package.connectiondata.get_data()[1][1] == 0.0) - assert (sfr_package.connectiondata.get_data()[2][1] == 1.0) - assert (sfr_package.packagedata.get_data()[1][1].lower() == 'none') - - sim.simulation_data.mfpath.set_sim_path(run_folder) - sim.write_simulation() - sim.load(sim_name=test_ex_name, version='mf6', exe_name=exe_name, - sim_ws=run_folder) - model = sim.get_model(model_name) - sfr_package = model.get_package('sfr') - # sfr_package.set_all_data_external() - assert (sfr_package.connectiondata.get_data()[0][1] == -0.0) - assert (sfr_package.connectiondata.get_data()[1][1] == 0.0) - assert (sfr_package.connectiondata.get_data()[2][1] == 1.0) - pdata = sfr_package.packagedata.get_data() - assert (sfr_package.packagedata.get_data()[1][1].lower() == 'none') - - # undo zero based test and move on - model.remove_package(sfr_package.package_type) - reach_con_rec = testutils.read_reach_con_rec( - os.path.join(pth, 'sfr_reach_con_rec.txt')) - - # set sfr settings back to expected package data - rec_line = (sfr_rec[1][0], (0, 1, 1)) + sfr_rec[1][2:] - sfr_rec[1] = rec_line - - sfr_package = ModflowGwfsfr(model, unit_conversion=1.486, - stage_filerecord='test1tr.sfr.stage.bin', - budget_filerecord='test1tr.sfr.cbc', - nreaches=36, packagedata=sfr_rec, - connectiondata=reach_con_rec, - diversions=reach_div_rec, - perioddata={0: reach_per_rec}) - - obs_data_1 = testutils.read_obs(os.path.join(pth, 'sfr_obs_1.txt')) - obs_data_2 = testutils.read_obs(os.path.join(pth, 'sfr_obs_2.txt')) - obs_data_3 = testutils.read_obs(os.path.join(pth, 'sfr_obs_3.txt')) - obs_data = {'test1tr.sfr.csv': obs_data_1, - 'test1tr.sfr.qaq.csv': obs_data_2, - 'test1tr.sfr.flow.csv': obs_data_3} - sfr_package.obs.initialize(filename='test1tr.sfr.obs', digits=10, - print_input=True, continuous=obs_data) - - wells = testutils.read_wells(os.path.join(pth, 'well.txt')) - wel_package = ModflowGwfwel(model, boundnames=True, maxbound=10, - stress_period_data={0: wells, 1: [()]}) - - # write simulation to new location - sim.write_simulation() - - # run simulation - if run: - sim.run_simulation() - - # compare output to expected results - head_file = os.path.join(os.getcwd(), expected_head_file) - head_new = os.path.join(run_folder, 'test1tr.hds') - outfile = os.path.join(run_folder, 'head_compare.dat') - assert pymake.compare_heads(None, None, files1=head_file, files2=head_new, - outfile=outfile) - - # clean up - sim.delete_output_files() - - return - - -if __name__ == '__main__': - np001() - np002() - test004_bcfss() - test005_advgw_tidal() - test006_2models_gnc() - test006_gwf3_disv() - test021_twri() - test028_sfr() - test035_fhb() - test050_circle_island() +import os + +import numpy as np + +import flopy +import flopy.utils.binaryfile as bf +from flopy.mf6.data.mfdatastorage import DataStorageType +from flopy.utils.datautil import PyListUtil +from flopy.mf6.mfbase import FlopyException +from flopy.mf6.modflow.mfgwf import ModflowGwf +from flopy.mf6.modflow.mfgwfchd import ModflowGwfchd +from flopy.mf6.modflow.mfgwfdis import ModflowGwfdis +from flopy.mf6.modflow.mfgwfdisv import ModflowGwfdisv +from flopy.mf6.modflow.mfgwfdrn import ModflowGwfdrn +from flopy.mf6.modflow.mfgwfevt import ModflowGwfevt +from flopy.mf6.modflow.mfgwfevta import ModflowGwfevta +from flopy.mf6.modflow.mfgwfghb import ModflowGwfghb +from flopy.mf6.modflow.mfgwfgnc import ModflowGwfgnc +from flopy.mf6.modflow.mfgwfgwf import ModflowGwfgwf +from flopy.mf6.modflow.mfgwfhfb import 
ModflowGwfhfb
+from flopy.mf6.modflow.mfgwfic import ModflowGwfic
+from flopy.mf6.modflow.mfgwfnpf import ModflowGwfnpf
+from flopy.mf6.modflow.mfgwfoc import ModflowGwfoc
+from flopy.mf6.modflow.mfgwfrch import ModflowGwfrch
+from flopy.mf6.modflow.mfgwfrcha import ModflowGwfrcha
+from flopy.mf6.modflow.mfgwfriv import ModflowGwfriv
+from flopy.mf6.modflow.mfgwfsfr import ModflowGwfsfr
+from flopy.mf6.modflow.mfgwfsto import ModflowGwfsto
+from flopy.mf6.modflow.mfgwfwel import ModflowGwfwel
+from flopy.mf6.modflow.mfims import ModflowIms
+from flopy.mf6.modflow.mfsimulation import MFSimulation
+from flopy.mf6.modflow.mftdis import ModflowTdis
+from flopy.mf6.modflow.mfutlobs import ModflowUtlobs
+from flopy.mf6.modflow.mfutlts import ModflowUtlts
+from flopy.mf6.utils import testutils
+from flopy.mf6.mfbase import MFDataException
+
+
+try:
+    import pymake
+except ImportError:
+    print('could not import pymake')
+
+exe_name = 'mf6'
+v = flopy.which(exe_name)
+
+run = True
+if v is None:
+    run = False
+
+cpth = os.path.join('temp', 't505')
+# make the directory if it does not exist
+if not os.path.isdir(cpth):
+    os.makedirs(cpth)
+
+
+def np001():
+    # init paths
+    test_ex_name = 'np001'
+    model_name = 'np001_mod'
+
+    pth = os.path.join('..', 'examples', 'data', 'mf6', 'create_tests',
+                       test_ex_name)
+    run_folder = os.path.join(cpth, test_ex_name)
+    if not os.path.isdir(run_folder):
+        os.makedirs(run_folder)
+
+    expected_output_folder = os.path.join(pth, 'expected_output')
+    expected_head_file = os.path.join(expected_output_folder, 'np001_mod.hds')
+    expected_cbc_file = os.path.join(expected_output_folder, 'np001_mod.cbc')
+
+    # model tests
+    test_sim = MFSimulation(sim_name=test_ex_name, version='mf6',
+                            exe_name=exe_name, sim_ws=run_folder,
+                            continue_=True, memory_print_option='summary')
+    name = test_sim.name_file
+    assert name.continue_.get_data()
+    assert name.nocheck.get_data() is None
+    assert name.memory_print_option.get_data() == 'summary'
+
+    kwargs = {}
+    kwargs['bad_kwarg'] = 20
+    try:
+        ex = False
+        bad_model = ModflowGwf(test_sim, modelname=model_name,
+                               model_nam_file='{}.nam'.format(model_name),
+                               **kwargs)
+    except FlopyException:
+        ex = True
+    assert ex
+
+    kwargs = {}
+    kwargs['xul'] = 20.5
+    good_model = ModflowGwf(test_sim, modelname=model_name,
+                            model_nam_file='{}.nam'.format(model_name),
+                            model_rel_path='model_folder',
+                            **kwargs)
+
+    # create simulation
+    sim = MFSimulation(sim_name=test_ex_name, version='mf6', exe_name=exe_name,
+                       sim_ws=pth)
+    tdis_rc = [(6.0, 2, 1.0), (6.0, 3, 1.0)]
+    tdis_package = ModflowTdis(sim, time_units='DAYS', nper=1,
+                               perioddata=[(2.0, 1, 1.0)])
+    # specifying the tdis package twice should remove the old tdis package
+    tdis_package = ModflowTdis(sim, time_units='DAYS', nper=2,
+                               perioddata=tdis_rc)
+    # first ims file to be replaced
+    ims_package = ModflowIms(sim, pname='my_ims_file', filename='old_name.ims',
+                             print_option='ALL', complexity='SIMPLE',
+                             outer_hclose=0.00001,
+                             outer_maximum=10, under_relaxation='NONE',
+                             inner_maximum=10,
+                             inner_hclose=0.001, linear_acceleration='CG',
+                             preconditioner_levels=2,
+                             preconditioner_drop_tolerance=0.00001,
+                             number_orthogonalizations=5)
+    # replace with real ims file
+    ims_package = ModflowIms(sim, pname='my_ims_file',
+                             filename='{}.ims'.format(test_ex_name),
+                             print_option='ALL', complexity='SIMPLE',
+                             outer_hclose=0.00001,
+                             outer_maximum=50, under_relaxation='NONE',
+                             inner_maximum=30,
+                             inner_hclose=0.00001, linear_acceleration='CG',
+                             preconditioner_levels=7,
preconditioner_drop_tolerance=0.01, + number_orthogonalizations=2) + + model = ModflowGwf(sim, modelname=model_name, + model_nam_file='{}.nam'.format(model_name)) + # test case insensitive lookup + assert(sim.get_model(model_name.upper()) is not None) + + # test getting model using attribute + model = sim.np001_mod + assert(model is not None and model.name == 'np001_mod') + tdis = sim.tdis + assert(tdis is not None and tdis.package_type == 'tdis') + + dis_package = flopy.mf6.ModflowGwfdis(model, length_units='FEET', nlay=1, + nrow=1, ncol=1, delr=100.0, + delc=100.0, + top=60.0, botm=50.0, + filename='{}.dis'.format(model_name), + pname='mydispkg') + # specifying dis package twice with the same name should automatically + # remove the old dis package + top = {'filename': 'top.bin', 'data': 100.0, 'binary': True} + botm = {'filename': 'botm.bin', 'data': 50.0, 'binary': True} + dis_package = flopy.mf6.ModflowGwfdis(model, length_units='FEET', nlay=1, + nrow=1, ncol=10, delr=500.0, + delc=500.0, + top=top, botm=botm, + filename='{}.dis'.format(model_name), + pname='mydispkg') + top_data = dis_package.top.get_data() + assert top_data[0,0] == 100.0 + ic_package = flopy.mf6.ModflowGwfic(model, strt='initial_heads.txt', + filename='{}.ic'.format(model_name)) + npf_package = ModflowGwfnpf(model, pname='npf_1', save_flows=True, + alternative_cell_averaging='logarithmic', + icelltype=1, k=5.0) + + # remove package test using .remove_package(name) + assert (model.get_package(npf_package.package_name) is not None) + model.remove_package(npf_package.package_name) + assert (model.get_package(npf_package.package_name) is None) + # remove package test using .remove() + npf_package = ModflowGwfnpf(model, pname='npf_1', save_flows=True, + alternative_cell_averaging='logarithmic', + icelltype=1, k=5.0) + npf_package.remove() + assert (model.get_package(npf_package.package_name) is None) + + npf_package = ModflowGwfnpf(model, save_flows=True, + alternative_cell_averaging='logarithmic', + icelltype=1, k=5.0) + + oc_package = ModflowGwfoc(model, budget_filerecord=[('np001_mod.cbc',)], + head_filerecord=[('np001_mod.hds',)], + saverecord={0: [('HEAD', 'ALL'), + ('BUDGET', 'ALL')], + 1: [('HEAD', 'ALL'), + ('BUDGET', 'ALL')]}, + printrecord=[('HEAD', 'ALL'), ('BUDGET', 'ALL')]) + oc_package.printrecord.add_transient_key(1) + oc_package.printrecord.set_data([('HEAD', 'ALL'), ('BUDGET', 'ALL')], 1) + + sto_package = ModflowGwfsto(model, save_flows=True, iconvert=1, + ss=0.000001, sy=0.15) + + # test saving a binary file with list data + well_spd = {0: {'filename': 'wel0.bin', 'binary': True, + 'data': [((0, 0, 4), -2000.0), ((0, 0, 7), -2.0)]}, + 1: None} + wel_package = ModflowGwfwel(model, print_input=True, print_flows=True, + save_flows=True, maxbound=2, + stress_period_data=well_spd) + wel_package.stress_period_data.add_transient_key(1) + wel_package.stress_period_data.set_data( + {1: {'filename': 'wel.txt', 'factor': 1.0}}) + + # test getting data from a binary file + well_data = wel_package.stress_period_data.get_data(0) + assert well_data[0][0] == (0, 0, 4) + assert well_data[0][1] == -2000.0 + + drn_package = ModflowGwfdrn(model, print_input=True, print_flows=True, + save_flows=True, maxbound=1, + timeseries=[(0.0, 60.0), (100000.0, 60.0)], + stress_period_data=[((0, 0, 0), 80, 'drn_1')]) + drn_package.ts.time_series_namerecord = 'drn_1' + drn_package.ts.interpolation_methodrecord = 'linearend' + + riv_spd = {0: {'filename': 'riv.txt', 'data':[((0, 0, 9), 110, 90.0, + 100.0, 1.0, 2.0, 3.0)]}} + riv_package 
= ModflowGwfriv(model, print_input=True, print_flows=True,
+                                save_flows=True, maxbound=1,
+                                auxiliary=['var1', 'var2', 'var3'],
+                                stress_period_data=riv_spd)
+    riv_data = riv_package.stress_period_data.get_data(0)
+    assert riv_data[0][0] == (0, 0, 9)
+    assert riv_data[0][1] == 110
+    assert riv_data[0][2] == 90.0
+    assert riv_data[0][3] == 100.0
+    assert riv_data[0][4] == 1.0
+    assert riv_data[0][5] == 2.0
+    assert riv_data[0][6] == 3.0
+
+    # verify package look-up
+    pkgs = model.get_package()
+    assert (len(pkgs) == 9)
+    pkg = model.get_package('oc')
+    assert isinstance(pkg, ModflowGwfoc)
+    pkg = sim.get_package('tdis')
+    assert isinstance(pkg, ModflowTdis)
+    pkg = model.get_package('mydispkg')
+    assert isinstance(pkg,
+                      flopy.mf6.ModflowGwfdis) and \
+           pkg.package_name == 'mydispkg'
+    pkg = model.mydispkg
+    assert isinstance(pkg,
+                      flopy.mf6.ModflowGwfdis) and \
+           pkg.package_name == 'mydispkg'
+
+
+    # verify external file contents
+    array_util = PyListUtil()
+    ic_data = ic_package.strt
+    ic_array = ic_data.get_data()
+    assert array_util.array_comp(ic_array, [[[100.0, 100.0, 100.0, 100.0,
+                                              100.0, 100.0, 100.0, 100.0,
+                                              100.0, 100.0]]])
+
+    # make folder to save simulation
+    sim.simulation_data.mfpath.set_sim_path(run_folder)
+
+    # write simulation to new location
+    sim.set_all_data_external()
+    sim.write_simulation()
+
+    # run simulation
+    if run:
+        sim.run_simulation()
+
+    # get expected results
+    budget_file = os.path.join(os.getcwd(), expected_cbc_file)
+    budget_obj = bf.CellBudgetFile(budget_file, precision='double')
+    budget_frf_valid = np.array(
+        budget_obj.get_data(text='FLOW-JA-FACE', full3D=True))
+
+    # compare output to expected results
+    head_file = os.path.join(os.getcwd(), expected_head_file)
+    head_new = os.path.join(run_folder, 'np001_mod.hds')
+    outfile = os.path.join(run_folder, 'head_compare.dat')
+    assert pymake.compare_heads(None, None, files1=head_file, files2=head_new,
+                                outfile=outfile)
+
+    budget_frf = sim.simulation_data.mfdata[
+        (model_name, 'CBC', 'FLOW-JA-FACE')]
+    assert array_util.array_comp(budget_frf_valid, budget_frf)
+
+    # clean up
+    sim.delete_output_files()
+
+    try:
+        error_occurred = False
+        well_spd = {0: {'filename': 'wel0.bin', 'binary': True,
+                        'data': [((0, 0, 4), -2000.0), ((0, 0, 7), -2.0)]}}
+        wel_package = ModflowGwfwel(model, boundnames=True,
+                                    print_input=True, print_flows=True,
+                                    save_flows=True, maxbound=2,
+                                    stress_period_data=well_spd)
+    except MFDataException:
+        error_occurred = True
+    assert error_occurred
+
+    # test error checking
+    drn_package = ModflowGwfdrn(model, print_input=True, print_flows=True,
+                                save_flows=True, maxbound=1,
+                                timeseries=[(0.0, 60.0), (100000.0, 60.0)],
+                                stress_period_data=[((100, 0, 0), np.nan,
+                                                     'drn_1'), ((0, 0, 0),
+                                                                10.0, 'drn_2')])
+    npf_package = ModflowGwfnpf(model, save_flows=True,
+                                alternative_cell_averaging='logarithmic',
+                                icelltype=1, k=100001.0, k33=1e-12)
+    chk = sim.check()
+    summary = '.'.join(chk[0].summary_array.desc)
+    assert 'drn_1 package: invalid BC index' in summary
+    assert 'npf package: vertical hydraulic conductivity values below ' \
+           'checker threshold of 1e-11' in summary
+    assert 'npf package: horizontal hydraulic conductivity values above ' \
+           'checker threshold of 100000.0' in summary
+    data_invalid = False
+    try:
+        drn_package = ModflowGwfdrn(model, print_input=True, print_flows=True,
+                                    save_flows=True, maxbound=1,
+                                    timeseries=[(0.0, 60.0), (100000.0, 60.0)],
+                                    stress_period_data=[((0, 0, 0), 10.0)])
+    except MFDataException:
+        data_invalid = True
+    assert data_invalid
+
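    # --- illustrative sketch (editorial, not part of this patch) ---
    # np001 exercises flopy's two validation layers: MFDataException is
    # raised while a package is built from records that do not match its
    # definition, and sim.check() collects plausibility warnings in a
    # summary recarray. Condensed, reusing the np001 `sim` and `model`
    # objects (summary_contains is a hypothetical helper):
    def summary_contains(simulation, text):
        # join the checker's description column and search it, exactly
        # as the asserts above do
        chk = simulation.check()
        return text in '.'.join(chk[0].summary_array.desc)

    try:
        # a drn record with only (cellid, elev) is missing its cond
        # field, so construction fails before anything is written
        ModflowGwfdrn(model, maxbound=1,
                      stress_period_data=[((0, 0, 0), 10.0)])
    except MFDataException:
        pass  # expected for the short record
    assert summary_contains(sim, 'invalid BC index')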
+ return + + +def np002(): + # init paths + test_ex_name = 'np002' + model_name = 'np002_mod' + + pth = os.path.join('..', 'examples', 'data', 'mf6', 'create_tests', + test_ex_name) + pth_for_mf = os.path.join('..', '..', '..', pth) + run_folder = os.path.join(cpth, test_ex_name) + if not os.path.isdir(run_folder): + os.makedirs(run_folder) + + expected_output_folder = os.path.join(pth, 'expected_output') + expected_head_file = os.path.join(expected_output_folder, 'np002_mod.hds') + expected_cbc_file = os.path.join(expected_output_folder, 'np002_mod.cbc') + + # create simulation + sim = MFSimulation(sim_name=test_ex_name, version='mf6', exe_name=exe_name, + sim_ws=run_folder, nocheck=True) + name = sim.name_file + assert name.continue_.get_data() == None + assert name.nocheck.get_data() == True + assert name.memory_print_option.get_data() == None + + tdis_rc = [(6.0, 2, 1.0), (6.0, 3, 1.0)] + tdis_package = ModflowTdis(sim, time_units='DAYS', nper=2, + perioddata=tdis_rc) + model = ModflowGwf(sim, modelname=model_name, + model_nam_file='{}.nam'.format(model_name)) + ims_package = ModflowIms(sim, print_option='ALL', complexity='SIMPLE', + outer_hclose=0.00001, + outer_maximum=50, under_relaxation='NONE', + inner_maximum=30, + inner_hclose=0.00001, linear_acceleration='CG', + preconditioner_levels=7, + preconditioner_drop_tolerance=0.01, + number_orthogonalizations=2) + sim.register_ims_package(ims_package, [model.name]) + + # get rid of top_data.txt so that a later test does not automatically pass + top_data_file = os.path.join(run_folder, 'top_data.txt') + if os.path.isfile(top_data_file): + os.remove(top_data_file) + # test loading data to be stored in a file and loading data from a file + # using the "dictionary" input format + top = {'filename': 'top_data.txt', 'factor': 1.0, + 'data': [100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, + 100.0, 100.0]} + botm_file = os.path.join(pth_for_mf, 'botm.txt') + botm = {'filename': botm_file, 'factor': 1.0} + dis_package = ModflowGwfdis(model, length_units='FEET', nlay=1, nrow=1, + ncol=10, delr=500.0, delc=500.0, + top=top, botm=botm, + filename='{}.dis'.format(model_name)) + ic_vals = [100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, + 100.0] + ic_package = ModflowGwfic(model, strt=ic_vals, + filename='{}.ic'.format(model_name)) + ic_package.strt.store_as_external_file('initial_heads.txt') + npf_package = ModflowGwfnpf(model, save_flows=True, icelltype=1, k=100.0) + npf_package.k.store_as_external_file('k.bin', binary=True) + oc_package = ModflowGwfoc(model, budget_filerecord=[('np002_mod.cbc',)], + head_filerecord=[('np002_mod.hds',)], + saverecord=[('HEAD', 'ALL'), ('BUDGET', 'ALL')], + printrecord=[('HEAD', 'ALL'), ('BUDGET', 'ALL')]) + oc_package.saverecord.add_transient_key(1) + oc_package.saverecord.set_data([('HEAD', 'ALL'), ('BUDGET', 'ALL')], 1) + oc_package.printrecord.add_transient_key(1) + oc_package.printrecord.set_data([('HEAD', 'ALL'), ('BUDGET', 'ALL')], 1) + + sto_package = ModflowGwfsto(model, save_flows=True, iconvert=1, + ss=0.000001, sy=0.15) + + hfb_package = ModflowGwfhfb(model, print_input=True, maxhfb=1, + stress_period_data=[((0, 0, 3), (0, 0, 4), + 0.00001)]) + chd_package = ModflowGwfchd(model, print_input=True, print_flows=True, + maxbound=1, stress_period_data=[((0, 0, 0), + 65.0)]) + ghb_package = ModflowGwfghb(model, print_input=True, print_flows=True, + maxbound=1, stress_period_data=[((0, 0, 9), + 125.0, 60.0)]) + rch_package = ModflowGwfrch(model, print_input=True, print_flows=True, + 
maxbound=2, + stress_period_data=[((0, 0, 3), 0.02), + ((0, 0, 6), 0.1)]) + + # write simulation to new location + sim.write_simulation() + + assert(os.path.isfile(top_data_file)) + + if run: + # run simulation + sim.run_simulation() + + sim2 = MFSimulation.load(sim_ws=run_folder) + model_ = sim2.get_model(model_name) + npf_package = model_.get_package('npf') + k = npf_package.k.array + + # get expected results + budget_file = os.path.join(os.getcwd(), expected_cbc_file) + budget_obj = bf.CellBudgetFile(budget_file, precision='double') + budget_frf_valid = np.array( + budget_obj.get_data(text='FLOW JA FACE ', full3D=True)) + + # compare output to expected results + head_file = os.path.join(os.getcwd(), expected_head_file) + head_new = os.path.join(run_folder, 'np002_mod.hds') + outfile = os.path.join(run_folder, 'head_compare.dat') + assert pymake.compare_heads(None, None, files1=head_file, + files2=head_new, outfile=outfile) + + array_util = PyListUtil() + budget_frf = sim.simulation_data.mfdata[ + (model_name, 'CBC', 'FLOW-JA-FACE')] + assert array_util.array_comp(budget_frf_valid, budget_frf) + + # verify external text file was written correctly + ext_file_path = os.path.join(run_folder, 'initial_heads.txt') + fd = open(ext_file_path, 'r') + line = fd.readline() + line_array = line.split() + assert len(ic_vals) == len(line_array) + for index in range(0, len(ic_vals)): + assert ic_vals[index] == float(line_array[index]) + fd.close() + + # clean up + sim.delete_output_files() + + # test error checking + sto_package = ModflowGwfsto(model, save_flows=True, iconvert=1, + ss=0.00000001, sy=0.6) + chd_package = ModflowGwfchd(model, print_input=True, print_flows=True, + maxbound=1, stress_period_data=[((0, 0, 0), + np.nan)]) + chk = sim.check() + summary = '.'.join(chk[0].summary_array.desc) + assert 'sto package: specific storage values below ' \ + 'checker threshold of 1e-06' in summary + assert 'sto package: specific yield values above ' \ + 'checker threshold of 0.5' in summary + assert 'Not a number' in summary + + return + + +def test021_twri(): + # init paths + test_ex_name = 'test021_twri' + model_name = 'twri' + + pth = os.path.join('..', 'examples', 'data', 'mf6', 'create_tests', + test_ex_name) + run_folder = os.path.join(cpth, test_ex_name) + if not os.path.isdir(run_folder): + os.makedirs(run_folder) + + expected_output_folder = os.path.join(pth, 'expected_output') + expected_head_file = os.path.join(expected_output_folder, 'twri.hds') + + # create simulation + sim = MFSimulation(sim_name=test_ex_name, version='mf6', exe_name=exe_name, + sim_ws=pth) + tdis_rc = [(86400.0, 1, 1.0)] + tdis_package = ModflowTdis(sim, time_units='SECONDS', nper=1, + perioddata=tdis_rc) + model = ModflowGwf(sim, modelname=model_name, + model_nam_file='{}.nam'.format(model_name)) + ims_package = ModflowIms(sim, print_option='SUMMARY', outer_hclose=0.0001, + outer_maximum=500, under_relaxation='NONE', + inner_maximum=100, + inner_hclose=0.0001, rcloserecord=0.001, + linear_acceleration='CG', + scaling_method='NONE', reordering_method='NONE', + relaxation_factor=0.97) + sim.register_ims_package(ims_package, [model.name]) + dis_package = flopy.mf6.ModflowGwfdis(model, nlay=3, nrow=15, ncol=15, + delr=5000.0, delc=5000.0, + top=200.0, botm=[-200, -300, -450], + filename='{}.dis'.format(model_name)) + strt = [{'filename': 'strt.txt', 'factor': 1.0, 'data': 0.0}, + {'filename': 'strt2.bin', 'factor': 1.0, 'data': 1.0, + 'binary': 'True'}, 2.0] + ic_package = ModflowGwfic(model, strt=strt, + 
filename='{}.ic'.format(model_name)) + npf_package = ModflowGwfnpf(model, save_flows=True, perched=True, + cvoptions='dewatered', + icelltype=[1, 0, 0], k=[0.001, 0.0001, 0.0002], + k33=0.00000002) + oc_package = ModflowGwfoc(model, budget_filerecord='twri.cbc', + head_filerecord='twri.hds', + saverecord=[('HEAD', 'ALL'), ('BUDGET', 'ALL')], + printrecord=[('HEAD', 'ALL')]) + + # build stress_period_data for chd package + stress_period_data = [] + for layer in range(0, 2): + for row in range(0, 15): + stress_period_data.append(((layer, row, 0), 0.0)) + chd_package = ModflowGwfchd(model, print_input=True, print_flows=True, + save_flows=True, maxbound=100, + stress_period_data=stress_period_data) + + # build stress_period_data for drn package + conc = np.ones((15, 15), dtype=np.float) * 35. + auxdata = {0: [6, conc]} + + stress_period_data = [] + drn_heads = [0.0, 0.0, 10.0, 20.0, 30.0, 50.0, 70.0, 90.0, 100.0] + for col, head in zip(range(1, 10), drn_heads): + stress_period_data.append(((0, 7, col), head, 1.0, + 'name_{}'.format(col))) + drn_package = ModflowGwfdrn(model, print_input=True, print_flows=True, + save_flows=True, maxbound=9, boundnames=True, + stress_period_data=stress_period_data) + rch_package = ModflowGwfrcha(model, readasarrays=True, fixed_cell=True, + recharge={0: 0.00000003}, + auxiliary=[('iface', 'conc')], aux=auxdata) + + aux = rch_package.aux.get_data() + + stress_period_data = [] + layers = [2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] + rows = [4, 3, 5, 8, 8, 8, 8, 10, 10, 10, 10, 12, 12, 12, 12] + cols = [10, 5, 11, 7, 9, 11, 13, 7, 9, 11, 13, 7, 9, 11, 13] + for layer, row, col in zip(layers, rows, cols): + stress_period_data.append(((layer, row, col), -5.0)) + wel_package = ModflowGwfwel(model, print_input=True, print_flows=True, + save_flows=True, maxbound=15, + stress_period_data=stress_period_data) + + # change folder to save simulation + sim.simulation_data.mfpath.set_sim_path(run_folder) + + # write simulation to new location + sim.write_simulation() + + # run simulation + sim.run_simulation() + + sim2 = MFSimulation.load(sim_ws=run_folder) + model2 = sim2.get_model() + ic2 = model2.get_package('ic') + strt2 = ic2.strt.get_data() + drn2 = model2.get_package('drn') + drn_spd = drn2.stress_period_data.get_data() + assert(strt2[0,0,0] == 0.0) + assert(strt2[1,0,0] == 1.0) + assert(strt2[2,0,0] == 2.0) + assert(drn_spd[0][1][3] == 'name_2') + + # compare output to expected results + head_file = os.path.join(os.getcwd(), expected_head_file) + head_new = os.path.join(run_folder, 'twri.hds') + outfile = os.path.join(run_folder, 'head_compare.dat') + assert pymake.compare_heads(None, None, files1=head_file, files2=head_new, + outfile=outfile) + + # clean up + sim.delete_output_files() + + return + + +def test005_advgw_tidal(): + # init paths + test_ex_name = 'test005_advgw_tidal' + model_name = 'AdvGW_tidal' + + pth = os.path.join('..', 'examples', 'data', 'mf6', 'create_tests', + test_ex_name) + run_folder = os.path.join(cpth, test_ex_name) + if not os.path.isdir(run_folder): + os.makedirs(run_folder) + + expected_output_folder = os.path.join(pth, 'expected_output') + expected_head_file = os.path.join(expected_output_folder, + 'AdvGW_tidal.hds') + + # create simulation + sim = MFSimulation(sim_name=test_ex_name, version='mf6', exe_name=exe_name, + sim_ws=pth) + # test tdis package deletion + tdis_package = ModflowTdis(sim, time_units='DAYS', nper=1, + perioddata=[(2.0, 2, 1.0)]) + sim.remove_package(tdis_package.package_type) + + tdis_rc = [(1.0, 1, 1.0), (10.0, 
120, 1.0), (10.0, 120, 1.0), + (10.0, 120, 1.0)] + tdis_package = ModflowTdis(sim, time_units='DAYS', nper=4, + perioddata=tdis_rc) + model = ModflowGwf(sim, modelname=model_name, + model_nam_file='{}.nam'.format(model_name)) + ims_package = ModflowIms(sim, print_option='SUMMARY', complexity='SIMPLE', + outer_hclose=0.0001, + outer_maximum=500, under_relaxation='NONE', + inner_maximum=100, + inner_hclose=0.0001, rcloserecord=0.001, + linear_acceleration='CG', + scaling_method='NONE', reordering_method='NONE', + relaxation_factor=0.97) + sim.register_ims_package(ims_package, [model.name]) + bot_data = [-100 for x in range(150)] + dis_package = ModflowGwfdis(model, nlay=3, nrow=15, ncol=10, delr=500.0, + delc=500.0, + top=50.0, botm=[5.0, -10.0, {'factor': 1.0, + 'data': bot_data}], + filename='{}.dis'.format(model_name)) + ic_package = ModflowGwfic(model, strt=50.0, + filename='{}.ic'.format(model_name)) + npf_package = ModflowGwfnpf(model, save_flows=True, icelltype=[1, 0, 0], + k=[5.0, 0.1, 4.0], + k33=[0.5, 0.005, 0.1]) + oc_package = ModflowGwfoc(model, budget_filerecord='AdvGW_tidal.cbc', + head_filerecord='AdvGW_tidal.hds', + headprintrecord=[('COLUMNS', 10, 'WIDTH', 15, + 'DIGITS', 6, 'GENERAL')], + saverecord=[('HEAD', 'ALL'), ('BUDGET', 'ALL')], + printrecord=[('HEAD', 'FIRST'), ('HEAD', 'LAST'), + ('BUDGET', 'LAST')]) + # test empty + sy_template = ModflowGwfsto.sy.empty(model, True) + for layer in range(0, 3): + sy_template[layer]['data'] = 0.2 + layer_storage_types = [DataStorageType.internal_array, + DataStorageType.internal_constant, + DataStorageType.internal_array] + ss_template = ModflowGwfsto.ss.empty(model, True, layer_storage_types, + 0.000001) + sto_package = ModflowGwfsto(model, save_flows=True, iconvert=1, + ss=ss_template, sy=sy_template, + steady_state={0: True}, + transient={1: True}) + + # wel, evt, ghb, obs, riv, rch, ts + # well package + # test empty with aux vars, bound names, and time series + period_two = ModflowGwfwel.stress_period_data.empty(model, maxbound=3, + aux_vars=['var1', + 'var2', + 'var3'], + boundnames=True, + timeseries=True) + period_two[0][0] = ((0, 11, 2), -50.0, -1, -2, -3, None) + period_two[0][1] = ((2, 4, 7), 'well_1_rate', 1, 2, 3, 'well_1') + period_two[0][2] = ((2, 3, 2), 'well_2_rate', 4, 5, 6, 'well_2') + period_three = ModflowGwfwel.stress_period_data.empty(model, maxbound=2, + aux_vars=['var1', + 'var2', + 'var3'], + boundnames=True, + timeseries=True) + period_three[0][0] = ((2, 3, 2), 'well_2_rate', 1, 2, 3, 'well_2') + period_three[0][1] = ((2, 4, 7), 'well_1_rate', 4, 5, 6, 'well_1') + period_four = ModflowGwfwel.stress_period_data.empty(model, maxbound=5, + aux_vars=['var1', + 'var2', + 'var3'], + boundnames=True, + timeseries=True) + period_four[0][0] = ((2, 4, 7), 'well_1_rate', 1, 2, 3, 'well_1') + period_four[0][1] = ((2, 3, 2), 'well_2_rate', 4, 5, 6, 'well_2') + period_four[0][2] = ((0, 11, 2), -10.0, 7, 8, 9, None) + period_four[0][3] = ((0, 2, 4), -20.0, 17, 18, 19, None) + period_four[0][4] = ((0, 13, 5), -40.0, 27, 28, 29, None) + stress_period_data = {} + stress_period_data[1] = period_two[0] + stress_period_data[2] = period_three[0] + stress_period_data[3] = period_four[0] + # well ts package + timeseries = [(0.0, 0.0, 0.0, 0.0), + (1.0, -200.0, 0.0, -100.0), + (11.0, -1800.0, -500.0, -200.0), + (21.0, -200.0, -400.0, -300.0), + (31.0, 0.0, -600.0, -400.0)] + ts_dict = {'filename': 'well-rates.ts', 'timeseries': timeseries, + 'time_series_namerecord': [('well_1_rate', 'well_2_rate', + 'well_3_rate')], + 
'interpolation_methodrecord': [('stepwise', 'stepwise', + 'stepwise')]} + # test removing package with child packages + wel_package = ModflowGwfwel(model, print_input=True, print_flows=True, + auxiliary=[('var1', 'var2', 'var3')], + maxbound=5, + stress_period_data=stress_period_data, + boundnames=True, save_flows=True, + timeseries=ts_dict) + wel_package.remove() + wel_package = ModflowGwfwel(model, print_input=True, print_flows=True, + auxiliary=[('var1', 'var2', 'var3')], + maxbound=5, + stress_period_data=stress_period_data, + boundnames=True, save_flows=True, + timeseries=ts_dict) + + # test empty + evt_period = ModflowGwfevt.stress_period_data.empty(model, 150, nseg=3) + for col in range(0, 10): + for row in range(0, 15): + evt_period[0][col * 15 + row] = ( + ((0, row, col), 50.0, 0.0004, 10.0, 0.2, 0.5, 0.3, 0.1, None)) + evt_package = ModflowGwfevt(model, print_input=True, print_flows=True, + save_flows=True, maxbound=150, + nseg=3, stress_period_data=evt_period) + + # build ghb + ghb_period = {} + ghb_period_array = [] + for layer, cond in zip(range(1, 3), [15.0, 1500.0]): + for row in range(0, 15): + ghb_period_array.append( + ((layer, row, 9), 'tides', cond, 'Estuary-L2')) + ghb_period[0] = ghb_period_array + + # build ts ghb + ts_recarray = [] + fd = open(os.path.join(pth, 'tides.txt'), 'r') + for line in fd: + line_list = line.strip().split(',') + ts_recarray.append((float(line_list[0]), float(line_list[1]))) + ts_package_dict = {'filename':'tides.ts', + 'timeseries':ts_recarray, + 'time_series_namerecord':'tides', + 'interpolation_methodrecord':'linear'} + + obs_dict = {('ghb_obs.csv', 'binary'): [('ghb-2-6-10', 'GHB', (1, 5, 9)), + ('ghb-3-6-10', 'GHB', (2, 5, 9))], + 'ghb_flows.csv': [('Estuary2', 'GHB', 'Estuary-L2'), + ('Estuary3', 'GHB', 'Estuary-L3')], + 'filename': 'AdvGW_tidal.ghb.obs', 'digits': 10, + 'print_input': True} + + ghb_package = ModflowGwfghb(model, print_input=True, print_flows=True, + save_flows=True, boundnames=True, + timeseries=ts_package_dict, + observations=obs_dict, + maxbound=30, stress_period_data=ghb_period) + + riv_period = {} + riv_period_array = [((0, 2, 0), 'river_stage_1', 1001.0, 35.9, None), + ((0, 3, 1), 'river_stage_1', 1002.0, 35.8, None), + ((0, 4, 2), 'river_stage_1', 1003.0, 35.7, None), + ((0, 4, 3), 'river_stage_1', 1004.0, 35.6, None), + ((0, 5, 4), 'river_stage_1', 1005.0, 35.5, None), + ((0, 5, 5), 'river_stage_1', 1006.0, 35.4, 'riv1_c6'), + ((0, 5, 6), 'river_stage_1', 1007.0, 35.3, 'riv1_c7'), + ((0, 4, 7), 'river_stage_1', 1008.0, 35.2, None), + ((0, 4, 8), 'river_stage_1', 1009.0, 35.1, None), + ((0, 4, 9), 'river_stage_1', 1010.0, 35.0, None), + ((0, 9, 0), 'river_stage_2', 1001.0, 36.9, + 'riv2_upper'), + ((0, 8, 1), 'river_stage_2', 1002.0, 36.8, + 'riv2_upper'), + ((0, 7, 2), 'river_stage_2', 1003.0, 36.7, + 'riv2_upper'), + ((0, 6, 3), 'river_stage_2', 1004.0, 36.6, None), + ((0, 6, 4), 'river_stage_2', 1005.0, 36.5, None), + ((0, 5, 5), 'river_stage_2', 1006.0, 36.4, 'riv2_c6'), + ((0, 5, 6), 'river_stage_2', 1007.0, 36.3, 'riv2_c7'), + ((0, 6, 7), 'river_stage_2', 1008.0, 36.2, None), + ((0, 6, 8), 'river_stage_2', 1009.0, 36.1), + ((0, 6, 9), 'river_stage_2', 1010.0, 36.0)] + + riv_period[0] = riv_period_array + # riv time series + ts_data = [(0.0, 40.0, 41.0), (1.0, 41.0, 41.5), (2.0, 43.0, 42.0), + (3.0, 45.0, 42.8), (4.0, 44.0, 43.0), + (6.0, 43.0, 43.1), (9.0, 42.0, 42.4), (11.0, 41.0, 41.5), + (31.0, 40.0, 41.0)] + ts_dict = {'filename': 'river_stages.ts', 'timeseries': ts_data, + 'time_series_namerecord': 
[('river_stage_1', 'river_stage_2')], + 'interpolation_methodrecord': [('linear', 'stepwise')]} + # riv obs + obs_dict = {'riv_obs.csv': [('rv1-3-1', 'RIV', (0, 2, 0)), + ('rv1-4-2', 'RIV', (0, 3, 1)), + ('rv1-5-3', 'RIV', (0, 4, 2)), + ('rv1-5-4', 'RIV', (0, 4, 3)), + ('rv1-6-5', 'RIV', (0, 5, 4)), + ('rv1-c6', 'RIV', 'riv1_c6'), + ('rv1-c7', 'RIV', 'riv1_c7'), + ('rv2-upper', 'RIV', 'riv2_upper'), + ('rv-2-7-4', 'RIV', (0, 6, 3)), + ('rv2-8-5', 'RIV', (0, 6, 4)), + ('rv-2-9-6', 'RIV', (0, 5, 5,))], + 'riv_flowsA.csv': [('riv1-3-1', 'RIV', (0, 2, 0)), + ('riv1-4-2', 'RIV', (0, 3, 1)), + ('riv1-5-3', 'RIV', (0, 4, 2))], + 'riv_flowsB.csv': [('riv2-10-1', 'RIV', (0, 9, 0)), + ('riv-2-9-2', 'RIV', (0, 8, 1)), + ('riv2-8-3', 'RIV', (0, 7, 2))], + 'filename': 'AdvGW_tidal.riv.obs', 'digits': 10, + 'print_input': True} + + riv_package = ModflowGwfriv(model, print_input=True, print_flows=True, + save_flows=True, + boundnames=True, + timeseries=ts_dict, + maxbound=20, stress_period_data=riv_period, + observations=obs_dict) + + rch1_period = {} + rch1_period_array = [] + col_range = {0: 3, 1: 4, 2: 5} + for row in range(0, 15): + if row in col_range: + col_max = col_range[row] + else: + col_max = 6 + for col in range(0, col_max): + if (row == 3 and col == 5) or (row == 2 and col == 4) or ( + row == 1 and col == 3) or (row == 0 and col == 2): + mult = 0.5 + else: + mult = 1.0 + if row == 0 and col == 0: + bnd = 'rch-1-1' + elif row == 0 and col == 1: + bnd = 'rch-1-2' + elif row == 1 and col == 2: + bnd = 'rch-2-3' + else: + bnd = None + rch1_period_array.append(((0, row, col), 'rch_1', mult, bnd)) + rch1_period[0] = rch1_period_array + rch1_package = ModflowGwfrch(model, filename='AdvGW_tidal_1.rch', + pname='rch_1', fixed_cell=True, + auxiliary='MULTIPLIER', + auxmultname='MULTIPLIER', + print_input=True, print_flows=True, + save_flows=True, boundnames=True, + maxbound=84, stress_period_data=rch1_period) + ts_data = [(0.0, 0.0015), (1.0, 0.0010), (11.0, 0.0015), + (21.0, 0.0025), (31.0, 0.0015)] + rch1_package.ts.initialize(timeseries=ts_data, + filename='recharge_rates_1.ts', + time_series_namerecord='rch_1', + interpolation_methodrecord='stepwise') + + rch2_period = {} + rch2_period_array = [((0, 0, 2), 'rch_2', 0.5), ((0, 0, 3), 'rch_2', 1.0), + ((0, 0, 4), 'rch_2', 1.0), + ((0, 0, 5), 'rch_2', 1.0), ((0, 0, 6), 'rch_2', 1.0), + ((0, 0, 7), 'rch_2', 1.0), + ((0, 0, 8), 'rch_2', 1.0), ((0, 0, 9), 'rch_2', 0.5), + ((0, 1, 3), 'rch_2', 0.5), + ((0, 1, 4), 'rch_2', 1.0), ((0, 1, 5), 'rch_2', 1.0), + ((0, 1, 6), 'rch_2', 1.0), + ((0, 1, 7), 'rch_2', 1.0), ((0, 1, 8), 'rch_2', 0.5), + ((0, 2, 4), 'rch_2', 0.5), + ((0, 2, 5), 'rch_2', 1.0), ((0, 2, 6), 'rch_2', 1.0), + ((0, 2, 7), 'rch_2', 0.5), + ((0, 3, 5), 'rch_2', 0.5), ((0, 3, 6), 'rch_2', 0.5)] + rch2_period[0] = rch2_period_array + rch2_package = ModflowGwfrch(model, filename='AdvGW_tidal_2.rch', + pname='rch_2', fixed_cell=True, + auxiliary='MULTIPLIER', + auxmultname='MULTIPLIER', + print_input=True, print_flows=True, + save_flows=True, + maxbound=20, stress_period_data=rch2_period) + ts_data = [(0.0, 0.0016), (1.0, 0.0018), (11.0, 0.0019), + (21.0, 0.0016), (31.0, 0.0018)] + rch2_package.ts.initialize(timeseries=ts_data, + filename='recharge_rates_2.ts', + time_series_namerecord='rch_2', + interpolation_methodrecord='linear') + + rch3_period = {} + rch3_period_array = [] + col_range = {0: 9, 1: 8, 2: 7} + for row in range(0, 15): + if row in col_range: + col_min = col_range[row] + else: + col_min = 6 + for col in range(col_min, 10): + if 
(row == 0 and col == 9) or (row == 1 and col == 8) or ( + row == 2 and col == 7) or (row == 3 and col == 6): + mult = 0.5 + else: + mult = 1.0 + rch3_period_array.append(((0, row, col), 'rch_3', mult)) + rch3_period[0] = rch3_period_array + rch3_package = ModflowGwfrch(model, filename='AdvGW_tidal_3.rch', + pname='rch_3', fixed_cell=True, + auxiliary='MULTIPLIER', + auxmultname='MULTIPLIER', + print_input=True, print_flows=True, + save_flows=True, + maxbound=54, + stress_period_data=rch3_period) + ts_data = [(0.0, 0.0017), (1.0, 0.0020), (11.0, 0.0017), + (21.0, 0.0018), (31.0, 0.0020)] + rch3_package.ts.initialize(timeseries=ts_data, + filename='recharge_rates_3.ts', + time_series_namerecord='rch_3', + interpolation_methodrecord='linear') + + # change folder to save simulation + sim.simulation_data.mfpath.set_sim_path(run_folder) + + # write simulation to new location + sim.set_all_data_external() + sim.write_simulation() + + # run simulation + sim.run_simulation() + + # compare output to expected results + head_file = os.path.join(os.getcwd(), expected_head_file) + head_new = os.path.join(run_folder, 'AdvGW_tidal.hds') + outfile = os.path.join(run_folder, 'head_compare.dat') + assert pymake.compare_heads(None, None, files1=head_file, files2=head_new, + outfile=outfile) + + # test rename all + model.rename_all_packages('new_name') + assert model.name_file.filename == 'new_name.nam' + package_type_dict = {} + for package in model.packagelist: + if not package.package_type in package_type_dict: + assert package.filename == 'new_name.{}'.format(package.package_type) + package_type_dict[package.package_type] = 1 + sim.write_simulation() + name_file = os.path.join(run_folder, 'new_name.nam') + assert os.path.exists(name_file) + dis_file = os.path.join(run_folder, 'new_name.dis') + assert os.path.exists(dis_file) + + sim.rename_all_packages('all_files_same_name') + package_type_dict = {} + for package in model.packagelist: + if not package.package_type in package_type_dict: + assert package.filename == \ + 'all_files_same_name.{}'.format(package.package_type) + package_type_dict[package.package_type] = 1 + assert sim._tdis_file.filename == 'all_files_same_name.tdis' + for ims_file in sim._ims_files.values(): + assert ims_file.filename == 'all_files_same_name.ims' + sim.write_simulation() + name_file = os.path.join(run_folder, 'all_files_same_name.nam') + assert os.path.exists(name_file) + dis_file = os.path.join(run_folder, 'all_files_same_name.dis') + assert os.path.exists(dis_file) + tdis_file = os.path.join(run_folder, 'all_files_same_name.tdis') + assert os.path.exists(tdis_file) + + # load simulation + sim_load = MFSimulation.load(sim.name, 'mf6', exe_name, + sim.simulation_data.mfpath.get_sim_path(), + verbosity_level=0) + model = sim_load.get_model() + # confirm ghb obs data has two blocks with correct file names + ghb = model.get_package('ghb') + obs = ghb.obs + obs_data = obs.continuous.get_data() + found_flows = False + found_obs = False + for key, value in obs_data.items(): + if key.lower() == 'ghb_flows.csv': + # there should be only one + assert not found_flows + found_flows = True + if key.lower() == 'ghb_obs.csv': + # there should be only one + assert not found_obs + found_obs = True + assert found_flows and found_obs + + # clean up + sim.delete_output_files() + + # check packages + chk = sim.check() + summary = '.'.join(chk[0].summary_array.desc) + assert summary == '' + + return + + +def test004_bcfss(): + # init paths + test_ex_name = 'test004_bcfss' + model_name = 'bcf2ss' + 
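    # editorial note (not part of this patch): test004_bcfss exercises
    # cell rewetting. In the npf rewet_record built below, WETFCT is the
    # multiplier applied when computing the head at a cell that rewets,
    # IWETIT is the iteration interval for wetting checks, and IHDWET
    # selects the equation used for the rewetted head. In the wetdry
    # array, 0.0 marks a cell that can never rewet, a negative threshold
    # allows rewetting from the cell below only, and a positive
    # threshold also allows rewetting from horizontal neighbors.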
+ pth = os.path.join('..', 'examples', 'data', 'mf6', 'create_tests', + test_ex_name) + run_folder = os.path.join(cpth, test_ex_name) + if not os.path.isdir(run_folder): + os.makedirs(run_folder) + + expected_output_folder = os.path.join(pth, 'expected_output') + expected_head_file = os.path.join(expected_output_folder, 'bcf2ss.hds') + + # create simulation + sim = MFSimulation(sim_name=model_name, version='mf6', exe_name=exe_name, + sim_ws=pth) + tdis_rc = [(1.0, 1, 1.0), (1.0, 1, 1.0)] + tdis_package = ModflowTdis(sim, time_units='DAYS', nper=2, + perioddata=tdis_rc) + model = ModflowGwf(sim, modelname=model_name, + model_nam_file='{}.nam'.format(model_name)) + ims_package = ModflowIms(sim, print_option='ALL', + csv_output_filerecord='bcf2ss.ims.csv', + complexity='SIMPLE', + outer_hclose=0.000001, outer_maximum=500, + under_relaxation='NONE', inner_maximum=100, + inner_hclose=0.000001, rcloserecord=0.001, + linear_acceleration='CG', + scaling_method='NONE', reordering_method='NONE', + relaxation_factor=0.97) + sim.register_ims_package(ims_package, [model.name]) + dis_package = ModflowGwfdis(model, nlay=2, nrow=10, ncol=15, delr=500.0, + delc=500.0, + top=150.0, botm=[50.0, -50.0], + filename='{}.dis'.format(model_name)) + ic_package = ModflowGwfic(model, strt=0.0, + filename='{}.ic'.format(model_name)) + wetdry_data = [] + for row in range(0, 10): + if row == 2 or row == 7: + wetdry_data += [2.0, 2.0, 2.0, -2.0, 2.0, 2.0, 2.0, 2.0] + else: + wetdry_data += [2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0] + wetdry_data += [-2.0, -2.0, -2.0, -2.0, -2.0, -2.0, -2.0] + for row in range(0, 10): + for col in range(0, 15): + wetdry_data.append(0.0) + npf_package = ModflowGwfnpf(model, rewet_record=[ + ('WETFCT', 1.0, 'IWETIT', 1, 'IHDWET', 0)], + save_flows=True, icelltype=[1, 0], + wetdry=wetdry_data, k=[10.0, 5.0], + k33=0.1) + oc_package = ModflowGwfoc(model, budget_filerecord='bcf2ss.cbb', + head_filerecord='bcf2ss.hds', + headprintrecord=[('COLUMNS', 15, 'WIDTH', 12, + 'DIGITS', 2, 'GENERAL')], + saverecord=[('HEAD', 'ALL'), ('BUDGET', 'ALL')], + printrecord=[('HEAD', 'ALL'), ('BUDGET', 'ALL')]) + aux = {0: [[50.0], [1.3]], 1: [[200.0], [1.5]]} + # aux = {0: [[100.0], [2.3]]} + rch_package = ModflowGwfrcha(model, readasarrays=True, save_flows=True, + auxiliary=[('var1', 'var2')], + recharge={0: 0.004}, aux=aux) # *** test if aux works *** + + # aux tests + aux_out = rch_package.aux.get_data() + assert(aux_out[0][0][0,0] == 50.) 
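    # editorial note (not part of this patch): with readasarrays the
    # scalar auxiliary values above are broadcast over the (nrow, ncol)
    # grid, so aux.get_data() returns one 10x15 array per variable per
    # stress period, indexed as aux_out[period][variable][row, col];
    # that is why each assert compares cell (0, 0) against the scalar.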
+ assert(aux_out[0][1][0,0] == 1.3) + assert(aux_out[1][0][0,0] == 200.0) + assert(aux_out[1][1][0,0] == 1.5) + + riv_period = {} + riv_period_array = [] + for row in range(0, 10): + riv_period_array.append(((1, row, 14), 0.0, 10000.0, -5.0)) + riv_period[0] = riv_period_array + riv_package = ModflowGwfriv(model, save_flows='bcf2ss.cbb', maxbound=10, + stress_period_data=riv_period) + + wel_period = {} + stress_period_data = [((1, 2, 3), -35000.0, 1, 2, 3), + ((1, 7, 3), -35000.0, 4, 5, 6)] + wel_period[1] = stress_period_data + wel_package = ModflowGwfwel(model, print_input=True, print_flows=True, + save_flows=True, + auxiliary=[('var1', 'var2', 'var3')], + maxbound=2, + stress_period_data=wel_period) + + # change folder to save simulation + sim.simulation_data.mfpath.set_sim_path(run_folder) + + # write simulation to new location + sim.set_all_data_external() + sim.write_simulation() + + # run simulation + if run: + sim.run_simulation() + + # compare output to expected results + head_file = os.path.join(os.getcwd(), expected_head_file) + head_new = os.path.join(run_folder, 'bcf2ss.hds') + outfile = os.path.join(run_folder, 'head_compare.dat') + assert pymake.compare_heads(None, None, files1=head_file, files2=head_new, + outfile=outfile) + + # clean up + sim.delete_output_files() + + return + + +def test035_fhb(): + # init paths + test_ex_name = 'test035_fhb' + model_name = 'fhb2015' + + pth = os.path.join('..', 'examples', 'data', 'mf6', 'create_tests', + test_ex_name) + run_folder = os.path.join(cpth, test_ex_name) + if not os.path.isdir(run_folder): + os.makedirs(run_folder) + + expected_output_folder = os.path.join(pth, 'expected_output') + expected_head_file = os.path.join(expected_output_folder, + 'fhb2015_fhb.hds') + + # create simulation + sim = MFSimulation(sim_name=model_name, version='mf6', exe_name=exe_name, + sim_ws=pth) + tdis_rc = [(400.0, 10, 1.0), (200.0, 4, 1.0), (400.0, 6, 1.1)] + tdis_package = ModflowTdis(sim, time_units='DAYS', nper=3, + perioddata=tdis_rc) + model = ModflowGwf(sim, modelname=model_name, + model_nam_file='{}.nam'.format(model_name)) + ims_package = ModflowIms(sim, print_option='SUMMARY', complexity='SIMPLE', + outer_hclose=0.001, + outer_maximum=120, under_relaxation='NONE', + inner_maximum=100, inner_hclose=0.0001, + rcloserecord=0.1, linear_acceleration='CG', + preconditioner_levels=7, + preconditioner_drop_tolerance=0.001, + number_orthogonalizations=2) + sim.register_ims_package(ims_package, [model.name]) + dis_package = ModflowGwfdis(model, length_units='UNDEFINED', nlay=1, + nrow=3, ncol=10, delr=1000.0, + delc=1000.0, top=50.0, botm=-200.0, + filename='{}.dis'.format(model_name)) + ic_package = ModflowGwfic(model, strt=0.0, + filename='{}.ic'.format(model_name)) + npf_package = ModflowGwfnpf(model, perched=True, icelltype=0, k=20.0, + k33=1.0) + oc_package = ModflowGwfoc(model, head_filerecord='fhb2015_fhb.hds', + headprintrecord=[('COLUMNS', 20, 'WIDTH', 5, + 'DIGITS', 2, 'FIXED')], + saverecord={0: [('HEAD', 'ALL')], + 2: [('HEAD', 'ALL')]}, + printrecord={ + 0: [('HEAD', 'ALL'), ('BUDGET', 'ALL')], + 2: [('HEAD', 'ALL'), ('BUDGET', 'ALL')]}) + sto_package = ModflowGwfsto(model, storagecoefficient=True, iconvert=0, + ss=0.01, sy=0.0) + time = model.modeltime + assert (time.steady_state[0] == False and time.steady_state[1] == False + and time.steady_state[2] == False) + wel_period = {0: [((0, 1, 0), 'flow')]} + wel_package = ModflowGwfwel(model, print_input=True, print_flows=True, + save_flows=True, + maxbound=1, 
stress_period_data=wel_period) + well_ts = [(0.0, 2000.0), (307.0, 6000.0), (791.0, 5000.0), + (1000.0, 9000.0)] + wel_package.ts.initialize(filename='fhb_flow.ts', timeseries=well_ts, + time_series_namerecord='flow', + interpolation_methodrecord='linear') + + chd_period = { + 0: [((0, 0, 9), 'head'), ((0, 1, 9), 'head'), ((0, 2, 9), 'head')]} + chd_package = ModflowGwfchd(model, print_input=True, print_flows=True, + save_flows=True, maxbound=3, + stress_period_data=chd_period) + chd_ts = [(0.0, 0.0), (307.0, 1.0), (791.0, 5.0), (1000.0, 2.0)] + chd_package.ts.initialize(filename='fhb_head.ts', timeseries=chd_ts, + time_series_namerecord='head', + interpolation_methodrecord='linearend') + + # change folder to save simulation + sim.simulation_data.mfpath.set_sim_path(run_folder) + + # write simulation to new location + sim.set_all_data_external() + sim.write_simulation() + + # run simulation + if run: + sim.run_simulation() + + # compare output to expected results + head_file = os.path.join(os.getcwd(), expected_head_file) + head_new = os.path.join(run_folder, 'fhb2015_fhb.hds') + outfile = os.path.join(run_folder, 'head_compare.dat') + assert pymake.compare_heads(None, None, files1=head_file, files2=head_new, + outfile=outfile) + + # clean up + sim.delete_output_files() + + return + + +def test006_gwf3_disv(): + # init paths + test_ex_name = 'test006_gwf3_disv' + model_name = 'flow' + + pth = os.path.join('..', 'examples', 'data', 'mf6', 'create_tests', + test_ex_name) + run_folder = os.path.join(cpth, test_ex_name) + if not os.path.isdir(run_folder): + os.makedirs(run_folder) + + expected_output_folder = os.path.join(pth, 'expected_output') + expected_head_file = os.path.join(expected_output_folder, 'flow.hds') + + # create simulation + sim = MFSimulation(sim_name=test_ex_name, version='mf6', exe_name=exe_name, + sim_ws=pth) + tdis_rc = [(1.0, 1, 1.0)] + tdis_package = ModflowTdis(sim, time_units='DAYS', nper=1, + perioddata=tdis_rc) + model = ModflowGwf(sim, modelname=model_name, + model_nam_file='{}.nam'.format(model_name)) + ims_package = ModflowIms(sim, print_option='SUMMARY', + outer_hclose=0.00000001, + outer_maximum=1000, under_relaxation='NONE', + inner_maximum=1000, + inner_hclose=0.00000001, rcloserecord=0.01, + linear_acceleration='BICGSTAB', + scaling_method='NONE', reordering_method='NONE', + relaxation_factor=0.97) + sim.register_ims_package(ims_package, [model.name]) + vertices = testutils.read_vertices(os.path.join(pth, 'vertices.txt')) + c2drecarray = testutils.read_cell2d(os.path.join(pth, 'cell2d.txt')) + disv_package = ModflowGwfdisv(model, ncpl=121, nlay=1, nvert=148, top=0.0, + botm=-100.0, idomain=1, + vertices=vertices, cell2d=c2drecarray, + filename='{}.disv'.format(model_name)) + strt_list = [1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, + 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0] + ic_package = ModflowGwfic(model, strt=strt_list, + filename='{}.ic'.format(model_name)) + k = {'filename': 'k.bin', 'factor': 1.0, 'data': 1.0, 'binary': 'True'} + npf_package = ModflowGwfnpf(model, save_flows=True, icelltype=0, k=k, + k33=1.0) + k_data = npf_package.k.get_data() + assert(k_data[0,0] == 1.0) + + oc_package = ModflowGwfoc(model, budget_filerecord='flow.cbc', + head_filerecord='flow.hds', + 
saverecord=[('HEAD', 'ALL'), ('BUDGET', 'ALL')], + printrecord=[('HEAD', 'ALL'), ('BUDGET', 'ALL')]) + + # build stress_period_data for chd package + set_1 = [0, 7, 14, 18, 22, 26, 33] + set_2 = [6, 13, 17, 21, 25, 32, 39] + stress_period_data = [] + for value in set_1: + stress_period_data.append(((0, value), 1.0)) + for value in set_2: + stress_period_data.append(((0, value), 0.0)) + chd_package = ModflowGwfchd(model, print_input=True, print_flows=True, + save_flows=True, maxbound=14, + stress_period_data=stress_period_data) + + period_rch = {} + rch_array = [] + for val in range(0, 10): + rch_array.append(((0, val), 0.0)) + period_rch[0] = rch_array + rch_package = ModflowGwfrch(model, fixed_cell=True, maxbound=10, + stress_period_data=period_rch) + + gncrecarray = [((0, 9), (0, 40), (0, 8), 0.333333333333), + ((0, 9), (0, 42), (0, 10), 0.333333333333), + ((0, 10), (0, 43), (0, 9), 0.333333333333), + ((0, 10), (0, 45), (0, 11), 0.333333333333), + ((0, 11), (0, 46), (0, 10), 0.333333333333), + ((0, 11), (0, 48), (0, 12), 0.333333333333), + ((0, 15), (0, 40), (0, 8), 0.333333333333), + ((0, 15), (0, 58), (0, 19), 0.333333333333), + ((0, 16), (0, 48), (0, 12), 0.333333333333), + ((0, 16), (0, 66), (0, 20), 0.333333333333), + ((0, 19), (0, 67), (0, 15), 0.333333333333), + ((0, 19), (0, 85), (0, 23), 0.333333333333), + ((0, 20), (0, 75), (0, 16), 0.333333333333), + ((0, 20), (0, 93), (0, 24), 0.333333333333), + ((0, 23), (0, 94), (0, 19), 0.333333333333), + ((0, 23), (0, 112), (0, 27), 0.333333333333), + ((0, 24), (0, 102), (0, 20), 0.333333333333), + ((0, 24), (0, 120), (0, 31), 0.333333333333), + ((0, 28), (0, 112), (0, 27), 0.333333333333), + ((0, 28), (0, 114), (0, 29), 0.333333333333), + ((0, 29), (0, 115), (0, 28), 0.333333333333), + ((0, 29), (0, 117), (0, 30), 0.333333333333), + ((0, 30), (0, 118), (0, 29), 0.333333333333), + ((0, 30), (0, 120), (0, 31), 0.333333333333)] + gnc_package = ModflowGwfgnc(model, print_input=True, print_flows=True, + numgnc=24, numalphaj=1, + gncdata=gncrecarray) + + # change folder to save simulation + sim.simulation_data.mfpath.set_sim_path(run_folder) + + # write simulation to new location + sim.write_simulation() + + # run simulation + if run: + sim.run_simulation() + + # compare output to expected results + head_file = os.path.join(os.getcwd(), expected_head_file) + head_new = os.path.join(run_folder, 'flow.hds') + outfile = os.path.join(run_folder, 'head_compare.dat') + assert pymake.compare_heads(None, None, files1=head_file, files2=head_new, + outfile=outfile) + + # export to netcdf - temporarily disabled + #model.export(os.path.join(run_folder, "test006_gwf3.nc")) + # export to shape file + model.export(os.path.join(run_folder, "test006_gwf3.shp")) + + # clean up + sim.delete_output_files() + + return + + +def test006_2models_gnc(): + # init paths + test_ex_name = 'test006_2models_gnc' + model_name_1 = 'model1' + model_name_2 = 'model2' + + pth = os.path.join('..', 'examples', 'data', 'mf6', 'create_tests', + test_ex_name) + run_folder = os.path.join(cpth, test_ex_name) + if not os.path.isdir(run_folder): + os.makedirs(run_folder) + + expected_output_folder = os.path.join(pth, 'expected_output') + expected_head_file_1 = os.path.join(expected_output_folder, 'model1.hds') + expected_head_file_2 = os.path.join(expected_output_folder, 'model2.hds') + + # create simulation + sim = MFSimulation(sim_name=test_ex_name, version='mf6', exe_name=exe_name, + sim_ws=pth) + tdis_rc = [(1.0, 1, 1.0)] + tdis_package = ModflowTdis(sim, time_units='DAYS', nper=1, + 
perioddata=tdis_rc) + model_1 = ModflowGwf(sim, modelname=model_name_1, + model_nam_file='{}.nam'.format(model_name_1)) + model_2 = ModflowGwf(sim, modelname=model_name_2, + model_nam_file='{}.nam'.format(model_name_2)) + ims_package = ModflowIms(sim, print_option='SUMMARY', + outer_hclose=0.00000001, + outer_maximum=1000, under_relaxation='NONE', + inner_maximum=1000, + inner_hclose=0.00000001, rcloserecord=0.01, + linear_acceleration='BICGSTAB', + scaling_method='NONE', reordering_method='NONE', + relaxation_factor=0.97) + sim.register_ims_package(ims_package, [model_1.name, model_2.name]) + idom = [1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, + 1, 1, 0, 0, 0, 1, 1, + 1, 1, 0, 0, 0, 1, 1, + 1, 1, 0, 0, 0, 1, 1, + 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, ] + dis_package_1 = ModflowGwfdis(model_1, length_units='METERS', nlay=1, + nrow=7, ncol=7, idomain=idom, + delr=100.0, delc=100.0, top=0.0, botm=-100.0, + filename='{}.dis'.format(model_name_1)) + dis_package_2 = ModflowGwfdis(model_2, length_units='METERS', nlay=1, + nrow=9, ncol=9, delr=33.33, + delc=33.33, top=0.0, botm=-100.0, + filename='{}.dis'.format(model_name_2)) + + strt_list = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, + 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, ] + ic_package_1 = ModflowGwfic(model_1, strt=strt_list, + filename='{}.ic'.format(model_name_1)) + ic_package_2 = ModflowGwfic(model_2, strt=1.0, + filename='{}.ic'.format(model_name_2)) + npf_package_1 = ModflowGwfnpf(model_1, save_flows=True, perched=True, + icelltype=0, k=1.0, k33=1.0) + npf_package_2 = ModflowGwfnpf(model_2, save_flows=True, perched=True, + icelltype=0, k=1.0, k33=1.0) + oc_package_1 = ModflowGwfoc(model_1, budget_filerecord='model1.cbc', + head_filerecord='model1.hds', + saverecord=[('HEAD', 'ALL'), + ('BUDGET', 'ALL')], + printrecord=[('HEAD', 'ALL'), + ('BUDGET', 'ALL')]) + oc_package_2 = ModflowGwfoc(model_2, budget_filerecord='model2.cbc', + head_filerecord='model2.hds', + saverecord=[('HEAD', 'ALL'), + ('BUDGET', 'ALL')], + printrecord=[('HEAD', 'ALL'), + ('BUDGET', 'ALL')]) + + # build periodrecarray for chd package + set_1 = [0, 7, 14, 18, 22, 26, 33] + set_2 = [6, 13, 17, 21, 25, 32, 39] + stress_period_data = [] + for value in range(0, 7): + stress_period_data.append(((0, value, 0), 1.0)) + for value in range(0, 7): + stress_period_data.append(((0, value, 6), 0.0)) + chd_package = ModflowGwfchd(model_1, print_input=True, print_flows=True, + save_flows=True, maxbound=30, + stress_period_data=stress_period_data) + + gncrecarray = testutils.read_gncrecarray(os.path.join(pth, 'gnc.txt')) + # test gnc delete + new_gncrecarray = gncrecarray[10:] + gnc_package = ModflowGwfgnc(sim, print_input=True, print_flows=True, + numgnc=26, numalphaj=1, + gncdata=new_gncrecarray) + sim.remove_package(gnc_package.package_type) + + gnc_package = ModflowGwfgnc(sim, print_input=True, print_flows=True, + numgnc=36, numalphaj=1, + gncdata=gncrecarray) + + exgrecarray = testutils.read_exchangedata(os.path.join(pth, 'exg.txt')) + + # build obs dictionary + gwf_obs = {('gwfgwf_obs.csv'): [('gwf-1-3-2_1-1-1', 'flow-ja-face', + (0, 2, 1), (0, 0, 0)), + ('gwf-1-3-2_1-2-1', 'flow-ja-face', + (0, 2, 1), (0, 1, 0))]} + + # test exg delete + newexgrecarray = exgrecarray[10:] + exg_package = ModflowGwfgwf(sim, print_input=True, print_flows=True, + save_flows=True, auxiliary='testaux', + 
gnc_filerecord='test006_2models_gnc.gnc', + nexg=26, exchangedata=newexgrecarray, + exgtype='gwf6-gwf6', exgmnamea=model_name_1, + exgmnameb=model_name_2) + sim.remove_package(exg_package.package_type) + + exg_package = ModflowGwfgwf(sim, print_input=True, print_flows=True, + save_flows=True, auxiliary='testaux', + gnc_filerecord='test006_2models_gnc.gnc', + nexg=36, exchangedata=exgrecarray, + exgtype='gwf6-gwf6', exgmnamea=model_name_1, + exgmnameb=model_name_2, observations=gwf_obs) + + # change folder to save simulation + sim.simulation_data.mfpath.set_sim_path(run_folder) + + # write simulation to new location + sim.set_all_data_external() + sim.write_simulation() + + # run simulation + if run: + sim.run_simulation() + + # compare output to expected results + head_file = os.path.join(os.getcwd(), expected_head_file_1) + head_new = os.path.join(run_folder, 'model1.hds') + outfile = os.path.join(run_folder, 'head_compare.dat') + assert pymake.compare_heads(None, None, files1=head_file, files2=head_new, + outfile=outfile) + + # compare output to expected results + head_file = os.path.join(os.getcwd(), expected_head_file_2) + head_new = os.path.join(run_folder, 'model2.hds') + outfile = os.path.join(run_folder, 'head_compare.dat') + assert pymake.compare_heads(None, None, files1=head_file, files2=head_new, + outfile=outfile) + + # clean up + sim.delete_output_files() + + return + + +def test050_circle_island(): + # init paths + test_ex_name = 'test050_circle_island' + model_name = 'ci' + + pth = os.path.join('..', 'examples', 'data', 'mf6', 'create_tests', + test_ex_name) + run_folder = os.path.join(cpth, test_ex_name) + if not os.path.isdir(run_folder): + os.makedirs(run_folder) + + expected_output_folder = os.path.join(pth, 'expected_output') + expected_head_file = os.path.join(expected_output_folder, 'ci.output.hds') + + # create simulation + sim = MFSimulation(sim_name=test_ex_name, version='mf6', exe_name=exe_name, + sim_ws=pth) + tdis_rc = [(1.0, 1, 1.0)] + tdis_package = ModflowTdis(sim, time_units='DAYS', nper=1, + perioddata=tdis_rc) + model = ModflowGwf(sim, modelname=model_name, + model_nam_file='{}.nam'.format(model_name)) + ims_package = ModflowIms(sim, print_option='SUMMARY', + outer_hclose=0.000001, + outer_maximum=500, under_relaxation='NONE', + inner_maximum=1000, + inner_hclose=0.000001, rcloserecord=0.000001, + linear_acceleration='BICGSTAB', + relaxation_factor=0.0) + sim.register_ims_package(ims_package, [model.name]) + vertices = testutils.read_vertices(os.path.join(pth, 'vertices.txt')) + c2drecarray = testutils.read_cell2d(os.path.join(pth, 'cell2d.txt')) + disv_package = ModflowGwfdisv(model, ncpl=5240, nlay=2, nvert=2778, + top=0.0, botm=[-20.0, -40.0], + idomain=1, vertices=vertices, + cell2d=c2drecarray, + filename='{}.disv'.format(model_name)) + ic_package = ModflowGwfic(model, strt=0.0, + filename='{}.ic'.format(model_name)) + npf_package = ModflowGwfnpf(model, save_flows=True, icelltype=0, k=10.0, + k33=0.2) + oc_package = ModflowGwfoc(model, budget_filerecord='ci.output.cbc', + head_filerecord='ci.output.hds', + saverecord=[('HEAD', 'ALL'), ('BUDGET', 'ALL')], + printrecord=[('HEAD', 'ALL'), ('BUDGET', 'ALL')]) + + stress_period_data = testutils.read_ghbrecarray( + os.path.join(pth, 'ghb.txt'), 2) + ghb_package = ModflowGwfghb(model, maxbound=3173, + stress_period_data=stress_period_data) + + rch_data = ['OPEN/CLOSE', 'rech.dat', 'FACTOR', 1.0, 'IPRN', 0] + rch_package = ModflowGwfrcha(model, readasarrays=True, + save_flows=True, recharge=rch_data) + + # 
change folder to save simulation + sim.simulation_data.mfpath.set_sim_path(run_folder) + + # write simulation to new location + sim.set_all_data_external() + sim.write_simulation() + + # run simulation + if run: + sim.run_simulation() + + # compare output to expected results + head_file = os.path.join(os.getcwd(), expected_head_file) + head_new = os.path.join(run_folder, 'ci.output.hds') + outfile = os.path.join(run_folder, 'head_compare.dat') + assert pymake.compare_heads(None, None, files1=head_file, files2=head_new, + outfile=outfile) + + # clean up + sim.delete_output_files() + + return + + +def test028_sfr(): + # init paths + test_ex_name = 'test028_sfr' + model_name = 'test1tr' + + pth = os.path.join('..', 'examples', 'data', 'mf6', 'create_tests', + test_ex_name) + run_folder = os.path.join(cpth, test_ex_name) + if not os.path.isdir(run_folder): + os.makedirs(run_folder) + + expected_output_folder = os.path.join(pth, 'expected_output') + expected_head_file = os.path.join(expected_output_folder, 'test1tr.hds') + + # create simulation + sim = MFSimulation(sim_name=test_ex_name, version='mf6', exe_name=exe_name, + sim_ws=pth) + sim.name_file.continue_.set_data(True) + tdis_rc = [(1577889000, 50, 1.1), (1577889000, 50, 1.1)] + tdis_package = ModflowTdis(sim, time_units='SECONDS', nper=2, + perioddata=tdis_rc, filename='simulation.tdis') + model = ModflowGwf(sim, modelname=model_name, + model_nam_file='{}.nam'.format(model_name)) + model.name_file.save_flows.set_data(True) + ims_package = ModflowIms(sim, print_option='SUMMARY', outer_hclose=0.00001, + outer_maximum=100, under_relaxation='DBD', + under_relaxation_theta=0.85, + under_relaxation_kappa=0.0001, + under_relaxation_gamma=0.0, + under_relaxation_momentum=0.1, + backtracking_number=0, backtracking_tolerance=1.1, + backtracking_reduction_factor=0.7, + backtracking_residual_limit=1.0, + inner_hclose=0.00001, rcloserecord=0.1, + inner_maximum=100, linear_acceleration='CG', + scaling_method='NONE', reordering_method='NONE', + relaxation_factor=0.99, + filename='model.ims') + sim.register_ims_package(ims_package, [model.name]) + top = testutils.read_std_array(os.path.join(pth, 'top.txt'), 'float') + botm = testutils.read_std_array(os.path.join(pth, 'botm.txt'), 'float') + idomain = testutils.read_std_array(os.path.join(pth, 'idomain.txt'), 'int') + dis_package = ModflowGwfdis(model, length_units='FEET', nlay=1, nrow=15, + ncol=10, delr=5000.0, delc=5000.0, + top=top, botm=botm, idomain=idomain, + filename='{}.dis'.format(model_name)) + strt = testutils.read_std_array(os.path.join(pth, 'strt.txt'), 'float') + strt_int = ['internal', 'factor', 1.0, 'iprn', 0, strt] + ic_package = ModflowGwfic(model, strt=strt_int, + filename='{}.ic'.format(model_name)) + + k_vals = testutils.read_std_array(os.path.join(pth, 'k.txt'), 'float') + k = ['internal', 'factor', 3.000E-03, 'iprn', 0, k_vals] + npf_package = ModflowGwfnpf(model, icelltype=1, k=k, k33=1.0) + npf_package.k.factor = 2.000E-04 + + oc_package = ModflowGwfoc(model, budget_filerecord='test1tr.cbc', + head_filerecord='test1tr.hds', + saverecord={0: [('HEAD', 'FREQUENCY', 5), + ('BUDGET', 'FREQUENCY', 5)]}, + printrecord={0: [('HEAD', 'FREQUENCY', 5), + ('BUDGET', 'FREQUENCY', 5)]}) + + sy_vals = testutils.read_std_array(os.path.join(pth, 'sy.txt'), 'float') + sy = {'factor': 0.2, 'iprn': 0, 'data': sy_vals} + sto_package = ModflowGwfsto(model, iconvert=1, ss=1.0E-6, sy=sy) + + surf = testutils.read_std_array(os.path.join(pth, 'surface.txt'), 'float') + surf_data = ['internal', 'factor', 
1.0, 'iprn', -1, surf] + + # build time array series + tas = {0.0: 9.5E-08, 6.0E09: 9.5E-08, + 'filename': 'test028_sfr.evt.tas', + 'time_series_namerecord': 'evtarray_1', + 'interpolation_methodrecord': 'LINEAR'} + + evt_package = ModflowGwfevta(model, readasarrays=True, timearrayseries=tas, + surface=surf_data, depth=15.0, + rate='TIMEARRAYSERIES evtarray_1', + filename='test1tr.evt') + # attach obs package to evt + obs_dict = {'test028_sfr.evt.csv': [('obs-1', 'EVT', (0, 1, 5)), + ('obs-2', 'EVT', (0, 2, 3))]} + evt_package.obs.initialize(filename='test028_sfr.evt.obs', print_input=True, + continuous=obs_dict) + + stress_period_data = { + 0: [((0, 12, 0), 988.0, 0.038), ((0, 13, 8), 1045.0, 0.038)]} + ghb_package = ModflowGwfghb(model, maxbound=2, + stress_period_data=stress_period_data) + + rch = testutils.read_std_array(os.path.join(pth, 'recharge.txt'), 'float') + # test empty + rch_data = ModflowGwfrcha.recharge.empty(model) + rch_data[0]['data'] = rch + rch_data[0]['factor'] = 5.000E-10 + rch_data[0]['iprn'] = -1 + rch_package = ModflowGwfrcha(model, readasarrays=True, recharge=rch_data, + filename='test1tr.rch') + + sfr_rec = testutils.read_sfr_rec(os.path.join(pth, 'sfr_rec.txt'), 3) + reach_con_rec = testutils.read_reach_con_rec( + os.path.join(pth, 'sfr_reach_con_rec.txt')) + reach_div_rec = testutils.read_reach_div_rec( + os.path.join(pth, 'sfr_reach_div_rec.txt')) + reach_per_rec = testutils.read_reach_per_rec( + os.path.join(pth, 'sfr_reach_per_rec.txt')) + # test zero based indexes + reach_con_rec[0] = (0, -0.0) + sfr_package = ModflowGwfsfr(model, unit_conversion=1.486, + stage_filerecord='test1tr.sfr.stage.bin', + budget_filerecord='test1tr.sfr.cbc', + nreaches=36, packagedata=sfr_rec, + connectiondata=reach_con_rec, + diversions=reach_div_rec, + perioddata={0: reach_per_rec}) + assert (sfr_package.connectiondata.get_data()[0][1] == -0.0) + assert (sfr_package.connectiondata.get_data()[1][1] == 0.0) + assert (sfr_package.connectiondata.get_data()[2][1] == 1.0) + assert (sfr_package.packagedata.get_data()[1][1].lower() == 'none') + + sim.simulation_data.mfpath.set_sim_path(run_folder) + sim.write_simulation() + sim.load(sim_name=test_ex_name, version='mf6', exe_name=exe_name, + sim_ws=run_folder) + model = sim.get_model(model_name) + sfr_package = model.get_package('sfr') + # sfr_package.set_all_data_external() + assert (sfr_package.connectiondata.get_data()[0][1] == -0.0) + assert (sfr_package.connectiondata.get_data()[1][1] == 0.0) + assert (sfr_package.connectiondata.get_data()[2][1] == 1.0) + pdata = sfr_package.packagedata.get_data() + assert (sfr_package.packagedata.get_data()[1][1].lower() == 'none') + + # undo zero based test and move on + model.remove_package(sfr_package.package_type) + reach_con_rec = testutils.read_reach_con_rec( + os.path.join(pth, 'sfr_reach_con_rec.txt')) + + # set sfr settings back to expected package data + rec_line = (sfr_rec[1][0], (0, 1, 1)) + sfr_rec[1][2:] + sfr_rec[1] = rec_line + + sfr_package = ModflowGwfsfr(model, unit_conversion=1.486, + stage_filerecord='test1tr.sfr.stage.bin', + budget_filerecord='test1tr.sfr.cbc', + nreaches=36, packagedata=sfr_rec, + connectiondata=reach_con_rec, + diversions=reach_div_rec, + perioddata={0: reach_per_rec}) + + obs_data_1 = testutils.read_obs(os.path.join(pth, 'sfr_obs_1.txt')) + obs_data_2 = testutils.read_obs(os.path.join(pth, 'sfr_obs_2.txt')) + obs_data_3 = testutils.read_obs(os.path.join(pth, 'sfr_obs_3.txt')) + obs_data = {'test1tr.sfr.csv': obs_data_1, + 'test1tr.sfr.qaq.csv': 
obs_data_2, + 'test1tr.sfr.flow.csv': obs_data_3} + sfr_package.obs.initialize(filename='test1tr.sfr.obs', digits=10, + print_input=True, continuous=obs_data) + + wells = testutils.read_wells(os.path.join(pth, 'well.txt')) + wel_package = ModflowGwfwel(model, boundnames=True, maxbound=10, + stress_period_data={0: wells, 1: [()]}) + + # write simulation to new location + sim.write_simulation() + + # run simulation + if run: + sim.run_simulation() + + # compare output to expected results + head_file = os.path.join(os.getcwd(), expected_head_file) + head_new = os.path.join(run_folder, 'test1tr.hds') + outfile = os.path.join(run_folder, 'head_compare.dat') + assert pymake.compare_heads(None, None, files1=head_file, files2=head_new, + outfile=outfile) + + # clean up + sim.delete_output_files() + + return + + +if __name__ == '__main__': + np001() + np002() + test004_bcfss() + test005_advgw_tidal() + test006_2models_gnc() + test006_gwf3_disv() + test021_twri() + test028_sfr() + test035_fhb() + test050_circle_island() diff --git a/docs/mf6_dev_guide.md b/docs/mf6_dev_guide.md index ae0b95db0a..61c364d3de 100644 --- a/docs/mf6_dev_guide.md +++ b/docs/mf6_dev_guide.md @@ -1,48 +1,48 @@ -Introduction ------------------------------------------------ - -This file provides an overview of how FloPy for MODFLOW 6 (FPMF6) works under the hood and is intended for anyone who wants to add a new package, new model type, or new features to this library. FloPy library files that support MODFLOW 6 can be found in the flopy/mf6 folder and sub-folders. - -Package Meta-Data and Package Files ------------------------------------------------ - -FPMF6 uses meta-data files located in flopy/mf6/data/dfn to define the model and package types supported by MODFLOW 6. When additional model and package types are added to MODFLOW 6, additional meta-data files can be added to this folder and flopy/mf6/utils/createpackages.py can be run to add new packages to the FloPy library. createpackages.py uses flopy/mf6/data/mfstructure.py to read meta-data files (*.dfn) and use that meta-data to create the package files found in flopy/mf6/modflow (do not directly modify any of the files in this folder, they are all automatically generated). The automatically generated package files contain an interface for accessing package data and data documentation generated from the meta-data files. Additionally, meta-data describing package data types and shapes is stored in the dfn attribute. flopy/mf6/data/mfstructure.py can load structure information using the dfn attribute (instead of loading it from the meta-data files). This allows for flopy to be installed without the dfn files. - -All meta-data can be accessed from the flopy.mf6.data.mfstructure.MFStructure class. This is a singleton class, meaning only one instance of this class can be created. The class contains a sim_struct attribute (which is a flopy.mf6.data.mfstructure.MFSimulationStructure object) which contains all of the meta-data for all package files. Meta-data is stored in a structured format. MFSimulationStructure contains MFModelStructure and MFInputFileStructure objects, which contain the meta-data for each model type and each "simulation-level" package (tdis, ims, ...). MFModelStructure contains model specific meta-data and a MFInputFileStructure object for each package in that model. MFInputFileStructure contains package specific meta-data and a MFBlockStructure object for each block contained in the package file. 
MFBlockStructure contains block specific meta-data and a MFDataStructure object for each data structure defined in the block, and MFDataStructure contains data structure specific meta-data and a MFDataItemStructure object for each data item contained in the data structure. Data structures define the structure of data that is naturally grouped together, for example, the data in a numpy recarray. Data item structures define the structure of specific pieces of data, for example, a single column of a numpy recarray. The meta-data defined in these classes provides all the information FloPy needs to read and write MODFLOW 6 package and name files, create the Flopy interface, and check the data for various constraints. - - -*** -MFStructure --+ MFSimulationStructure --+ MFModelStructure --+ MFInputFileStructure --+ MFBlockStructure --+ MFDataStructure --+ MFDataItemStructure - -Figure 1: FPMF6 generic data structure classes. Lines connecting classes show a relationship defined between the two connected classes. A "*" next to the class means that the class is a sub-class of the connected class. A "+" next to the class means that the class is contained within the connected class. -*** - -Package and Data Base Classes ------------------------------------------------ - -The package and data classes are related as shown below in figure 2. On the top of the figure 2 is the MFPackage class, which is the base class for all packages. MFPackage contains generic methods for building data objects and reading and writing the package to a file. MFPackage contains a MFInputFileStructure object that defines how the data is structured in the package file. MFPackage also contains a dictionary of blocks (MFBlock). The MFBlock class is a generic class used to represent a block within a package. MFBlock contains a MFBlockStructure object that defines how the data in the block is structured. MFBlock also contains a dictionary of data objects (subclasses of MFData) contained in the block and a list of block headers (MFBlockHeader) for that block. Block headers contain the block's name and optionally data items (eg. iprn). - - -*** -MFPackage --+ MFBlock --+ MFData - -MFPackage --+ MFInputFileStructure - -MFBlock --+ MFBlockStructure - -MFData --+ MFDataStructure - -MFData --* MFArray --* MFTransientArray - -MFData --* MFList --* MFTransientList - -MFData --* MFScalar --* MFTransientScalar - -MFTransientData --* MFTransientArray, MFTransientList, MFTransientScalar - -Figure 2: FPMF6 package and data classes. Lines connecting classes show a relationship defined between the two connected classes. A "*" next to the class means that the class is a sub-class of the connected class. A "+" next to the class means that the class is contained within the connected class. -*** - -There are three main types of data, MFList, MFArray, and MFScalar data. All three of these data types are derived from the MFData abstract base class. MFList data is the type of data stored in a spreadsheet with different column headings. For example, the data describing a flow barrier are of type MFList. MFList data is stored in numpy recarrays. MFArray data is data of a single type (eg. all integer values). For example, the model's HK values are of type MFArray. MFArrays are stored in numpy ndarrays. MFScalar data is a single data item. Most MFScalar data are options. All MFData subclasses contain an MFDataStructure object that defines the expected structure and types of the data. - -Transient data, or data defined for each stress period (eg. 
data in the period blocks) is stored in MFTransientArray, MFTransientList, and MFTransientScalar. These classes are sub-classes of MFArray, MFList, and MFScalar, respectively. These classes are also subclasses of MFTransientData. +Introduction +----------------------------------------------- + +This file provides an overview of how FloPy for MODFLOW 6 (FPMF6) works under the hood and is intended for anyone who wants to add a new package, new model type, or new features to this library. FloPy library files that support MODFLOW 6 can be found in the flopy/mf6 folder and sub-folders. + +Package Meta-Data and Package Files +----------------------------------------------- + +FPMF6 uses meta-data files located in flopy/mf6/data/dfn to define the model and package types supported by MODFLOW 6. When additional model and package types are added to MODFLOW 6, additional meta-data files can be added to this folder and flopy/mf6/utils/createpackages.py can be run to add new packages to the FloPy library. createpackages.py uses flopy/mf6/data/mfstructure.py to read meta-data files (*.dfn) and use that meta-data to create the package files found in flopy/mf6/modflow (do not directly modify any of the files in this folder, they are all automatically generated). The automatically generated package files contain an interface for accessing package data and data documentation generated from the meta-data files. Additionally, meta-data describing package data types and shapes is stored in the dfn attribute. flopy/mf6/data/mfstructure.py can load structure information using the dfn attribute (instead of loading it from the meta-data files). This allows for flopy to be installed without the dfn files. + +All meta-data can be accessed from the flopy.mf6.data.mfstructure.MFStructure class. This is a singleton class, meaning only one instance of this class can be created. The class contains a sim_struct attribute (which is a flopy.mf6.data.mfstructure.MFSimulationStructure object) which contains all of the meta-data for all package files. Meta-data is stored in a structured format. MFSimulationStructure contains MFModelStructure and MFInputFileStructure objects, which contain the meta-data for each model type and each "simulation-level" package (tdis, ims, ...). MFModelStructure contains model specific meta-data and a MFInputFileStructure object for each package in that model. MFInputFileStructure contains package specific meta-data and a MFBlockStructure object for each block contained in the package file. MFBlockStructure contains block specific meta-data and a MFDataStructure object for each data structure defined in the block, and MFDataStructure contains data structure specific meta-data and a MFDataItemStructure object for each data item contained in the data structure. Data structures define the structure of data that is naturally grouped together, for example, the data in a numpy recarray. Data item structures define the structure of specific pieces of data, for example, a single column of a numpy recarray. The meta-data defined in these classes provides all the information FloPy needs to read and write MODFLOW 6 package and name files, create the Flopy interface, and check the data for various constraints. + + +*** +MFStructure --+ MFSimulationStructure --+ MFModelStructure --+ MFInputFileStructure --+ MFBlockStructure --+ MFDataStructure --+ MFDataItemStructure + +Figure 1: FPMF6 generic data structure classes. Lines connecting classes show a relationship defined between the two connected classes. 
A "*" next to the class means that the class is a sub-class of the connected class. A "+" next to the class means that the class is contained within the connected class. +*** + +Package and Data Base Classes +----------------------------------------------- + +The package and data classes are related as shown below in figure 2. On the top of the figure 2 is the MFPackage class, which is the base class for all packages. MFPackage contains generic methods for building data objects and reading and writing the package to a file. MFPackage contains a MFInputFileStructure object that defines how the data is structured in the package file. MFPackage also contains a dictionary of blocks (MFBlock). The MFBlock class is a generic class used to represent a block within a package. MFBlock contains a MFBlockStructure object that defines how the data in the block is structured. MFBlock also contains a dictionary of data objects (subclasses of MFData) contained in the block and a list of block headers (MFBlockHeader) for that block. Block headers contain the block's name and optionally data items (eg. iprn). + + +*** +MFPackage --+ MFBlock --+ MFData + +MFPackage --+ MFInputFileStructure + +MFBlock --+ MFBlockStructure + +MFData --+ MFDataStructure + +MFData --* MFArray --* MFTransientArray + +MFData --* MFList --* MFTransientList + +MFData --* MFScalar --* MFTransientScalar + +MFTransientData --* MFTransientArray, MFTransientList, MFTransientScalar + +Figure 2: FPMF6 package and data classes. Lines connecting classes show a relationship defined between the two connected classes. A "*" next to the class means that the class is a sub-class of the connected class. A "+" next to the class means that the class is contained within the connected class. +*** + +There are three main types of data, MFList, MFArray, and MFScalar data. All three of these data types are derived from the MFData abstract base class. MFList data is the type of data stored in a spreadsheet with different column headings. For example, the data describing a flow barrier are of type MFList. MFList data is stored in numpy recarrays. MFArray data is data of a single type (eg. all integer values). For example, the model's HK values are of type MFArray. MFArrays are stored in numpy ndarrays. MFScalar data is a single data item. Most MFScalar data are options. All MFData subclasses contain an MFDataStructure object that defines the expected structure and types of the data. + +Transient data, or data defined for each stress period (eg. data in the period blocks) is stored in MFTransientArray, MFTransientList, and MFTransientScalar. These classes are sub-classes of MFArray, MFList, and MFScalar, respectively. These classes are also subclasses of MFTransientData. diff --git a/examples/Testing/flopy3_Oahu_02_02b.py b/examples/Testing/flopy3_Oahu_02_02b.py index aedf5d8bdd..5ff341a823 100755 --- a/examples/Testing/flopy3_Oahu_02_02b.py +++ b/examples/Testing/flopy3_Oahu_02_02b.py @@ -1,185 +1,185 @@ -# OAHU island-wide GWRP model -# using retarded units (ft) and a coarse test grid -# simulating 1 short stress period (eventually steady-state) -# simulating 1 layer of volcanic rock referenced to sea level -# simulating 50% seawater salinity with SWI2 -# uses GH relation for initial salinity conditions -# apply crude mask of Oahu coastline as ocean boundary -# uniform recharge, add 2 wells, and 1 horizontal flow barrier -# changed origin of grid in plot to upper left corner. 
-# -# uses FLOPY3, modified from FLOPY2 tutorial 2 -# -# Kolja Rotzoll (kolja@usgs.gov), 1/15/2015 -# ---------------------------------------------------- -import os -import sys -import numpy as np -from pylab import * -from PIL import Image, ImageDraw - -flopypath = os.path.join('..', '..') -if flopypath not in sys.path: - print('Adding to sys.path: ', flopypath) - sys.path.append(flopypath) - -import flopy - -workspace = os.path.join('data') -#make sure workspace directory exists -if not os.path.exists(workspace): - os.makedirs(workspace) - - -# flopy objects -modelname = 'Oahu_01' -mf = flopy.modflow.Modflow(modelname, exe_name='mf2005', model_ws=workspace) - -# model domain and grid definition -ztop = 30. # top of layer (ft rel to msl) -botm = -1000. # bottom of layer (ft rel to msl) -nlay = 1 # number of layers (z) -nrow = 18 # number of rows (y) -ncol = 20 # number of columns (x) -delr = 16000 # row width of cell, in ft -delc = delr # column width of cell, in ft -Lx = delr * ncol # length of x model domain, in ft -Ly = delc * nrow # length of y model domain, in ft - -# define the stress periods -nper = 1 -ts = 1 # length of time step, in days -nstp = 1000 # number of time steps -perlen = nstp * ts # length of simulation, in days -steady = True # steady state or transient -dis = flopy.modflow.ModflowDis(mf, nlay, nrow, ncol, delr=delr, delc=delc, - top=ztop, botm=botm, nper=nper, - perlen=perlen, nstp=nstp, steady=steady) - -# hydraulic parameters (lpf or bcf) -hk = 1500. # horizontal K -sy = 0.05 # specific yield -ss = 1.e-5 # specific storage -layavg = 0 # 0 = harmonic mean, 1 = logarithmic mean, -# 2 = arithmetic mean of sat b and log-mean K -laytyp = 1 # 0 = confined, 1 = convertible -lpf = flopy.modflow.ModflowLpf(mf, hk=hk, sy=sy, ss=ss, laytyp=laytyp, layavg=layavg) -laycon = 2 # 0 = confined, 1 = unconfined T varies, -# 2 = convertible T const, 3 = convertible T varies - -# water/land interface (now replaced with coarse Oahu coastline) -polyg = [(6, 13), (3, 6), (6, 6), (9, 3), (12, 8), (14, 9), (16, 13), (13, 14), (11, 13), - (6, 13)] # referenced to row/col -px, py = zip(*polyg) -colcell, rowcell = meshgrid(range(ncol), range(nrow)) -mask = Image.new('L', (ncol, nrow), 0) -ImageDraw.Draw(mask).polygon(polyg, outline=1, fill=1) -index = np.array(mask) - -# BAS package -ibound = np.ones((nlay, nrow, ncol), dtype=np.int32) # active cells -h_start = np.zeros((nrow, ncol), dtype=float) -peak = 15 # maximum expected water level -h_start[:, :][index == 1] = peak # starting heads over land -h_start[:, :][index == 0] = 0 # starting heads over ocean -#print h_start -bas = flopy.modflow.ModflowBas(mf, ibound=ibound, strt=h_start) - -# general head boundary -nghb = ncol * nrow - np.sum(index) -lrchc = np.zeros((nghb, 5)) -lrchc[:, 0] = 0 -lrchc[:, 1] = rowcell[index == 0] -lrchc[:, 2] = colcell[index == 0] -lrchc[:, 3] = 0. 
-lrchc[:, 4] = hk * 10 -#print lrchc -ghb = flopy.modflow.ModflowGhb(mf, stress_period_data={0: lrchc}) - -# recharge & withdrawal -Recharge = 600 * 133680.56 # Total recharge over the island, in ft^3/d -nrech = np.sum(index) -lrcq = np.zeros((nrech, 4)) -lrcq[:, 0] = 0 -lrcq[:, 1] = rowcell[index == 1] -lrcq[:, 2] = colcell[index == 1] -lrcq[:, 3] = Recharge / nrech -lrcq = np.vstack((lrcq, [0, 8, 7, -90 * 133680], [0, 10, 9, -80 * 133680])) # add wells (row/col, zero-based) -#print lrcq -wel = flopy.modflow.ModflowWel(mf, stress_period_data={0: lrcq}) - -# horizontal flow barrier -nhfb = 12 -lrcrch = np.zeros((nhfb, 6)) -lrcrch[:, 0] = 0 # layer -lrcrch[:, 1] = arange(2, nhfb + 2) # row 1 -lrcrch[:, 2] = ones(nhfb) * (ncol / 2 - 1) # col 1 -lrcrch[:, 3] = arange(2, nhfb + 2) # row 2 -lrcrch[:, 4] = ones(nhfb) * (ncol / 2) # col 2 -lrcrch[:, 5] = 0.000001 # hydrologic characteristics -#print lrcrch -hfb = flopy.modflow.ModflowHfb(mf, hfb_data=lrcrch) - -# SWI input -z1 = np.zeros((nrow, ncol)) -z1[index == 1] = peak * (-40) # 50% salinity from starting head -z = array([z1]) # zeta interfaces -#print z -iso = np.zeros((nrow, ncol), dtype=np.int32) # water type of sinks and sources -iso[:, :][index == 1] = 1 # land -iso[:, :][index == 0] = -2 # ocean (ghb) -#print iso -swi = flopy.modflow.ModflowSwi2(mf, nsrf=1, istrat=1, toeslope=0.04, tipslope=0.04, - nu=[0, 0.025], zeta=z, ssz=0.05, isource=iso, nsolver=1) - -# output control & solver -spd = {(0, 0): ['print head'], - (0, 1): [], - (0, 249): ['print head'], - (0, 250): [], - (0, 499): ['print head', 'save ibound'], - (0, 500): [], - (0, 749): ['print head', 'ddreference'], - (0, 750): [], - (0, 999): ['print head']} -#oc = flopy.modflow.ModflowOc(mf, stress_period_data=spd, cboufm='(20i5)') -oc = flopy.modflow.ModflowOc88(mf, save_head_every=100, - item2=[[0, 1, 0, 0]], item3=[[0, 1, 0, 0]]) -pcg = flopy.modflow.ModflowPcg(mf, hclose=1.0e-4, rclose=5.0e-0) # pre-conjugate gradient solver -#de4 = flopy.modflow.ModflowDe4(mf, itmx=1, hclose=1e-5) # direct solver -# -------------------------------------------------------------------- - -# write the model input files -mf.write_input() - -print('\n\nfinished write...\n') - -m2 = flopy.modflow.Modflow.load(modelname, exe_name='mf2005', model_ws=workspace, verbose=True) - -print('\nfinished read...\n') - -oc2 = m2.get_package('OC') - -#print(oc2.stress_period_data.keys()) - -oc2.write_file() - -ax = m2.plot(colorbar=True) -print(len(ax)) -plt.show() - -#m2.dis.plot(colorbar=True) -#plt.show() - -#m2.lpf.plot(colorbar=True) -#plt.show() - -#m2.ghb.plot(key='cond', colorbar=True, masked_values=[0]) -#plt.show() - -#m2.ghb.plot() -#plt.show() - -print('\nthis is the end...my friend\n') - - +# OAHU island-wide GWRP model +# using retarded units (ft) and a coarse test grid +# simulating 1 short stress period (eventually steady-state) +# simulating 1 layer of volcanic rock referenced to sea level +# simulating 50% seawater salinity with SWI2 +# uses GH relation for initial salinity conditions +# apply crude mask of Oahu coastline as ocean boundary +# uniform recharge, add 2 wells, and 1 horizontal flow barrier +# changed origin of grid in plot to upper left corner. 
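# Editor's aside -- a numeric check of the "GH relation" noted above.
# Ghyben-Herzberg puts the fresh/salt interface at a depth of h / nu below
# sea level, with nu = (rho_s - rho_f) / rho_f = 0.025 in this script, so
# the interface sits 40x deeper than the freshwater head. That is the
# source of the `z1[index == 1] = peak * (-40)` line further down
# (peak = 15 ft of head -> interface at -600 ft); the 50%-salinity reading
# is taken from this script's own comments.
nu, peak = 0.025, 15.0
print(peak / nu)  # 600.0, i.e. -z1 = peak * 40 ft below sea level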
+# +# uses FLOPY3, modified from FLOPY2 tutorial 2 +# +# Kolja Rotzoll (kolja@usgs.gov), 1/15/2015 +# ---------------------------------------------------- +import os +import sys +import numpy as np +from pylab import * +from PIL import Image, ImageDraw + +flopypath = os.path.join('..', '..') +if flopypath not in sys.path: + print('Adding to sys.path: ', flopypath) + sys.path.append(flopypath) + +import flopy + +workspace = os.path.join('data') +#make sure workspace directory exists +if not os.path.exists(workspace): + os.makedirs(workspace) + + +# flopy objects +modelname = 'Oahu_01' +mf = flopy.modflow.Modflow(modelname, exe_name='mf2005', model_ws=workspace) + +# model domain and grid definition +ztop = 30. # top of layer (ft rel to msl) +botm = -1000. # bottom of layer (ft rel to msl) +nlay = 1 # number of layers (z) +nrow = 18 # number of rows (y) +ncol = 20 # number of columns (x) +delr = 16000 # row width of cell, in ft +delc = delr # column width of cell, in ft +Lx = delr * ncol # length of x model domain, in ft +Ly = delc * nrow # length of y model domain, in ft + +# define the stress periods +nper = 1 +ts = 1 # length of time step, in days +nstp = 1000 # number of time steps +perlen = nstp * ts # length of simulation, in days +steady = True # steady state or transient +dis = flopy.modflow.ModflowDis(mf, nlay, nrow, ncol, delr=delr, delc=delc, + top=ztop, botm=botm, nper=nper, + perlen=perlen, nstp=nstp, steady=steady) + +# hydraulic parameters (lpf or bcf) +hk = 1500. # horizontal K +sy = 0.05 # specific yield +ss = 1.e-5 # specific storage +layavg = 0 # 0 = harmonic mean, 1 = logarithmic mean, +# 2 = arithmetic mean of sat b and log-mean K +laytyp = 1 # 0 = confined, 1 = convertible +lpf = flopy.modflow.ModflowLpf(mf, hk=hk, sy=sy, ss=ss, laytyp=laytyp, layavg=layavg) +laycon = 2 # 0 = confined, 1 = unconfined T varies, +# 2 = convertible T const, 3 = convertible T varies + +# water/land interface (now replaced with coarse Oahu coastline) +polyg = [(6, 13), (3, 6), (6, 6), (9, 3), (12, 8), (14, 9), (16, 13), (13, 14), (11, 13), + (6, 13)] # referenced to row/col +px, py = zip(*polyg) +colcell, rowcell = meshgrid(range(ncol), range(nrow)) +mask = Image.new('L', (ncol, nrow), 0) +ImageDraw.Draw(mask).polygon(polyg, outline=1, fill=1) +index = np.array(mask) + +# BAS package +ibound = np.ones((nlay, nrow, ncol), dtype=np.int32) # active cells +h_start = np.zeros((nrow, ncol), dtype=float) +peak = 15 # maximum expected water level +h_start[:, :][index == 1] = peak # starting heads over land +h_start[:, :][index == 0] = 0 # starting heads over ocean +#print h_start +bas = flopy.modflow.ModflowBas(mf, ibound=ibound, strt=h_start) + +# general head boundary +nghb = ncol * nrow - np.sum(index) +lrchc = np.zeros((nghb, 5)) +lrchc[:, 0] = 0 +lrchc[:, 1] = rowcell[index == 0] +lrchc[:, 2] = colcell[index == 0] +lrchc[:, 3] = 0. 
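# Editor's aside: the five lrchc columns filled in this block follow
# flopy's ModflowGhb stress_period_data layout
#   [layer, row, column, bhead, cond]   (zero-based cell indices)
# so column 3 (bhead = 0.) pins every ocean cell to sea level, and
# column 4 (cond = 10 * hk, set on the next line) is presumably meant to
# make the head-dependent boundary strong enough to act almost like a
# constant head.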
+lrchc[:, 4] = hk * 10 +#print lrchc +ghb = flopy.modflow.ModflowGhb(mf, stress_period_data={0: lrchc}) + +# recharge & withdrawal +Recharge = 600 * 133680.56 # Total recharge over the island, in ft^3/d +nrech = np.sum(index) +lrcq = np.zeros((nrech, 4)) +lrcq[:, 0] = 0 +lrcq[:, 1] = rowcell[index == 1] +lrcq[:, 2] = colcell[index == 1] +lrcq[:, 3] = Recharge / nrech +lrcq = np.vstack((lrcq, [0, 8, 7, -90 * 133680], [0, 10, 9, -80 * 133680])) # add wells (row/col, zero-based) +#print lrcq +wel = flopy.modflow.ModflowWel(mf, stress_period_data={0: lrcq}) + +# horizontal flow barrier +nhfb = 12 +lrcrch = np.zeros((nhfb, 6)) +lrcrch[:, 0] = 0 # layer +lrcrch[:, 1] = arange(2, nhfb + 2) # row 1 +lrcrch[:, 2] = ones(nhfb) * (ncol / 2 - 1) # col 1 +lrcrch[:, 3] = arange(2, nhfb + 2) # row 2 +lrcrch[:, 4] = ones(nhfb) * (ncol / 2) # col 2 +lrcrch[:, 5] = 0.000001 # hydrologic characteristics +#print lrcrch +hfb = flopy.modflow.ModflowHfb(mf, hfb_data=lrcrch) + +# SWI input +z1 = np.zeros((nrow, ncol)) +z1[index == 1] = peak * (-40) # 50% salinity from starting head +z = array([z1]) # zeta interfaces +#print z +iso = np.zeros((nrow, ncol), dtype=np.int32) # water type of sinks and sources +iso[:, :][index == 1] = 1 # land +iso[:, :][index == 0] = -2 # ocean (ghb) +#print iso +swi = flopy.modflow.ModflowSwi2(mf, nsrf=1, istrat=1, toeslope=0.04, tipslope=0.04, + nu=[0, 0.025], zeta=z, ssz=0.05, isource=iso, nsolver=1) + +# output control & solver +spd = {(0, 0): ['print head'], + (0, 1): [], + (0, 249): ['print head'], + (0, 250): [], + (0, 499): ['print head', 'save ibound'], + (0, 500): [], + (0, 749): ['print head', 'ddreference'], + (0, 750): [], + (0, 999): ['print head']} +#oc = flopy.modflow.ModflowOc(mf, stress_period_data=spd, cboufm='(20i5)') +oc = flopy.modflow.ModflowOc88(mf, save_head_every=100, + item2=[[0, 1, 0, 0]], item3=[[0, 1, 0, 0]]) +pcg = flopy.modflow.ModflowPcg(mf, hclose=1.0e-4, rclose=5.0e-0) # pre-conjugate gradient solver +#de4 = flopy.modflow.ModflowDe4(mf, itmx=1, hclose=1e-5) # direct solver +# -------------------------------------------------------------------- + +# write the model input files +mf.write_input() + +print('\n\nfinished write...\n') + +m2 = flopy.modflow.Modflow.load(modelname, exe_name='mf2005', model_ws=workspace, verbose=True) + +print('\nfinished read...\n') + +oc2 = m2.get_package('OC') + +#print(oc2.stress_period_data.keys()) + +oc2.write_file() + +ax = m2.plot(colorbar=True) +print(len(ax)) +plt.show() + +#m2.dis.plot(colorbar=True) +#plt.show() + +#m2.lpf.plot(colorbar=True) +#plt.show() + +#m2.ghb.plot(key='cond', colorbar=True, masked_values=[0]) +#plt.show() + +#m2.ghb.plot() +#plt.show() + +print('\nthis is the end...my friend\n') + + diff --git a/examples/scripts/flopy_swi2_ex1.py b/examples/scripts/flopy_swi2_ex1.py index 706966aabc..170e8d29c7 100755 --- a/examples/scripts/flopy_swi2_ex1.py +++ b/examples/scripts/flopy_swi2_ex1.py @@ -1,207 +1,207 @@ -from __future__ import print_function - -import os -import sys -import math - -import numpy as np - -import flopy - -import matplotlib.pyplot as plt - -# --modify default matplotlib settings -updates = {'font.family': ['Univers 57 Condensed', 'Arial'], - 'mathtext.default': 'regular', - 'pdf.compression': 0, - 'pdf.fonttype': 42, - 'legend.fontsize': 7, - 'axes.labelsize': 8, - 'xtick.labelsize': 7, - 'ytick.labelsize': 7} -plt.rcParams.update(updates) - - -def run(): - workspace = 'swiex1' - - cleanFiles = False - fext = 'png' - narg = len(sys.argv) - iarg = 0 - if narg > 1: - while iarg < 
narg - 1: - iarg += 1 - basearg = sys.argv[iarg].lower() - if basearg == '--clean': - cleanFiles = True - elif basearg == '--pdf': - fext = 'pdf' - - if cleanFiles: - print('cleaning all files') - print('excluding *.py files') - files = os.listdir(workspace) - for f in files: - if os.path.isdir(f): - continue - if '.py' != os.path.splitext(f)[1].lower(): - print(' removing...{}'.format(os.path.basename(f))) - os.remove(os.path.join(workspace, f)) - return 1 - - modelname = 'swiex1' - exe_name = 'mf2005' - - nlay = 1 - nrow = 1 - ncol = 50 - - delr = 5. - delc = 1. - - ibound = np.ones((nrow, ncol), np.int) - ibound[0, -1] = -1 - - # create initial zeta surface - z = np.zeros((nrow, ncol), np.float) - z[0, 16:24] = np.arange(-2.5, -40, -5) - z[0, 24:] = -40 - z = [z] - # create isource for SWI2 - isource = np.ones((nrow, ncol), np.int) - isource[0, 0] = 2 - - ocdict = {} - for idx in range(49, 200, 50): - key = (0, idx) - ocdict[key] = ['save head', 'save budget'] - key = (0, idx + 1) - ocdict[key] = [] - - # create flopy modflow object - ml = flopy.modflow.Modflow(modelname, version='mf2005', exe_name=exe_name, - model_ws=workspace) - # create flopy modflow package objects - discret = flopy.modflow.ModflowDis(ml, nlay=nlay, nrow=nrow, ncol=ncol, - delr=delr, delc=delc, - top=0, botm=[-40.0], - perlen=400, nstp=200) - bas = flopy.modflow.ModflowBas(ml, ibound=ibound, strt=0.0) - lpf = flopy.modflow.ModflowLpf(ml, hk=2., vka=2.0, vkcb=0, laytyp=0, - layavg=0) - wel = flopy.modflow.ModflowWel(ml, stress_period_data={0: [(0, 0, 0, 1)]}) - swi = flopy.modflow.ModflowSwi2(ml, iswizt=55, npln=1, istrat=1, - toeslope=0.2, tipslope=0.2, nu=[0, 0.025], - zeta=z, ssz=0.2, isource=isource, - nsolver=1) - oc = flopy.modflow.ModflowOc(ml, stress_period_data=ocdict) - pcg = flopy.modflow.ModflowPcg(ml) - # create model files - ml.write_input() - # run the model - m = ml.run_model(silent=False) - # read model heads - headfile = os.path.join(workspace, '{}.hds'.format(modelname)) - hdobj = flopy.utils.HeadFile(headfile) - head = hdobj.get_alldata() - head = np.array(head) - # read model zeta - zetafile = os.path.join(workspace, '{}.zta'.format(modelname)) - zobj = flopy.utils.CellBudgetFile(zetafile) - zkstpkper = zobj.get_kstpkper() - zeta = [] - for kk in zkstpkper: - zeta.append(zobj.get_data(kstpkper=kk, text=' ZETASRF 1')[0]) - zeta = np.array(zeta) - - x = np.arange(0.5 * delr, ncol * delr, delr) - - # Wilson and Sa Da Costa - k = 2.0 - n = 0.2 - nu = 0.025 - H = 40.0 - tzero = H * n / (k * nu) / 4.0 - Ltoe = np.zeros(4) - v = 0.125 - t = np.arange(100, 500, 100) - - fwid = 7.00 - fhgt = 3.50 - flft = 0.125 - frgt = 0.95 - fbot = 0.125 - ftop = 0.925 - - fig = plt.figure(figsize=(fwid, fhgt), facecolor='w') - fig.subplots_adjust(wspace=0.25, hspace=0.25, left=flft, right=frgt, - bottom=fbot, top=ftop) - - ax = fig.add_subplot(211) - ax.text(-0.075, 1.05, 'A', transform=ax.transAxes, va='center', - ha='center', - size='8') - ax.plot([80, 120], [0, -40], 'k') - ax.set_xlim(0, 250) - ax.set_ylim(-40, 0) - ax.set_yticks(np.arange(-40, 1, 10)) - ax.text(50, -10, 'salt') - ax.text(130, -10, 'fresh') - a = ax.annotate("", xy=(50, -25), xytext=(30, -25), - arrowprops=dict(arrowstyle='->', fc='k')) - ax.text(40, -22, 'groundwater flow velocity=0.125 m/d', ha='center', - size=7) - ax.set_ylabel('Elevation, in meters') - - ax = fig.add_subplot(212) - ax.text(-0.075, 1.05, 'B', transform=ax.transAxes, va='center', - ha='center', - size='8') - - for i in range(4): - Ltoe[i] = H * math.sqrt(k * nu * (t[i] + tzero) 
/ n / H) - ax.plot([100 - Ltoe[i] + v * t[i], 100 + Ltoe[i] + v * t[i]], [0, -40], - 'k', label='_None') - - for i in range(4): - zi = zeta[i, 0, 0, :] - p = (zi < 0) & (zi > -39.9) - ax.plot(x[p], zeta[i, 0, 0, p], 'bo', - markersize=3, markeredgecolor='blue', markerfacecolor='None', - label='_None') - ipos = 0 - for jdx, t in enumerate(zeta[i, 0, 0, :]): - if t > -39.9: - ipos = jdx - ax.text(x[ipos], -37.75, '{0} days'.format(((i + 1) * 100)), size=5, - ha='left', va='center') - - # fake items for labels - ax.plot([-100., -100], [-100., -100], 'k', label='Analytical solution') - ax.plot([-100., -100], [-100., -100], 'bo', markersize=3, - markeredgecolor='blue', markerfacecolor='None', label='SWI2') - # legend - leg = ax.legend(loc='upper right', numpoints=1) - leg._drawFrame = False - # axes - ax.set_xlim(0, 250) - ax.set_ylim(-40, 0) - ax.set_yticks(np.arange(-40, 1, 10)) - a = ax.annotate("", xy=(50, -25), xytext=(30, -25), - arrowprops=dict(arrowstyle='->', fc='k')) - ax.text(40, -22, 'groundwater flow velocity=0.125 m/d', ha='center', - size=7) - ax.set_ylabel('Elevation, in meters') - ax.set_xlabel('Horizontal distance, in meters') - - outfig = os.path.join(workspace, 'Figure06_swi2ex1.{0}'.format(fext)) - fig.savefig(outfig, dpi=300) - print('created...', outfig) - - return 0 - - -if __name__ == "__main__": - success = run() - sys.exit(success) +from __future__ import print_function + +import os +import sys +import math + +import numpy as np + +import flopy + +import matplotlib.pyplot as plt + +# --modify default matplotlib settings +updates = {'font.family': ['Univers 57 Condensed', 'Arial'], + 'mathtext.default': 'regular', + 'pdf.compression': 0, + 'pdf.fonttype': 42, + 'legend.fontsize': 7, + 'axes.labelsize': 8, + 'xtick.labelsize': 7, + 'ytick.labelsize': 7} +plt.rcParams.update(updates) + + +def run(): + workspace = 'swiex1' + + cleanFiles = False + fext = 'png' + narg = len(sys.argv) + iarg = 0 + if narg > 1: + while iarg < narg - 1: + iarg += 1 + basearg = sys.argv[iarg].lower() + if basearg == '--clean': + cleanFiles = True + elif basearg == '--pdf': + fext = 'pdf' + + if cleanFiles: + print('cleaning all files') + print('excluding *.py files') + files = os.listdir(workspace) + for f in files: + if os.path.isdir(f): + continue + if '.py' != os.path.splitext(f)[1].lower(): + print(' removing...{}'.format(os.path.basename(f))) + os.remove(os.path.join(workspace, f)) + return 1 + + modelname = 'swiex1' + exe_name = 'mf2005' + + nlay = 1 + nrow = 1 + ncol = 50 + + delr = 5. + delc = 1. 
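# Editor's aside on the initial zeta surface built a few lines below:
# np.arange(-2.5, -40, -5) gives exactly 8 values,
#   [-2.5, -7.5, -12.5, -17.5, -22.5, -27.5, -32.5, -37.5],
# one per column of z[0, 16:24], ramping the fresh/salt interface
# linearly downward; z[0, 24:] = -40 then rests it on the aquifer
# bottom for the remaining columns.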
+ + ibound = np.ones((nrow, ncol), np.int) + ibound[0, -1] = -1 + + # create initial zeta surface + z = np.zeros((nrow, ncol), np.float) + z[0, 16:24] = np.arange(-2.5, -40, -5) + z[0, 24:] = -40 + z = [z] + # create isource for SWI2 + isource = np.ones((nrow, ncol), np.int) + isource[0, 0] = 2 + + ocdict = {} + for idx in range(49, 200, 50): + key = (0, idx) + ocdict[key] = ['save head', 'save budget'] + key = (0, idx + 1) + ocdict[key] = [] + + # create flopy modflow object + ml = flopy.modflow.Modflow(modelname, version='mf2005', exe_name=exe_name, + model_ws=workspace) + # create flopy modflow package objects + discret = flopy.modflow.ModflowDis(ml, nlay=nlay, nrow=nrow, ncol=ncol, + delr=delr, delc=delc, + top=0, botm=[-40.0], + perlen=400, nstp=200) + bas = flopy.modflow.ModflowBas(ml, ibound=ibound, strt=0.0) + lpf = flopy.modflow.ModflowLpf(ml, hk=2., vka=2.0, vkcb=0, laytyp=0, + layavg=0) + wel = flopy.modflow.ModflowWel(ml, stress_period_data={0: [(0, 0, 0, 1)]}) + swi = flopy.modflow.ModflowSwi2(ml, iswizt=55, npln=1, istrat=1, + toeslope=0.2, tipslope=0.2, nu=[0, 0.025], + zeta=z, ssz=0.2, isource=isource, + nsolver=1) + oc = flopy.modflow.ModflowOc(ml, stress_period_data=ocdict) + pcg = flopy.modflow.ModflowPcg(ml) + # create model files + ml.write_input() + # run the model + m = ml.run_model(silent=False) + # read model heads + headfile = os.path.join(workspace, '{}.hds'.format(modelname)) + hdobj = flopy.utils.HeadFile(headfile) + head = hdobj.get_alldata() + head = np.array(head) + # read model zeta + zetafile = os.path.join(workspace, '{}.zta'.format(modelname)) + zobj = flopy.utils.CellBudgetFile(zetafile) + zkstpkper = zobj.get_kstpkper() + zeta = [] + for kk in zkstpkper: + zeta.append(zobj.get_data(kstpkper=kk, text=' ZETASRF 1')[0]) + zeta = np.array(zeta) + + x = np.arange(0.5 * delr, ncol * delr, delr) + + # Wilson and Sa Da Costa + k = 2.0 + n = 0.2 + nu = 0.025 + H = 40.0 + tzero = H * n / (k * nu) / 4.0 + Ltoe = np.zeros(4) + v = 0.125 + t = np.arange(100, 500, 100) + + fwid = 7.00 + fhgt = 3.50 + flft = 0.125 + frgt = 0.95 + fbot = 0.125 + ftop = 0.925 + + fig = plt.figure(figsize=(fwid, fhgt), facecolor='w') + fig.subplots_adjust(wspace=0.25, hspace=0.25, left=flft, right=frgt, + bottom=fbot, top=ftop) + + ax = fig.add_subplot(211) + ax.text(-0.075, 1.05, 'A', transform=ax.transAxes, va='center', + ha='center', + size='8') + ax.plot([80, 120], [0, -40], 'k') + ax.set_xlim(0, 250) + ax.set_ylim(-40, 0) + ax.set_yticks(np.arange(-40, 1, 10)) + ax.text(50, -10, 'salt') + ax.text(130, -10, 'fresh') + a = ax.annotate("", xy=(50, -25), xytext=(30, -25), + arrowprops=dict(arrowstyle='->', fc='k')) + ax.text(40, -22, 'groundwater flow velocity=0.125 m/d', ha='center', + size=7) + ax.set_ylabel('Elevation, in meters') + + ax = fig.add_subplot(212) + ax.text(-0.075, 1.05, 'B', transform=ax.transAxes, va='center', + ha='center', + size='8') + + for i in range(4): + Ltoe[i] = H * math.sqrt(k * nu * (t[i] + tzero) / n / H) + ax.plot([100 - Ltoe[i] + v * t[i], 100 + Ltoe[i] + v * t[i]], [0, -40], + 'k', label='_None') + + for i in range(4): + zi = zeta[i, 0, 0, :] + p = (zi < 0) & (zi > -39.9) + ax.plot(x[p], zeta[i, 0, 0, p], 'bo', + markersize=3, markeredgecolor='blue', markerfacecolor='None', + label='_None') + ipos = 0 + for jdx, t in enumerate(zeta[i, 0, 0, :]): + if t > -39.9: + ipos = jdx + ax.text(x[ipos], -37.75, '{0} days'.format(((i + 1) * 100)), size=5, + ha='left', va='center') + + # fake items for labels + ax.plot([-100., -100], [-100., -100], 'k', 
label='Analytical solution') + ax.plot([-100., -100], [-100., -100], 'bo', markersize=3, + markeredgecolor='blue', markerfacecolor='None', label='SWI2') + # legend + leg = ax.legend(loc='upper right', numpoints=1) + leg._drawFrame = False + # axes + ax.set_xlim(0, 250) + ax.set_ylim(-40, 0) + ax.set_yticks(np.arange(-40, 1, 10)) + a = ax.annotate("", xy=(50, -25), xytext=(30, -25), + arrowprops=dict(arrowstyle='->', fc='k')) + ax.text(40, -22, 'groundwater flow velocity=0.125 m/d', ha='center', + size=7) + ax.set_ylabel('Elevation, in meters') + ax.set_xlabel('Horizontal distance, in meters') + + outfig = os.path.join(workspace, 'Figure06_swi2ex1.{0}'.format(fext)) + fig.savefig(outfig, dpi=300) + print('created...', outfig) + + return 0 + + +if __name__ == "__main__": + success = run() + sys.exit(success) diff --git a/flopy/__init__.py b/flopy/__init__.py index 4beccf0931..f6227cbac0 100644 --- a/flopy/__init__.py +++ b/flopy/__init__.py @@ -1,38 +1,38 @@ -""" -The FloPy package consists of a set of Python scripts to run MODFLOW, MT3D, -SEAWAT and other MODFLOW-related groundwater programs. FloPy enables you to -run all these programs with Python scripts. The FloPy project started in 2009 -and has grown to a fairly complete set of scripts with a growing user base. - -This version of Flopy (FloPy3) was released in December 2015 with a few great -enhancements that make FloPy3 backwards incompatible. The first significant -change is that FloPy3 uses zero-based indexing everywhere, which means that -all layers, rows, columns, and stress periods start numbering at zero. This -change was made for consistency as all array-indexing was already zero-based -(as are all arrays in Python). This may take a little getting-used-to, but -hopefully will avoid confusion in the future. A second significant enhancement -concerns the ability to specify time-varying boundary conditions that are -specified with a sequence of layer-row-column-values, like the WEL and GHB -packages. A variety of flexible and readable ways have been implemented to -specify these boundary conditions. FloPy is an open-source project and any -assistance is welcomed. Please email the development team if you want to -contribute. - -""" - -from .version import __version__, __author__, __author_email__ - -# imports -from . import modflow -from . import mt3d -from . import seawat -from . import modpath -from . import modflowlgr -from . import utils -from . import plot -from . import export -from . import pest -from . import mf6 -from . import discretization - -from .mbase import run_model, which +""" +The FloPy package consists of a set of Python scripts to run MODFLOW, MT3D, +SEAWAT and other MODFLOW-related groundwater programs. FloPy enables you to +run all these programs with Python scripts. The FloPy project started in 2009 +and has grown to a fairly complete set of scripts with a growing user base. + +This version of Flopy (FloPy3) was released in December 2015 with a few great +enhancements that make FloPy3 backwards incompatible. The first significant +change is that FloPy3 uses zero-based indexing everywhere, which means that +all layers, rows, columns, and stress periods start numbering at zero. This +change was made for consistency as all array-indexing was already zero-based +(as are all arrays in Python). This may take a little getting-used-to, but +hopefully will avoid confusion in the future. 
A second significant enhancement
+concerns the ability to specify time-varying boundary conditions that are
+specified with a sequence of layer-row-column-values, like the WEL and GHB
+packages. A variety of flexible and readable ways have been implemented to
+specify these boundary conditions. FloPy is an open-source project and any
+assistance is welcomed. Please email the development team if you want to
+contribute.
+
+"""
+
+from .version import __version__, __author__, __author_email__
+
+# imports
+from . import modflow
+from . import mt3d
+from . import seawat
+from . import modpath
+from . import modflowlgr
+from . import utils
+from . import plot
+from . import export
+from . import pest
+from . import mf6
+from . import discretization
+
+from .mbase import run_model, which
diff --git a/flopy/datbase.py b/flopy/datbase.py
index c3cdd1482e..d96ce39176 100644
--- a/flopy/datbase.py
+++ b/flopy/datbase.py
@@ -1,82 +1,82 @@
-import abc
-from enum import Enum
-
-
-class DataType(Enum):
-    array2d = 1
-    array3d = 2
-    transient2d = 3
-    transient3d = 4
-    list = 5
-    transientlist = 6
-    scalar = 7
-    transientscalar = 8
-
-
-class DataInterface(object):
-    @property
-    @abc.abstractmethod
-    def data_type(self):
-        raise NotImplementedError(
-            'must define data_type in child '
-            'class to use this base class')
-
-    @property
-    @abc.abstractmethod
-    def dtype(self):
-        raise NotImplementedError(
-            'must define dtype in child '
-            'class to use this base class')
-
-    @property
-    @abc.abstractmethod
-    def array(self):
-        raise NotImplementedError(
-            'must define array in child '
-            'class to use this base class')
-
-    @property
-    @abc.abstractmethod
-    def name(self):
-        raise NotImplementedError(
-            'must define name in child '
-            'class to use this base class')
-
-    @property
-    @abc.abstractmethod
-    def model(self):
-        raise NotImplementedError(
-            'must define model in child '
-            'class to use this base class')
-
-    @property
-    @abc.abstractmethod
-    def plotable(self):
-        raise NotImplementedError(
-            'must define plotable in child '
-            'class to use this base class')
-
-
-class DataListInterface(object):
-    @property
-    @abc.abstractmethod
-    def package(self):
-        raise NotImplementedError(
-            'must define package in child '
-            'class to use this base class')
-
-    @property
-    @abc.abstractmethod
-    def to_array(self, kper=0, mask=False):
-        raise NotImplementedError(
-            'must define to_array in child '
-            'class to use this base class')
-
-    @abc.abstractmethod
-    def masked_4D_arrays_itr(self):
-        raise NotImplementedError(
-            'must define masked_4D_arrays_itr in child '
-            'class to use this base class')
+import abc
+from enum import Enum
+
+
+class DataType(Enum):
+    array2d = 1
+    array3d = 2
+    transient2d = 3
+    transient3d = 4
+    list = 5
+    transientlist = 6
+    scalar = 7
+    transientscalar = 8
+
+
+class DataInterface(object):
+    @property
+    @abc.abstractmethod
+    def data_type(self):
+        raise NotImplementedError(
+            'must define data_type in child '
+            'class to use this base class')
+
+    @property
+    @abc.abstractmethod
+    def dtype(self):
+        raise NotImplementedError(
+            'must define dtype in child '
+            'class to use this base class')
+
+    @property
+    @abc.abstractmethod
+    def array(self):
+        raise NotImplementedError(
+            'must define array in child '
+            'class to use this base class')
+
+    @property
+    @abc.abstractmethod
+    def name(self):
+        raise NotImplementedError(
+            'must define name in child '
+            'class to use this base class')
+
+    @property
+    @abc.abstractmethod
+    def model(self):
+        raise NotImplementedError(
+            'must define model in child '
+            'class to use this base class')
+
+    @property
+    @abc.abstractmethod
+    def plotable(self):
+        raise NotImplementedError(
+            'must define plotable in child '
+            'class to use this base class')
+
+
+class DataListInterface(object):
+    @property
+    @abc.abstractmethod
+    def package(self):
+        raise NotImplementedError(
+            'must define package in child '
+            'class to use this base class')
+
+    @property
+    @abc.abstractmethod
+    def to_array(self, kper=0, mask=False):
+        raise NotImplementedError(
+            'must define to_array in child '
+            'class to use this base class')
+
+    @abc.abstractmethod
+    def masked_4D_arrays_itr(self):
+        raise NotImplementedError(
+            'must define masked_4D_arrays_itr in child '
+            'class to use this base class')
diff --git a/flopy/discretization/__init__.py b/flopy/discretization/__init__.py
index a409b19ff6..ed9801f894 100644
--- a/flopy/discretization/__init__.py
+++ b/flopy/discretization/__init__.py
@@ -1,3 +1,3 @@
-from .structuredgrid import StructuredGrid
-from .vertexgrid import VertexGrid
+from .structuredgrid import StructuredGrid
+from .vertexgrid import VertexGrid
 from .unstructuredgrid import UnstructuredGrid
\ No newline at end of file
diff --git a/flopy/discretization/grid.py b/flopy/discretization/grid.py
index 42d9ae7c6d..6e981885f7 100644
--- a/flopy/discretization/grid.py
+++ b/flopy/discretization/grid.py
@@ -1,573 +1,573 @@
-import numpy as np
-import copy, os
-import warnings
-from ..utils import geometry
-
-
-class CachedData(object):
-    def __init__(self, data):
-        self._data = data
-        self.out_of_date = False
-
-    @property
-    def data_nocopy(self):
-        return self._data
-
-    @property
-    def data(self):
-        return copy.deepcopy(self._data)
-
-    def update_data(self, data):
-        self._data = data
-        self.out_of_date = False
-
-
-class Grid(object):
-    """
-    Base class for a structured or unstructured model grid
-
-    Parameters
-    ----------
-    grid_type : enumeration
-        type of model grid ('structured', 'vertex_layered',
-        'vertex_unlayered')
-    top : ndarray(np.float)
-        top elevations of cells in topmost layer
-    botm : ndarray(np.float)
-        bottom elevations of all cells
-    idomain : ndarray(np.int)
-        ibound/idomain value for each cell
-    lenuni : ndarray(np.int)
-        model length units
-    origin_loc : str
-        Corner of the model grid that is the model origin
-        'ul' (upper left corner) or 'll' (lower left corner)
-    origin_x : float
-        x coordinate of the origin point (lower left corner of model grid)
-        in the spatial reference coordinate system
-    origin_y : float
-        y coordinate of the
origin point in the spatial reference coordinate - system - rotation : float - rotation angle of model grid, as it is rotated around the origin point - xgrid : ndarray - returns numpy meshgrid of x edges in reference frame defined by - point_type - ygrid : ndarray - returns numpy meshgrid of y edges in reference frame defined by - point_type - zgrid : ndarray - returns numpy meshgrid of z edges in reference frame defined by - point_type - xcenters : ndarray - returns x coordinate of cell centers - ycenters : ndarray - returns y coordinate of cell centers - ycenters : ndarray - returns z coordinate of cell centers - xyzgrid : [ndarray, ndarray, ndarray] - returns the location of grid edges of all model cells. if the model - grid contains spatial reference information, the grid edges are in the - coordinate system provided by the spatial reference information. - returns a list of three ndarrays for the x, y, and z coordinates - xyzcellcenters : [ndarray, ndarray, ndarray] - returns the cell centers of all model cells in the model grid. if - the model grid contains spatial reference information, the cell centers - are in the coordinate system provided by the spatial reference - information. otherwise the cell centers are based on a 0,0 location - for the upper left corner of the model grid. returns a list of three - ndarrays for the x, y, and z coordinates - - Methods - ---------- - get_coords(x, y) - transform point or array of points x, y from model coordinates to - spatial coordinates - grid_lines : (point_type=PointType.spatialxyz) : list - returns the model grid lines in a list. each line is returned as a - list containing two tuples in the format [(x1,y1), (x2,y2)] where - x1,y1 and x2,y2 are the endpoints of the line. - xyvertices : (point_type) : ndarray - 1D array of x and y coordinates of cell vertices for whole grid - (single layer) in C-style (row-major) order - (same as np.ravel()) - intersect(x, y, local) - returns the row and column of the grid that the x, y point is in - - See Also - -------- - - Notes - ----- - - Examples - -------- - """ - def __init__(self, grid_type=None, top=None, botm=None, idomain=None, - lenuni=None, epsg=None, proj4=None, prj=None, xoff=0.0, yoff=0.0, - angrot=0.0): - lenunits = {0: "undefined", 1: "feet", 2: "meters", 3: "centimeters"} - LENUNI = {"u": 0, "f": 1, "m": 2, "c": 3} - self.use_ref_coords = True - self._grid_type = grid_type - if top is not None: - top = top.astype(float) - self._top = top - if botm is not None: - botm = botm.astype(float) - self._botm = botm - self._idomain = idomain - - if lenuni is None: - lenuni = 0 - elif isinstance(lenuni, str): - lenuni = LENUNI[lenuni.lower()[0]] - self._lenuni = lenuni - - self._units = lenunits[self._lenuni] - self._epsg = epsg - self._proj4 = proj4 - self._prj = prj - self._xoff = xoff - self._yoff = yoff - if angrot is None: - angrot = 0.0 - self._angrot = angrot - self._cache_dict = {} - self._copy_cache = True - - ################################### - # access to basic grid properties - ################################### - def __repr__(self): - items = [] - if self.xoffset is not None and self.yoffset is not None \ - and self.angrot is not None: - items += [ - "xll:" + str(self.xoffset), - "yll:" + str(self.yoffset), - "rotation:" + str(self.angrot)] - if self.proj4 is not None: - items.append("proj4_str:" + str(self.proj4)) - if self.units is not None: - items.append("units:" + str(self.units)) - if self.lenuni is not None: - items.append("lenuni:" + str(self.lenuni)) - return '; 
'.join(items) - - @property - def is_valid(self): - return True - - @property - def is_complete(self): - if self._top is not None and self._botm is not None and \ - self._idomain is not None: - return True - return False - - @property - def grid_type(self): - return self._grid_type - - @property - def xoffset(self): - return self._xoff - - @property - def yoffset(self): - return self._yoff - - @property - def angrot(self): - return self._angrot - - @property - def angrot_radians(self): - return self._angrot * np.pi / 180. - - @property - def epsg(self): - return self._epsg - - @epsg.setter - def epsg(self, epsg): - self._epsg = epsg - - @property - def proj4(self): - proj4 = None - if self._proj4 is not None: - if "epsg" in self._proj4.lower(): - proj4 = self._proj4 - # set the epsg if proj4 specifies it - tmp = [i for i in self._proj4.split() if - 'epsg' in i.lower()] - self._epsg = int(tmp[0].split(':')[1]) - else: - proj4 = self._proj4 - elif self.epsg is not None: - proj4 = 'epsg:{}'.format(self.epsg) - return proj4 - - @proj4.setter - def proj4(self, proj4): - self._proj4 = proj4 - - @property - def prj(self): - return self._prj - - @prj.setter - def prj(self, prj): - self._proj4 = prj - - @property - def top(self): - return copy.deepcopy(self._top) - - @property - def botm(self): - return copy.deepcopy(self._botm) - - @property - def top_botm(self): - new_top = np.expand_dims(self._top, 0) - return np.concatenate((new_top, self._botm), axis=0) - - @property - def units(self): - return self._units - - @property - def lenuni(self): - return self._lenuni - - @property - def idomain(self): - return copy.deepcopy(self._idomain) - - @property - def nnodes(self): - raise NotImplementedError( - 'must define nnodes in child class') - - @property - def shape(self): - raise NotImplementedError( - 'must define shape in child class') - - @property - def extent(self): - raise NotImplementedError( - 'must define extent in child class') - - @property - def xyzextent(self): - return (np.min(self.xyzvertices[0]), np.max(self.xyzvertices[0]), - np.min(self.xyzvertices[1]), np.max(self.xyzvertices[1]), - np.min(self.xyzvertices[2]), np.max(self.xyzvertices[2])) - - @property - def grid_lines(self): - raise NotImplementedError( - 'must define grid_lines in child class') - - @property - def xcellcenters(self): - return self.xyzcellcenters[0] - - @property - def ycellcenters(self): - return self.xyzcellcenters[1] - - @property - def zcellcenters(self): - return self.xyzcellcenters[2] - - @property - def xyzcellcenters(self): - raise NotImplementedError( - 'must define get_cellcenters in child ' - 'class to use this base class') - - @property - def xvertices(self): - return self.xyzvertices[0] - - @property - def yvertices(self): - return self.xyzvertices[1] - - @property - def zvertices(self): - return self.xyzvertices[2] - - @property - def xyzvertices(self): - raise NotImplementedError( - 'must define xyzvertices in child class') - - #@property - #def indices(self): - # raise NotImplementedError( - # 'must define indices in child ' - # 'class to use this base class') - - def get_coords(self, x, y): - """ - Given x and y array-like values, apply rotation, scale and offset, - to convert them from model coordinates to real-world coordinates. 
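As a cross-check on the transform pair defined here, a minimal standalone sketch of what get_coords computes, assuming flopy.utils.geometry.rotate rotates points about (xoff, yoff) by the given angle (model_to_world is a hypothetical name, not part of the API):

    import numpy as np

    def model_to_world(x, y, xoff, yoff, angrot_deg):
        # offset first, then rotate about the world-space origin point,
        # mirroring the order of operations in get_coords above
        theta = np.deg2rad(angrot_deg)
        x = np.asarray(x, dtype=float) + xoff
        y = np.asarray(y, dtype=float) + yoff
        xrot = xoff + np.cos(theta) * (x - xoff) - np.sin(theta) * (y - yoff)
        yrot = yoff + np.sin(theta) * (x - xoff) + np.cos(theta) * (y - yoff)
        return xrot, yrot

get_local_coords, defined next, inverts this: rotate by -angrot about the same point, then subtract the offsets.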
- """ - if isinstance(x, list): - x = np.array(x) - y = np.array(y) - if not np.isscalar(x): - x, y = x.copy(), y.copy() - - x += self._xoff - y += self._yoff - return geometry.rotate(x, y, self._xoff, self._yoff, - self.angrot_radians) - - def get_local_coords(self, x, y): - """ - Given x and y array-like values, apply rotation, scale and offset, - to convert them from real-world coordinates to model coordinates. - """ - if isinstance(x, list): - x = np.array(x) - y = np.array(y) - if not np.isscalar(x): - x, y = x.copy(), y.copy() - - x, y = geometry.rotate(x, y, self._xoff, self._yoff, - -self.angrot_radians) - x -= self._xoff - y -= self._yoff - - return x, y - - def intersect(self, x, y, local=False, forgive=False): - if not local: - return self.get_local_coords(x, y) - else: - return x, y - - def set_coord_info(self, xoff=0.0, yoff=0.0, angrot=0.0, epsg=None, - proj4=None, merge_coord_info=True): - if merge_coord_info: - if xoff is None: - xoff = self._xoff - if yoff is None: - yoff = self._yoff - if angrot is None: - angrot = self._angrot - if epsg is None: - epsg = self._epsg - if proj4 is None: - proj4 = self._proj4 - - self._xoff = xoff - self._yoff = yoff - self._angrot = angrot - self._epsg = epsg - self._proj4 = proj4 - self._require_cache_updates() - - def load_coord_info(self, namefile=None, reffile='usgs.model.reference'): - """Attempts to load spatial reference information from - the following files (in order): - 1) usgs.model.reference - 2) NAM file (header comment) - 3) defaults - """ - reffile = os.path.join(os.path.split(namefile)[0], reffile) - # try to load reference file - if not self.read_usgs_model_reference_file(reffile): - # try to load nam file - if not self.attribs_from_namfile_header(namefile): - # set defaults - self.set_coord_info() - - def attribs_from_namfile_header(self, namefile): - # check for reference info in the nam file header - if namefile is None: - return False - xul, yul = None, None - header = [] - with open(namefile, 'r') as f: - for line in f: - if not line.startswith('#'): - break - header.extend(line.strip().replace('#', '').split(';')) - - for item in header: - if "xll" in item.lower(): - try: - xll = float(item.split(':')[1]) - self._xoff = xll - except: - pass - elif "yll" in item.lower(): - try: - yll = float(item.split(':')[1]) - self._yoff = yll - except: - pass - elif "xul" in item.lower(): - try: - xul = float(item.split(':')[1]) - warnings.warn( - 'xul/yul have been deprecated. Use xll/yll instead.', - DeprecationWarning) - except: - pass - elif "yul" in item.lower(): - try: - yul = float(item.split(':')[1]) - warnings.warn( - 'xul/yul have been deprecated. 
Use xll/yll instead.', - DeprecationWarning) - except: - pass - elif "rotation" in item.lower(): - try: - self._angrot = float(item.split(':')[1]) - except: - pass - elif "proj4_str" in item.lower(): - try: - self._proj4 = ':'.join(item.split(':')[1:]).strip() - if self._proj4.lower() == 'none': - self._proj4 = None - except: - pass - elif "start" in item.lower(): - try: - start_datetime = item.split(':')[1].strip() - except: - pass - - # we need to rotate the modelgrid first, then we can - # calculate the xll and yll from xul and yul - if (xul, yul) != (None, None): - self.set_coord_info(xoff=self._xul_to_xll(xul), - yoff=self._yul_to_yll(yul), - angrot=self._angrot) - - return True - - def read_usgs_model_reference_file(self, reffile='usgs.model.reference'): - """read spatial reference info from the usgs.model.reference file - https://water.usgs.gov/ogw/policy/gw-model/modelers-setup.html""" - xul = None - yul = None - if os.path.exists(reffile): - with open(reffile) as input: - for line in input: - if len(line) > 1: - if line.strip()[0] != '#': - info = line.strip().split('#')[0].split() - if len(info) > 1: - data = ' '.join(info[1:]) - if info[0] == 'xll': - self._xoff = float(data) - elif info[0] == 'yll': - self._yoff = float(data) - elif info[0] == 'xul': - xul = float(data) - elif info[0] == 'yul': - yul = float(data) - elif info[0] == 'rotation': - self._angrot = float(data) - elif info[0] == 'epsg': - self._epsg = int(data) - elif info[0] == 'proj4': - self._proj4 = data - elif info[0] == 'start_date': - start_datetime = data - - # model must be rotated first, before setting xoff and yoff - # when xul and yul are provided. - if (xul, yul) != (None, None): - self.set_coord_info(xoff=self._xul_to_xll(xul), - yoff=self._yul_to_yll(yul), - angrot=self._angrot) - - return True - else: - return False - - # Internal - def _xul_to_xll(self, xul, angrot=None): - yext = self.xyedges[1][0] - if angrot is not None: - return xul + (np.sin(angrot * np.pi / 180) * yext) - else: - return xul + (np.sin(self.angrot_radians) * yext) - - def _yul_to_yll(self, yul, angrot=None): - yext = self.xyedges[1][0] - if angrot is not None: - return yul - (np.cos(angrot * np.pi / 180) * yext) - else: - return yul - (np.cos(self.angrot_radians) * yext) - - def _set_sr_coord_info(self, sr): - self._xoff = sr.xll - self._yoff = sr.yll - self._angrot = sr.rotation - self._epsg = sr.epsg - self._proj4 = sr.proj4_str - self._require_cache_updates() - - def _require_cache_updates(self): - for cache_data in self._cache_dict.values(): - cache_data.out_of_date = True - - @property - def _has_ref_coordinates(self): - return self._xoff != 0.0 or self._yoff != 0.0 or self._angrot != 0.0 - - def _load_settings(self, d): - self._xoff = d.xul - - def _zcoords(self): - if self.top is not None and self.botm is not None: - zcenters = [] - top_3d = np.expand_dims(self.top, 0) - zbdryelevs = np.concatenate((top_3d, self.botm), axis=0) - - for ix in range(1, len(zbdryelevs)): - zcenters.append((zbdryelevs[ix - 1] + zbdryelevs[ix]) / 2.) 
- else: - zbdryelevs = None - zcenters = None +import numpy as np +import copy, os +import warnings +from ..utils import geometry + + +class CachedData(object): + def __init__(self, data): + self._data = data + self.out_of_date = False + + @property + def data_nocopy(self): + return self._data + + @property + def data(self): + return copy.deepcopy(self._data) + + def update_data(self, data): + self._data = data + self.out_of_date = False + + +class Grid(object): + """ + Base class for a structured or unstructured model grid + + Parameters + ---------- + grid_type : enumeration + type of model grid ('structured', 'vertex_layered', + 'vertex_unlayered') + top : ndarray(np.float) + top elevations of cells in topmost layer + botm : ndarray(np.float) + bottom elevations of all cells + idomain : ndarray(np.int) + ibound/idomain value for each cell + lenuni : ndarray(np.int) + model length units + origin_loc : str + Corner of the model grid that is the model origin + 'ul' (upper left corner) or 'll' (lower left corner) + origin_x : float + x coordinate of the origin point (lower left corner of model grid) + in the spatial reference coordinate system + origin_y : float + y coordinate of the origin point (lower left corner of model grid) + in the spatial reference coordinate system + rotation : float + rotation angle of model grid, as it is rotated around the origin point + + Properties + ---------- + grid_type : enumeration + type of model grid ('structured', 'vertex_layered', + 'vertex_unlayered') + top : ndarray(np.float) + top elevations of cells in topmost layer + botm : ndarray(np.float) + bottom elevations of all cells + idomain : ndarray(np.int) + ibound/idomain value for each cell + proj4 : proj4 SpatialReference + spatial reference locates the grid in a coordinate system + epsg : epsg SpatialReference + spatial reference locates the grid in a coordinate system + lenuni : int + modflow lenuni parameter + origin_x : float + x coordinate of the origin point in the spatial reference coordinate + system + origin_y : float + y coordinate of the origin point in the spatial reference coordinate + system + rotation : float + rotation angle of model grid, as it is rotated around the origin point + xgrid : ndarray + returns numpy meshgrid of x edges in reference frame defined by + point_type + ygrid : ndarray + returns numpy meshgrid of y edges in reference frame defined by + point_type + zgrid : ndarray + returns numpy meshgrid of z edges in reference frame defined by + point_type + xcenters : ndarray + returns x coordinate of cell centers + ycenters : ndarray + returns y coordinate of cell centers + ycenters : ndarray + returns z coordinate of cell centers + xyzgrid : [ndarray, ndarray, ndarray] + returns the location of grid edges of all model cells. if the model + grid contains spatial reference information, the grid edges are in the + coordinate system provided by the spatial reference information. + returns a list of three ndarrays for the x, y, and z coordinates + xyzcellcenters : [ndarray, ndarray, ndarray] + returns the cell centers of all model cells in the model grid. if + the model grid contains spatial reference information, the cell centers + are in the coordinate system provided by the spatial reference + information. otherwise the cell centers are based on a 0,0 location + for the upper left corner of the model grid. 
returns a list of three + ndarrays for the x, y, and z coordinates + + Methods + ---------- + get_coords(x, y) + transform point or array of points x, y from model coordinates to + spatial coordinates + grid_lines : (point_type=PointType.spatialxyz) : list + returns the model grid lines in a list. each line is returned as a + list containing two tuples in the format [(x1,y1), (x2,y2)] where + x1,y1 and x2,y2 are the endpoints of the line. + xyvertices : (point_type) : ndarray + 1D array of x and y coordinates of cell vertices for whole grid + (single layer) in C-style (row-major) order + (same as np.ravel()) + intersect(x, y, local) + returns the row and column of the grid that the x, y point is in + + See Also + -------- + + Notes + ----- + + Examples + -------- + """ + def __init__(self, grid_type=None, top=None, botm=None, idomain=None, + lenuni=None, epsg=None, proj4=None, prj=None, xoff=0.0, yoff=0.0, + angrot=0.0): + lenunits = {0: "undefined", 1: "feet", 2: "meters", 3: "centimeters"} + LENUNI = {"u": 0, "f": 1, "m": 2, "c": 3} + self.use_ref_coords = True + self._grid_type = grid_type + if top is not None: + top = top.astype(float) + self._top = top + if botm is not None: + botm = botm.astype(float) + self._botm = botm + self._idomain = idomain + + if lenuni is None: + lenuni = 0 + elif isinstance(lenuni, str): + lenuni = LENUNI[lenuni.lower()[0]] + self._lenuni = lenuni + + self._units = lenunits[self._lenuni] + self._epsg = epsg + self._proj4 = proj4 + self._prj = prj + self._xoff = xoff + self._yoff = yoff + if angrot is None: + angrot = 0.0 + self._angrot = angrot + self._cache_dict = {} + self._copy_cache = True + + ################################### + # access to basic grid properties + ################################### + def __repr__(self): + items = [] + if self.xoffset is not None and self.yoffset is not None \ + and self.angrot is not None: + items += [ + "xll:" + str(self.xoffset), + "yll:" + str(self.yoffset), + "rotation:" + str(self.angrot)] + if self.proj4 is not None: + items.append("proj4_str:" + str(self.proj4)) + if self.units is not None: + items.append("units:" + str(self.units)) + if self.lenuni is not None: + items.append("lenuni:" + str(self.lenuni)) + return '; '.join(items) + + @property + def is_valid(self): + return True + + @property + def is_complete(self): + if self._top is not None and self._botm is not None and \ + self._idomain is not None: + return True + return False + + @property + def grid_type(self): + return self._grid_type + + @property + def xoffset(self): + return self._xoff + + @property + def yoffset(self): + return self._yoff + + @property + def angrot(self): + return self._angrot + + @property + def angrot_radians(self): + return self._angrot * np.pi / 180. 
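For review context, the epsg/proj4 accessors just below resolve the PROJ string with a simple precedence; a condensed sketch of that fallback (resolve_proj4 is a hypothetical name):

    def resolve_proj4(proj4_str, epsg_code):
        # an explicit proj4 string wins; when it embeds an EPSG code
        # (e.g. 'epsg:26915'), the property additionally extracts and
        # stores that code as a side effect; otherwise fall back to
        # building the string from the stored epsg code
        if proj4_str is not None:
            return proj4_str
        if epsg_code is not None:
            return 'epsg:{}'.format(epsg_code)
        return None

    resolve_proj4(None, 26915)  # -> 'epsg:26915'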
+ + @property + def epsg(self): + return self._epsg + + @epsg.setter + def epsg(self, epsg): + self._epsg = epsg + + @property + def proj4(self): + proj4 = None + if self._proj4 is not None: + if "epsg" in self._proj4.lower(): + proj4 = self._proj4 + # set the epsg if proj4 specifies it + tmp = [i for i in self._proj4.split() if + 'epsg' in i.lower()] + self._epsg = int(tmp[0].split(':')[1]) + else: + proj4 = self._proj4 + elif self.epsg is not None: + proj4 = 'epsg:{}'.format(self.epsg) + return proj4 + + @proj4.setter + def proj4(self, proj4): + self._proj4 = proj4 + + @property + def prj(self): + return self._prj + + @prj.setter + def prj(self, prj): + self._proj4 = prj + + @property + def top(self): + return copy.deepcopy(self._top) + + @property + def botm(self): + return copy.deepcopy(self._botm) + + @property + def top_botm(self): + new_top = np.expand_dims(self._top, 0) + return np.concatenate((new_top, self._botm), axis=0) + + @property + def units(self): + return self._units + + @property + def lenuni(self): + return self._lenuni + + @property + def idomain(self): + return copy.deepcopy(self._idomain) + + @property + def nnodes(self): + raise NotImplementedError( + 'must define nnodes in child class') + + @property + def shape(self): + raise NotImplementedError( + 'must define shape in child class') + + @property + def extent(self): + raise NotImplementedError( + 'must define extent in child class') + + @property + def xyzextent(self): + return (np.min(self.xyzvertices[0]), np.max(self.xyzvertices[0]), + np.min(self.xyzvertices[1]), np.max(self.xyzvertices[1]), + np.min(self.xyzvertices[2]), np.max(self.xyzvertices[2])) + + @property + def grid_lines(self): + raise NotImplementedError( + 'must define grid_lines in child class') + + @property + def xcellcenters(self): + return self.xyzcellcenters[0] + + @property + def ycellcenters(self): + return self.xyzcellcenters[1] + + @property + def zcellcenters(self): + return self.xyzcellcenters[2] + + @property + def xyzcellcenters(self): + raise NotImplementedError( + 'must define get_cellcenters in child ' + 'class to use this base class') + + @property + def xvertices(self): + return self.xyzvertices[0] + + @property + def yvertices(self): + return self.xyzvertices[1] + + @property + def zvertices(self): + return self.xyzvertices[2] + + @property + def xyzvertices(self): + raise NotImplementedError( + 'must define xyzvertices in child class') + + #@property + #def indices(self): + # raise NotImplementedError( + # 'must define indices in child ' + # 'class to use this base class') + + def get_coords(self, x, y): + """ + Given x and y array-like values, apply rotation, scale and offset, + to convert them from model coordinates to real-world coordinates. + """ + if isinstance(x, list): + x = np.array(x) + y = np.array(y) + if not np.isscalar(x): + x, y = x.copy(), y.copy() + + x += self._xoff + y += self._yoff + return geometry.rotate(x, y, self._xoff, self._yoff, + self.angrot_radians) + + def get_local_coords(self, x, y): + """ + Given x and y array-like values, apply rotation, scale and offset, + to convert them from real-world coordinates to model coordinates. 
+ """ + if isinstance(x, list): + x = np.array(x) + y = np.array(y) + if not np.isscalar(x): + x, y = x.copy(), y.copy() + + x, y = geometry.rotate(x, y, self._xoff, self._yoff, + -self.angrot_radians) + x -= self._xoff + y -= self._yoff + + return x, y + + def intersect(self, x, y, local=False, forgive=False): + if not local: + return self.get_local_coords(x, y) + else: + return x, y + + def set_coord_info(self, xoff=0.0, yoff=0.0, angrot=0.0, epsg=None, + proj4=None, merge_coord_info=True): + if merge_coord_info: + if xoff is None: + xoff = self._xoff + if yoff is None: + yoff = self._yoff + if angrot is None: + angrot = self._angrot + if epsg is None: + epsg = self._epsg + if proj4 is None: + proj4 = self._proj4 + + self._xoff = xoff + self._yoff = yoff + self._angrot = angrot + self._epsg = epsg + self._proj4 = proj4 + self._require_cache_updates() + + def load_coord_info(self, namefile=None, reffile='usgs.model.reference'): + """Attempts to load spatial reference information from + the following files (in order): + 1) usgs.model.reference + 2) NAM file (header comment) + 3) defaults + """ + reffile = os.path.join(os.path.split(namefile)[0], reffile) + # try to load reference file + if not self.read_usgs_model_reference_file(reffile): + # try to load nam file + if not self.attribs_from_namfile_header(namefile): + # set defaults + self.set_coord_info() + + def attribs_from_namfile_header(self, namefile): + # check for reference info in the nam file header + if namefile is None: + return False + xul, yul = None, None + header = [] + with open(namefile, 'r') as f: + for line in f: + if not line.startswith('#'): + break + header.extend(line.strip().replace('#', '').split(';')) + + for item in header: + if "xll" in item.lower(): + try: + xll = float(item.split(':')[1]) + self._xoff = xll + except: + pass + elif "yll" in item.lower(): + try: + yll = float(item.split(':')[1]) + self._yoff = yll + except: + pass + elif "xul" in item.lower(): + try: + xul = float(item.split(':')[1]) + warnings.warn( + 'xul/yul have been deprecated. Use xll/yll instead.', + DeprecationWarning) + except: + pass + elif "yul" in item.lower(): + try: + yul = float(item.split(':')[1]) + warnings.warn( + 'xul/yul have been deprecated. 
Use xll/yll instead.', + DeprecationWarning) + except: + pass + elif "rotation" in item.lower(): + try: + self._angrot = float(item.split(':')[1]) + except: + pass + elif "proj4_str" in item.lower(): + try: + self._proj4 = ':'.join(item.split(':')[1:]).strip() + if self._proj4.lower() == 'none': + self._proj4 = None + except: + pass + elif "start" in item.lower(): + try: + start_datetime = item.split(':')[1].strip() + except: + pass + + # we need to rotate the modelgrid first, then we can + # calculate the xll and yll from xul and yul + if (xul, yul) != (None, None): + self.set_coord_info(xoff=self._xul_to_xll(xul), + yoff=self._yul_to_yll(yul), + angrot=self._angrot) + + return True + + def read_usgs_model_reference_file(self, reffile='usgs.model.reference'): + """read spatial reference info from the usgs.model.reference file + https://water.usgs.gov/ogw/policy/gw-model/modelers-setup.html""" + xul = None + yul = None + if os.path.exists(reffile): + with open(reffile) as input: + for line in input: + if len(line) > 1: + if line.strip()[0] != '#': + info = line.strip().split('#')[0].split() + if len(info) > 1: + data = ' '.join(info[1:]) + if info[0] == 'xll': + self._xoff = float(data) + elif info[0] == 'yll': + self._yoff = float(data) + elif info[0] == 'xul': + xul = float(data) + elif info[0] == 'yul': + yul = float(data) + elif info[0] == 'rotation': + self._angrot = float(data) + elif info[0] == 'epsg': + self._epsg = int(data) + elif info[0] == 'proj4': + self._proj4 = data + elif info[0] == 'start_date': + start_datetime = data + + # model must be rotated first, before setting xoff and yoff + # when xul and yul are provided. + if (xul, yul) != (None, None): + self.set_coord_info(xoff=self._xul_to_xll(xul), + yoff=self._yul_to_yll(yul), + angrot=self._angrot) + + return True + else: + return False + + # Internal + def _xul_to_xll(self, xul, angrot=None): + yext = self.xyedges[1][0] + if angrot is not None: + return xul + (np.sin(angrot * np.pi / 180) * yext) + else: + return xul + (np.sin(self.angrot_radians) * yext) + + def _yul_to_yll(self, yul, angrot=None): + yext = self.xyedges[1][0] + if angrot is not None: + return yul - (np.cos(angrot * np.pi / 180) * yext) + else: + return yul - (np.cos(self.angrot_radians) * yext) + + def _set_sr_coord_info(self, sr): + self._xoff = sr.xll + self._yoff = sr.yll + self._angrot = sr.rotation + self._epsg = sr.epsg + self._proj4 = sr.proj4_str + self._require_cache_updates() + + def _require_cache_updates(self): + for cache_data in self._cache_dict.values(): + cache_data.out_of_date = True + + @property + def _has_ref_coordinates(self): + return self._xoff != 0.0 or self._yoff != 0.0 or self._angrot != 0.0 + + def _load_settings(self, d): + self._xoff = d.xul + + def _zcoords(self): + if self.top is not None and self.botm is not None: + zcenters = [] + top_3d = np.expand_dims(self.top, 0) + zbdryelevs = np.concatenate((top_3d, self.botm), axis=0) + + for ix in range(1, len(zbdryelevs)): + zcenters.append((zbdryelevs[ix - 1] + zbdryelevs[ix]) / 2.) 
+ else: + zbdryelevs = None + zcenters = None return zbdryelevs, zcenters \ No newline at end of file diff --git a/flopy/discretization/modeltime.py b/flopy/discretization/modeltime.py index 606b7f8e5a..f07129c46d 100644 --- a/flopy/discretization/modeltime.py +++ b/flopy/discretization/modeltime.py @@ -1,45 +1,45 @@ -class ModelTime(): - """ - Class for MODFLOW simulation time - - Parameters - ---------- - stress_periods : pandas dataframe - headings are: perlen, nstp, tsmult - temporal_reference : TemporalReference - contains start time and time units information - """ - def __init__(self, period_data=None, time_units='days', - start_datetime=None, steady_state=None): - self._period_data = period_data - self._time_units = time_units - self._start_datetime = start_datetime - self._steady_state = steady_state - - @property - def time_units(self): - return self._time_units - - @property - def start_datetime(self): - return self._start_datetime - - @property - def perlen(self): - return self._period_data['perlen'] - - @property - def nper(self): - return len(self._period_data['perlen']) - - @property - def nstp(self): - return self._period_data['nstp'] - - @property - def tsmult(self): - return self._period_data['tsmult'] - - @property - def steady_state(self): +class ModelTime(): + """ + Class for MODFLOW simulation time + + Parameters + ---------- + stress_periods : pandas dataframe + headings are: perlen, nstp, tsmult + temporal_reference : TemporalReference + contains start time and time units information + """ + def __init__(self, period_data=None, time_units='days', + start_datetime=None, steady_state=None): + self._period_data = period_data + self._time_units = time_units + self._start_datetime = start_datetime + self._steady_state = steady_state + + @property + def time_units(self): + return self._time_units + + @property + def start_datetime(self): + return self._start_datetime + + @property + def perlen(self): + return self._period_data['perlen'] + + @property + def nper(self): + return len(self._period_data['perlen']) + + @property + def nstp(self): + return self._period_data['nstp'] + + @property + def tsmult(self): + return self._period_data['tsmult'] + + @property + def steady_state(self): return self._steady_state \ No newline at end of file diff --git a/flopy/discretization/structuredgrid.py b/flopy/discretization/structuredgrid.py index f916fa3733..36409d6378 100644 --- a/flopy/discretization/structuredgrid.py +++ b/flopy/discretization/structuredgrid.py @@ -1,1287 +1,1287 @@ -import copy -import numpy as np -from .grid import Grid, CachedData - -def array_at_verts_basic2d(a): - """ - Computes values at cell vertices on 2d array using neighbor averaging. - - Parameters - ---------- - a : ndarray - Array values at cell centers, could be a slice in any orientation. - - Returns - ------- - averts : ndarray - Array values at cell vertices, shape (a.shape[0]+1, a.shape[1]+1). - """ - assert a.ndim == 2 - shape_verts2d = (a.shape[0]+1, a.shape[1]+1) - - # create a 3D array of size (nrow+1, ncol+1, 4) - averts3d = np.full(shape_verts2d + (4,), np.nan) - averts3d[:-1, :-1, 0] = a - averts3d[:-1, 1:, 1] = a - averts3d[1:, :-1, 2] = a - averts3d[1:, 1:, 3] = a - - # calculate the mean over the last axis, ignoring NaNs - averts = np.nanmean(averts3d, axis=2) - - return averts - -def array_at_faces_1d(a, delta): - """ - Interpolate array at cell faces of a 1d grid using linear interpolation. - - Parameters - ---------- - a : 1d ndarray - Values at cell centers. 
- delta : 1d ndarray - Grid steps. - - Returns - ------- - afaces : 1d ndarray - Array values interpolated at cell faces, shape as input extended by 1. - - """ - # extended array with ghost cells on both sides having zero values - ghost_shape = list(a.shape) - ghost_shape[0] += 2 - a_ghost = np.zeros(ghost_shape, dtype=a.dtype) - - # extended delta with ghost cells on both sides having zero values - delta_ghost = np.zeros(ghost_shape, dtype=a.dtype) - - # fill array with ghost cells - a_ghost[1:-1] = a - a_ghost[0] = a[0] - a_ghost[-1] = a[-1] - - # calculate weights - delta_ghost[1:-1] = delta - weight2 = delta_ghost[:-1] / (delta_ghost[:-1] + delta_ghost[1:]) - weight1 = 1. - weight2 - - # interpolate - afaces = a_ghost[:-1]*weight1 + a_ghost[1:]*weight2 - - return afaces - -class StructuredGrid(Grid): - """ - class for a structured model grid - - Parameters - ---------- - delc - delc array - delr - delr array - - Properties - ---------- - nlay - returns the number of model layers - nrow - returns the number of model rows - ncol - returns the number of model columns - delc - returns the delc array - delr - returns the delr array - xyedges - returns x-location points for the edges of the model grid and - y-location points for the edges of the model grid - - Methods - ---------- - get_cell_vertices(i, j) - returns vertices for a single cell at row, column i, j. - """ - def __init__(self, delc=None, delr=None, top=None, botm=None, idomain=None, - lenuni=None, epsg=None, proj4=None, prj=None, xoff=0.0, - yoff=0.0, angrot=0.0, nlay=None, nrow=None, ncol=None, - laycbd=None): - super(StructuredGrid, self).__init__('structured', top, botm, idomain, - lenuni, epsg, proj4, prj, xoff, - yoff, angrot) - if delc is not None: - self.__nrow = len(delc) - self.__delc = delc.astype(float) - else: - self.__nrow = nrow - self.__delc = delc - if delr is not None: - self.__ncol = len(delr) - self.__delr = delr.astype(float) - else: - self.__ncol = ncol - self.__delr = delr - if top is not None: - assert self.__nrow * self.__ncol == len(np.ravel(top)) - if botm is not None: - assert self.__nrow * self.__ncol == len(np.ravel(botm[0])) - if nlay is not None: - self.__nlay = nlay - else: - if laycbd is not None: - self.__nlay = len(botm) - np.sum(laycbd>0) - else: - self.__nlay = len(botm) - else: - self.__nlay = nlay - if laycbd is not None: - self.__laycbd = laycbd - else: - self.__laycbd = np.zeros(self.__nlay, dtype=int) - - #################### - # Properties - #################### - @property - def is_valid(self): - if self.__delc is not None and self.__delr is not None: - return True - return False - - @property - def is_complete(self): - if self.__delc is not None and self.__delr is not None and \ - super(StructuredGrid, self).is_complete: - return True - return False - - @property - def nlay(self): - return self.__nlay - - @property - def nrow(self): - return self.__nrow - - @property - def ncol(self): - return self.__ncol - - @property - def nnodes(self): - return self.__nlay * self.__nrow * self.__ncol - - @property - def shape(self): - return self.__nlay, self.__nrow, self.__ncol - - @property - def extent(self): - self._copy_cache = False - xyzgrid = self.xyzvertices - self._copy_cache = True - return (np.min(xyzgrid[0]), np.max(xyzgrid[0]), - np.min(xyzgrid[1]), np.max(xyzgrid[1])) - - @property - def delc(self): - return copy.deepcopy(self.__delc) - - @property - def delr(self): - return copy.deepcopy(self.__delr) - - @property - def delz(self): - cache_index = 'delz' - if cache_index not in 
self._cache_dict or \ - self._cache_dict[cache_index].out_of_date: - delz = self.top_botm[:-1, :, :] - self.top_botm[1:, :, :] - self._cache_dict[cache_index] = CachedData(delz) - if self._copy_cache: - return self._cache_dict[cache_index].data - else: - return self._cache_dict[cache_index].data_nocopy - - @property - def top_botm_withnan(self): - """ - Same as top_botm array but with NaN where idomain==0 both above and - below a cell. - """ - cache_index = 'top_botm_withnan' - if cache_index not in self._cache_dict or \ - self._cache_dict[cache_index].out_of_date: - is_inactive_above = np.full(self.top_botm.shape, True) - is_inactive_above[:-1, :, :] = self._idomain==0 - is_inactive_below = np.full(self.top_botm.shape, True) - is_inactive_below[1:, :, :] = self._idomain==0 - where_to_nan = np.logical_and(is_inactive_above, is_inactive_below) - top_botm_withnan = np.where(where_to_nan, np.nan, self.top_botm) - self._cache_dict[cache_index] = CachedData(top_botm_withnan) - if self._copy_cache: - return self._cache_dict[cache_index].data - else: - return self._cache_dict[cache_index].data_nocopy - - @property - def xyzvertices(self): - """ - Method to get all grid vertices in a layer - - Returns: - [] - 2D array - """ - cache_index = 'xyzgrid' - if cache_index not in self._cache_dict or \ - self._cache_dict[cache_index].out_of_date: - xedge = np.concatenate(([0.], np.add.accumulate(self.__delr))) - length_y = np.add.reduce(self.__delc) - yedge = np.concatenate(([length_y], length_y - - np.add.accumulate(self.delc))) - xgrid, ygrid = np.meshgrid(xedge, yedge) - zgrid, zcenter = self._zcoords() - if self._has_ref_coordinates: - # transform x and y - pass - xgrid, ygrid = self.get_coords(xgrid, ygrid) - if zgrid is not None: - self._cache_dict[cache_index] = \ - CachedData([xgrid, ygrid, zgrid]) - else: - self._cache_dict[cache_index] = \ - CachedData([xgrid, ygrid]) - - if self._copy_cache: - return self._cache_dict[cache_index].data - else: - return self._cache_dict[cache_index].data_nocopy - - @property - def xyedges(self): - """ - Return a list of two 1D numpy arrays: one with the cell edge x - coordinate (size = ncol+1) and the other with the cell edge y - coordinate (size = nrow+1) in model space - not offset or rotated. - """ - cache_index = 'xyedges' - if cache_index not in self._cache_dict or \ - self._cache_dict[cache_index].out_of_date: - xedge = np.concatenate(([0.], np.add.accumulate(self.__delr))) - length_y = np.add.reduce(self.__delc) - yedge = np.concatenate(([length_y], length_y - - np.add.accumulate(self.delc))) - self._cache_dict[cache_index] = \ - CachedData([xedge, yedge]) - if self._copy_cache: - return self._cache_dict[cache_index].data - else: - return self._cache_dict[cache_index].data_nocopy - - @property - def zedges(self): - """ - Return zedges for (column, row)==(0, 0). - """ - cache_index = 'zedges' - if cache_index not in self._cache_dict or \ - self._cache_dict[cache_index].out_of_date: - zedges = np.concatenate((np.array([self.top[0, 0]]), - self.botm[:, 0, 0])) - self._cache_dict[cache_index] = CachedData(zedges) - if self._copy_cache: - return self._cache_dict[cache_index].data - else: - return self._cache_dict[cache_index].data_nocopy - - @property - def zverts_smooth(self): - """ - Get a unique z of cell vertices using bilinear interpolation of top and - bottom elevation layers. - - Returns - ------- - zverts : ndarray, shape (nlay+1, nrow+1, ncol+1) - z of cell vertices. NaN values are assigned in accordance with - inactive cells defined by idomain. 
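Every cached property in this class follows the same memoization idiom built on the CachedData wrapper defined earlier in this patch; stripped to its core (using delz, which appears above, as the example):

    @property
    def delz(self):
        cache_index = 'delz'
        # recompute only when the entry is missing or has been invalidated
        # by _require_cache_updates()
        if cache_index not in self._cache_dict or \
                self._cache_dict[cache_index].out_of_date:
            delz = self.top_botm[:-1, :, :] - self.top_botm[1:, :, :]
            self._cache_dict[cache_index] = CachedData(delz)
        cached = self._cache_dict[cache_index]
        # hand out a deep copy unless copying is temporarily disabled
        return cached.data if self._copy_cache else cached.data_nocopy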
- """ - cache_index = 'zverts_smooth' - if cache_index not in self._cache_dict or \ - self._cache_dict[cache_index].out_of_date: - zverts_smooth = self.array_at_verts(self.top_botm) - self._cache_dict[cache_index] = CachedData(zverts_smooth) - if self._copy_cache: - return self._cache_dict[cache_index].data - else: - return self._cache_dict[cache_index].data_nocopy - - @property - def xycenters(self): - """ - Return a list of two numpy one-dimensional float arrays for center x - and y coordinates in model space - not offset or rotated. - """ - cache_index = 'xycenters' - if cache_index not in self._cache_dict or \ - self._cache_dict[cache_index].out_of_date: - # get x centers - x = np.add.accumulate(self.__delr) - 0.5 * self.delr - # get y centers - Ly = np.add.reduce(self.__delc) - y = Ly - (np.add.accumulate(self.__delc) - 0.5 * - self.__delc) - # store in cache - self._cache_dict[cache_index] = CachedData([x, y]) - if self._copy_cache: - return self._cache_dict[cache_index].data - else: - return self._cache_dict[cache_index].data_nocopy - - @property - def xyzcellcenters(self): - """ - Return a list of three numpy float arrays: two two-dimensional arrays - for center x and y coordinates, and one three-dimensional array for - center z coordinates. Coordinates are given in real-world coordinates. - """ - cache_index = 'cellcenters' - if cache_index not in self._cache_dict or \ - self._cache_dict[cache_index].out_of_date: - # get x centers - x = np.add.accumulate(self.__delr) - 0.5 * self.delr - # get y centers - Ly = np.add.reduce(self.__delc) - y = Ly - (np.add.accumulate(self.__delc) - 0.5 * - self.__delc) - x_mesh, y_mesh = np.meshgrid(x, y) - if self.__nlay is not None: - # get z centers - z = np.empty((self.__nlay, self.__nrow, self.__ncol)) - z[0, :, :] = (self._top[:, :] + self._botm[0, :, :]) / 2. - ibs = np.arange(self.__nlay) - quasi3d = [cbd !=0 for cbd in self.__laycbd] - if np.any(quasi3d): - ibs[1:] = ibs[1:] + np.cumsum(quasi3d)[:self.__nlay - 1] - for l, ib in enumerate(ibs[1:], 1): - z[l, :, :] = (self._botm[ib - 1, :, :] + - self._botm[ib, :, :]) / 2. - else: - z = None - if self._has_ref_coordinates: - # transform x and y - x_mesh, y_mesh = self.get_coords(x_mesh, y_mesh) - # store in cache - self._cache_dict[cache_index] = CachedData([x_mesh, y_mesh, z]) - if self._copy_cache: - return self._cache_dict[cache_index].data - else: - return self._cache_dict[cache_index].data_nocopy - - @property - def grid_lines(self): - """ - Get the grid lines as a list - - """ - # get edges initially in model coordinates - use_ref_coords = self.use_ref_coords - self.use_ref_coords = False - xyedges = self.xyedges - self.use_ref_coords = use_ref_coords - - xmin = xyedges[0][0] - xmax = xyedges[0][-1] - ymin = xyedges[1][-1] - ymax = xyedges[1][0] - lines = [] - # Vertical lines - for j in range(self.ncol + 1): - x0 = xyedges[0][j] - x1 = x0 - y0 = ymin - y1 = ymax - lines.append([(x0, y0), (x1, y1)]) - - # horizontal lines - for i in range(self.nrow + 1): - x0 = xmin - x1 = xmax - y0 = xyedges[1][i] - y1 = y0 - lines.append([(x0, y0), (x1, y1)]) - - if self._has_ref_coordinates: - lines_trans = [] - for ln in lines: - lines_trans.append([self.get_coords(*ln[0]), - self.get_coords(*ln[1])]) - return lines_trans - return lines - - @property - def is_regular_x(self): - """ - Test whether the grid spacing is regular in the x direction. 
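All of the is_regular_* checks in this hunk reduce to the same relative-tolerance comparison against the first grid step; as a standalone function (spacing_is_regular is a hypothetical name):

    import numpy as np

    def spacing_is_regular(spacing, rel_tol=1.e-5):
        # regular means every step matches the first step within rel_tol
        spacing = np.asarray(spacing, dtype=float)
        rel_diff = (spacing - spacing[0]) / spacing[0]
        return np.count_nonzero(np.abs(rel_diff) > rel_tol) == 0

    spacing_is_regular([100.0, 100.0, 100.0])  # -> True
    spacing_is_regular([100.0, 150.0, 100.0])  # -> False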
- """ - cache_index = 'is_regular_x' - if cache_index not in self._cache_dict or \ - self._cache_dict[cache_index].out_of_date: - # relative tolerance to use in test - rel_tol = 1.e-5 - - # regularity test in x direction - rel_diff_x = (self.__delr - self.__delr[0]) / self.__delr[0] - is_regular_x = np.count_nonzero(np.abs(rel_diff_x) > rel_tol) == 0 - - self._cache_dict[cache_index] = CachedData(is_regular_x) - if self._copy_cache: - return self._cache_dict[cache_index].data - else: - return self._cache_dict[cache_index].data_nocopy - - @property - def is_regular_y(self): - """ - Test whether the grid spacing is regular in the y direction. - """ - cache_index = 'is_regular_y' - if cache_index not in self._cache_dict or \ - self._cache_dict[cache_index].out_of_date: - # relative tolerance to use in test - rel_tol = 1.e-5 - - # regularity test in y direction - rel_diff_y = (self.__delc - self.__delc[0]) / self.__delc[0] - is_regular_y = np.count_nonzero(np.abs(rel_diff_y) > rel_tol) == 0 - - self._cache_dict[cache_index] = CachedData(is_regular_y) - if self._copy_cache: - return self._cache_dict[cache_index].data - else: - return self._cache_dict[cache_index].data_nocopy - - @property - def is_regular_z(self): - """ - Test if the grid spacing is regular in z direction. - """ - cache_index = 'is_regular_z' - if cache_index not in self._cache_dict or \ - self._cache_dict[cache_index].out_of_date: - # relative tolerance to use in test - rel_tol = 1.e-5 - - # regularity test in z direction - rel_diff_thick0 = (self.delz[0, :, :] - self.delz[0, 0, 0]) \ - / self.delz[0, 0, 0] - failed = np.abs(rel_diff_thick0) > rel_tol - is_regular_z = np.count_nonzero(failed) == 0 - for k in range(1, self.nlay): - rel_diff_zk = (self.delz[k, :, :] - self.delz[0, :, :]) \ - / self.delz[0, :, :] - failed = np.abs(rel_diff_zk) > rel_tol - is_regular_z = is_regular_z and np.count_nonzero(failed) == 0 - - self._cache_dict[cache_index] = CachedData(is_regular_z) - if self._copy_cache: - return self._cache_dict[cache_index].data - else: - return self._cache_dict[cache_index].data_nocopy - - @property - def is_regular_xy(self): - """ - Test if the grid spacing is regular and equal in x and y directions. - """ - cache_index = 'is_regular_xy' - if cache_index not in self._cache_dict or \ - self._cache_dict[cache_index].out_of_date: - # relative tolerance to use in test - rel_tol = 1.e-5 - - # test if the first delta is equal in x and z - rel_diff_0 = (self.__delc[0] - self.__delr[0]) / self.__delr[0] - first_equal = np.abs(rel_diff_0) <= rel_tol - - # combine with regularity tests in x and z directions - is_regular_xy = first_equal and self.is_regular_x and \ - self.is_regular_y - - self._cache_dict[cache_index] = CachedData(is_regular_xy) - if self._copy_cache: - return self._cache_dict[cache_index].data - else: - return self._cache_dict[cache_index].data_nocopy - - @property - def is_regular_xz(self): - """ - Test if the grid spacing is regular and equal in x and z directions. 
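The cross-direction variants (is_regular_xy, is_regular_xz, is_regular_yz, is_regular) add exactly one extra check to the per-direction tests: the first grid step must also match across the two directions. Schematically (a sketch with hypothetical names):

    import numpy as np

    def first_steps_equal(step_a, step_b, rel_tol=1.e-5):
        # the only addition over the per-direction regularity tests
        return np.abs(step_a - step_b) / step_b <= rel_tol

    # e.g. is_regular_xz is equivalent to
    # first_steps_equal(delz[0, 0, 0], delr[0]) and is_regular_x and is_regular_z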
- """ - cache_index = 'is_regular_xz' - if cache_index not in self._cache_dict or \ - self._cache_dict[cache_index].out_of_date: - # relative tolerance to use in test - rel_tol = 1.e-5 - - # test if the first delta is equal in x and z - rel_diff_0 = (self.delz[0, 0, 0] - self.__delr[0]) / self.__delr[0] - first_equal = np.abs(rel_diff_0) <= rel_tol - - # combine with regularity tests in x and z directions - is_regular_xz = first_equal and self.is_regular_x and \ - self.is_regular_z - - self._cache_dict[cache_index] = CachedData(is_regular_xz) - if self._copy_cache: - return self._cache_dict[cache_index].data - else: - return self._cache_dict[cache_index].data_nocopy - - @property - def is_regular_yz(self): - """ - Test if the grid spacing is regular and equal in y and z directions. - """ - cache_index = 'is_regular_yz' - if cache_index not in self._cache_dict or \ - self._cache_dict[cache_index].out_of_date: - # relative tolerance to use in test - rel_tol = 1.e-5 - - # test if the first delta is equal in y and z - rel_diff_0 = (self.delz[0, 0, 0] - self.__delc[0]) / self.__delc[0] - first_equal = np.abs(rel_diff_0) <= rel_tol - - # combine with regularity tests in x and y directions - is_regular_yz = first_equal and self.is_regular_y and \ - self.is_regular_z - - self._cache_dict[cache_index] = CachedData(is_regular_yz) - if self._copy_cache: - return self._cache_dict[cache_index].data - else: - return self._cache_dict[cache_index].data_nocopy - - @property - def is_regular(self): - """ - Test if the grid spacing is regular and equal in x, y and z directions. - """ - cache_index = 'is_regular' - if cache_index not in self._cache_dict or \ - self._cache_dict[cache_index].out_of_date: - # relative tolerance to use in test - rel_tol = 1.e-5 - - # test if the first delta is equal in x and z - rel_diff_0 = (self.delz[0, 0, 0] - self.__delr[0]) / self.__delr[0] - first_equal = np.abs(rel_diff_0) <= rel_tol - - # combine with regularity tests in x, y and z directions - is_regular = first_equal and self.is_regular_z and \ - self.is_regular_xy - - self._cache_dict[cache_index] = CachedData(is_regular) - if self._copy_cache: - return self._cache_dict[cache_index].data - else: - return self._cache_dict[cache_index].data_nocopy - - @property - def is_rectilinear(self): - """ - Test whether the grid is rectilinear (it is always so in the x and - y directions, but not necessarily in the z direction). - """ - cache_index = 'is_rectilinear' - if cache_index not in self._cache_dict or \ - self._cache_dict[cache_index].out_of_date: - # relative tolerance to use in test - rel_tol = 1.e-5 - - # rectilinearity test in z direction - is_rect_z = True - for k in range(self.nlay): - rel_diff_zk = (self.delz[k, :, :] - self.delz[k, 0, 0]) \ - / self.delz[k, 0, 0] - failed = np.abs(rel_diff_zk) > rel_tol - is_rect_z = is_rect_z and np.count_nonzero(failed) == 0 - - self._cache_dict[cache_index] = CachedData(is_rect_z) - if self._copy_cache: - return self._cache_dict[cache_index].data - else: - return self._cache_dict[cache_index].data_nocopy - - ############### - ### Methods ### - ############### - def intersect(self, x, y, local=False, forgive=False): - """ - Get the row and column of a point with coordinates x and y - - When the point is on the edge of two cells, the cell with the lowest - row or column is returned. 
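The edge comparisons at the heart of intersect below can be verified by hand; a worked example using the same conventions as the method body (x edges increasing, y edges decreasing from the top row down):

    import numpy as np

    xe = np.array([0., 100., 200., 300.])  # cell edges along x (ncol + 1)
    ye = np.array([100., 50., 0.])         # cell edges along y (nrow + 1)
    x, y = 150.0, 30.0
    col = np.where(x > xe)[0][-1]          # -> 1, x lies between 100 and 200
    row = np.where(y < ye)[0][-1]          # -> 1, y lies between 50 and 0
    # a point exactly on an interior edge (e.g. x=100.0) yields col=0,
    # the lower-numbered cell, matching the docstring above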
-
-        Parameters
-        ----------
-        x : float
-            The x-coordinate of the requested point
-        y : float
-            The y-coordinate of the requested point
-        local: bool (optional)
-            If True, x and y are in local coordinates (defaults to False)
-        forgive: bool (optional)
-            Forgive x,y arguments that fall outside the model grid and
-            return NaNs instead (defaults to False - will throw exception)
-
-        Returns
-        -------
-        row : int
-            The row number
-        col : int
-            The column number
-
-        """
-        # transform x and y to local coordinates
-        x, y = super(StructuredGrid, self).intersect(x, y, local, forgive)
-
-        # get the cell edges in local coordinates
-        xe, ye = self.xyedges
-
-        xcomp = x > xe
-        if np.all(xcomp) or not np.any(xcomp):
-            if forgive:
-                col = np.nan
-            else:
-                raise Exception(
-                    'x, y point given is outside of the model area')
-        else:
-            col = np.where(xcomp)[0][-1]
-
-        ycomp = y < ye
-        if np.all(ycomp) or not np.any(ycomp):
-            if forgive:
-                row = np.nan
-            else:
-                raise Exception(
-                    'x, y point given is outside of the model area')
-        else:
-            row = np.where(ycomp)[0][-1]
-        if np.any(np.isnan([row, col])):
-            row = col = np.nan
-        return row, col
-
-    def _cell_vert_list(self, i, j):
-        """Get vertices for a single cell or sequence of i, j locations."""
-        self._copy_cache = False
-        pts = []
-        xgrid, ygrid = self.xvertices, self.yvertices
-        pts.append([xgrid[i, j], ygrid[i, j]])
-        pts.append([xgrid[i + 1, j], ygrid[i + 1, j]])
-        pts.append([xgrid[i + 1, j + 1], ygrid[i + 1, j + 1]])
-        pts.append([xgrid[i, j + 1], ygrid[i, j + 1]])
-        pts.append([xgrid[i, j], ygrid[i, j]])
-        self._copy_cache = True
-        if np.isscalar(i):
-            return pts
-        else:
-            vrts = np.array(pts).transpose([2, 0, 1])
-            return [v.tolist() for v in vrts]
-
-    def get_cell_vertices(self, i, j):
-        """
-        Method to get a set of cell vertices for a single cell
-            used in the Shapefile export utilities
-        :param i: (int) cell row number
-        :param j: (int) cell column number
-        :return: list of x,y cell vertices
-        """
-        self._copy_cache = False
-        cell_verts = [(self.xvertices[i, j], self.yvertices[i, j]),
-                      (self.xvertices[i, j+1], self.yvertices[i, j+1]),
-                      (self.xvertices[i+1, j+1], self.yvertices[i+1, j+1]),
-                      (self.xvertices[i+1, j], self.yvertices[i+1, j]),]
-        self._copy_cache = True
-        return cell_verts
-
-    def plot(self, **kwargs):
-        """
-        Plot the grid lines.
-
-        Parameters
-        ----------
-        kwargs : ax, colors.  The remaining kwargs are passed into the
-            LineCollection constructor.
-
-        Returns
-        -------
-        lc : matplotlib.collections.LineCollection
-
-        """
-        from ..plot import PlotMapView
-
-        mm = PlotMapView(modelgrid=self)
-        return mm.plot_grid(**kwargs)
-
-    # Importing
-    @classmethod
-    def from_gridspec(cls, gridspec_file, lenuni=0):
-        f = open(gridspec_file, 'r')
-        raw = f.readline().strip().split()
-        nrow = int(raw[0])
-        ncol = int(raw[1])
-        raw = f.readline().strip().split()
-        xul, yul, rot = float(raw[0]), float(raw[1]), float(raw[2])
-        delr = []
-        j = 0
-        while j < ncol:
-            raw = f.readline().strip().split()
-            for r in raw:
-                if '*' in r:
-                    rraw = r.split('*')
-                    for n in range(int(rraw[0])):
-                        delr.append(float(rraw[1]))
-                        j += 1
-                else:
-                    delr.append(float(r))
-                    j += 1
-        delc = []
-        i = 0
-        while i < nrow:
-            raw = f.readline().strip().split()
-            for r in raw:
-                if '*' in r:
-                    rraw = r.split('*')
-                    for n in range(int(rraw[0])):
-                        delc.append(float(rraw[1]))
-                        i += 1
-                else:
-                    delc.append(float(r))
-                    i += 1
-        f.close()
-        grd = cls(np.array(delc), np.array(delr), lenuni=lenuni)
-        xll = grd._xul_to_xll(xul)
-        yll = grd._yul_to_yll(yul)
-        # set the reference information on the new instance and return it;
-        # set_coord_info is an instance method and cannot be called on cls
-        grd.set_coord_info(xoff=xll, yoff=yll, angrot=rot)
-        return grd
-
-    # Exporting
-    def write_shapefile(self, filename='grid.shp', epsg=None, prj=None):
-        """
-        Write a shapefile of the grid with just the row and column attributes.
-        """
-        from ..export.shapefile_utils import write_grid_shapefile
-        if epsg is None and prj is None:
-            epsg = self.epsg
-        write_grid_shapefile(filename, self, array_dict={}, nan_val=-1.0e9,
-                             epsg=epsg, prj=prj)
-
-    def array_at_verts_basic(self, a):
-        """
-        Computes values at cell vertices using neighbor averaging.
-
-        Parameters
-        ----------
-        a : ndarray
-            Array values at cell centers.
-
-        Returns
-        -------
-        averts : ndarray
-            Array values at cell vertices, shape
-            (a.shape[0]+1, a.shape[1]+1, a.shape[2]+1). NaN values are assigned
-            in accordance with inactive cells defined by idomain.
-        """
-        assert a.ndim == 3
-        shape_verts = (a.shape[0]+1, a.shape[1]+1, a.shape[2]+1)
-
-        # set to NaN where idomain==0, on a copy so the caller's array is
-        # not modified in place
-        a = np.where(self._idomain == 0, np.nan, a)
-
-        # create a 4D array of size (nlay+1, nrow+1, ncol+1, 8)
-        averts4d = np.full(shape_verts + (8,), np.nan)
-        averts4d[:-1, :-1, :-1, 0] = a
-        averts4d[:-1, :-1, 1:, 1] = a
-        averts4d[:-1, 1:, :-1, 2] = a
-        averts4d[:-1, 1:, 1:, 3] = a
-        averts4d[1:, :-1, :-1, 4] = a
-        averts4d[1:, :-1, 1:, 5] = a
-        averts4d[1:, 1:, :-1, 6] = a
-        averts4d[1:, 1:, 1:, 7] = a
-
-        # calculate the mean over the last axis, ignoring NaNs
-        averts = np.nanmean(averts4d, axis=3)
-
-        return averts
-
-    def array_at_verts(self, a):
-        """
-        Interpolate array values at cell vertices.
-
-        Parameters
-        ----------
-        a : ndarray
-            Array values. Allowed shapes are: (nlay, nrow, ncol),
-            (nlay, nrow, ncol+1), (nlay, nrow+1, ncol) and
-            (nlay+1, nrow, ncol).
-            * When the shape is (nlay, nrow, ncol), input values are
-              considered at cell centers, and output values are computed by
-              trilinear interpolation.
-            * When the shape is extended in one direction, input values are
-              considered at the center of cell faces in this direction, and
-              output values are computed by bilinear interpolation in planes
-              defined by these cell faces.
-
-        Returns
-        -------
-        averts : ndarray
-            Array values interpolated at cell vertices, shape
-            (nlay+1, nrow+1, ncol+1).
-
-        Notes
-        -----
-            * Output values are smooth (continuous) even if top elevations or
-              bottom elevations are not constant across layers (i.e., in this
-              case, vertices of neighboring cells are implicitly merged).
- * NaN values are assigned in accordance with inactive cells defined - by idomain. - """ - import scipy.interpolate as interp - - # define shapes - shape_ext_x = (self.nlay, self.nrow, self.ncol+1) - shape_ext_y = (self.nlay, self.nrow+1, self.ncol) - shape_ext_z = (self.nlay+1, self.nrow, self.ncol) - shape_verts = (self.nlay+1, self.nrow+1, self.ncol+1) - - # get inactive cells - if self._idomain is not None: - inactive = self._idomain == 0 - - # get local x and y cell center coordinates (1d arrays) - xcenters, ycenters = self.xycenters - - # get z center coordinates: make the grid rectilinear if it is not, - # in order to always use RegularGridInterpolator; in most cases this - # will give better results than with the non-structured interpolator - # LinearNDInterpolator (in addition, it will run faster) - zcenters = self.zcellcenters - if self._idomain is not None: - zcenters = np.where(inactive, np.nan, zcenters) - if not self.is_rectilinear or \ - np.count_nonzero(np.isnan(zcenters)) != 0: - zedges = np.nanmean(self.top_botm_withnan, axis=(1, 2)) - else: - zedges = self.top_botm_withnan[:, 0, 0] - zcenters = 0.5 * (zedges[1:] + zedges[:-1]) - - # test grid regularity in z - rel_tol = 1.e-5 - delz = np.diff(zedges) - rel_diff = (delz - delz[0]) / delz[0] - _is_regular_z = np.count_nonzero(np.abs(rel_diff) > rel_tol) == 0 - - # test equality of first grid spacing in x and z, and in y and z - first_equal_xz = np.abs(self.__delr[0] - delz[0]) / delz[0] <= rel_tol - first_equal_yz = np.abs(self.__delc[0] - delz[0]) / delz[0] <= rel_tol - - # get output coordinates (i.e. vertices) - xedges, yedges = self.xyedges - xedges = xedges.reshape((1, 1, self.ncol+1)) - xoutput = xedges * np.ones(shape_verts) - yedges = yedges.reshape((1, self.nrow+1, 1)) - youtput = yedges * np.ones(shape_verts) - zoutput = zedges.reshape((self.nlay+1, 1, 1)) - zoutput = zoutput * np.ones(shape_verts) - - # indicator of whether basic interpolation is used or not - basic = False - - if a.shape == self.shape: - # set array to NaN where inactive - if self._idomain is not None: - inactive = self._idomain == 0 - a = np.where(inactive, np.nan, a) - - # perform basic interpolation (this will be useful in all cases) - averts_basic = self.array_at_verts_basic(a) - - if self.is_regular_xy and _is_regular_z and first_equal_xz: - # in this case, basic interpolation is the correct one - averts = averts_basic - basic = True - - else: - if self.nlay == 1: - # in this case we need a 2d interpolation in the x, y plane - # flip y coordinates because RegularGridInterpolator - # requires increasing input coordinates - xyinput = (np.flip(ycenters), xcenters) - a = np.squeeze(np.flip(a, axis=[1])) - # interpolate - interp_func = interp.RegularGridInterpolator(xyinput, a, - bounds_error=False, fill_value=np.nan) - xyoutput = np.empty((youtput[0, :, :].size, 2)) - xyoutput[:, 0] = youtput[0, :, :].ravel() - xyoutput[:, 1] = xoutput[0, :, :].ravel() - averts2d = interp_func(xyoutput) - averts2d = averts2d.reshape((1, self.nrow+1, self.ncol+1)) - averts = averts2d * np.ones(shape_verts) - elif self.nrow == 1: - # in this case we need a 2d interpolation in the x, z plane - # flip z coordinates because RegularGridInterpolator - # requires increasing input coordinates - xzinput = (np.flip(zcenters), xcenters) - a = np.squeeze(np.flip(a, axis=[0])) - # interpolate - interp_func = interp.RegularGridInterpolator(xzinput, a, - bounds_error=False, fill_value=np.nan) - xzoutput = np.empty((zoutput[:, 0, :].size, 2)) - xzoutput[:, 0] = zoutput[:, 0, 
:].ravel() - xzoutput[:, 1] = xoutput[:, 0, :].ravel() - averts2d = interp_func(xzoutput) - averts2d = averts2d.reshape((self.nlay+1, 1, self.ncol+1)) - averts = averts2d * np.ones(shape_verts) - elif self.ncol == 1: - # in this case we need a 2d interpolation in the y, z plane - # flip y and z coordinates because RegularGridInterpolator - # requires increasing input coordinates - yzinput = (np.flip(zcenters), np.flip(ycenters)) - a = np.squeeze(np.flip(a, axis=[0, 1])) - # interpolate - interp_func = interp.RegularGridInterpolator(yzinput, a, - bounds_error=False, fill_value=np.nan) - yzoutput = np.empty((zoutput[:, :, 0].size, 2)) - yzoutput[:, 0] = zoutput[:, :, 0].ravel() - yzoutput[:, 1] = youtput[:, :, 0].ravel() - averts2d = interp_func(yzoutput) - averts2d = averts2d.reshape((self.nlay+1, self.nrow+1, 1)) - averts = averts2d * np.ones(shape_verts) - else: - # 3d interpolation - # flip y and z coordinates because RegularGridInterpolator - # requires increasing input coordinates - xyzinput = (np.flip(zcenters), np.flip(ycenters), xcenters) - a = np.flip(a, axis=[0, 1]) - # interpolate - interp_func = interp.RegularGridInterpolator(xyzinput, a, - bounds_error=False, fill_value=np.nan) - xyzoutput = np.empty((zoutput.size, 3)) - xyzoutput[:, 0] = zoutput.ravel() - xyzoutput[:, 1] = youtput.ravel() - xyzoutput[:, 2] = xoutput.ravel() - averts = interp_func(xyzoutput) - averts = averts.reshape(shape_verts) - - elif a.shape == shape_ext_x: - # set array to NaN where inactive on both side - if self._idomain is not None: - inactive_ext_x = np.full(shape_ext_x, True) - inactive_ext_x[:, :, :-1] = inactive - inactive_ext_x[:, :, 1:] = np.logical_and( - inactive_ext_x[:, :, 1:], inactive) - a = np.where(inactive_ext_x, np.nan, a) - - averts = np.empty(shape_verts, dtype=a.dtype) - averts_basic = np.empty(shape_verts, dtype=a.dtype) - for j in range(self.ncol+1): - # perform basic interpolation (will be useful in all cases) - averts_basic[:, :, j] = array_at_verts_basic2d(a[:, :, j]) - - if self.is_regular_y and _is_regular_z and first_equal_yz: - # in this case, basic interpolation is the correct one - averts2d = averts_basic[:, :, j] - basic = True - - else: - if self.nlay == 1: - # in this case we need a 1d interpolation along y - averts1d = array_at_faces_1d(a[0, :, j], self.__delc) - averts2d = averts1d.reshape((1, self.nrow+1)) - averts2d = averts2d * np.ones((2, self.nrow+1)) - elif self.nrow == 1: - # in this case we need a 1d interpolation along z - delz1d = np.abs(np.diff(self.zverts_smooth[:, 0, j])) - averts1d = array_at_faces_1d(a[:, 0, j], delz1d) - averts2d = averts1d.reshape((self.nlay+1, 1)) - averts2d = averts2d * np.ones((self.nlay+1, 2)) - else: - # 2d interpolation - # flip y and z coordinates because - # RegularGridInterpolator requires increasing input - # coordinates - yzinput = (np.flip(zcenters), np.flip(ycenters)) - a2d = np.flip(a[:, :, j], axis=[0, 1]) - interp_func = interp.RegularGridInterpolator(yzinput, - a2d, bounds_error=False, fill_value=np.nan) - yzoutput = np.empty((zoutput[:, :, j].size, 2)) - yzoutput[:, 0] = zoutput[:, :, j].ravel() - yzoutput[:, 1] = youtput[:, :, j].ravel() - averts2d = interp_func(yzoutput) - averts2d = averts2d.reshape(zoutput[:, :, j].shape) - - averts[:, :, j] = averts2d - - elif a.shape == shape_ext_y: - # set array to NaN where inactive on both side - if self._idomain is not None: - inactive_ext_y = np.full(shape_ext_y, True) - inactive_ext_y[:, :-1, :] = inactive - inactive_ext_y[:, 1:, :] = np.logical_and( - inactive_ext_y[:, 
1:, :], inactive) - a = np.where(inactive_ext_y, np.nan, a) - - averts = np.empty(shape_verts, dtype=a.dtype) - averts_basic = np.empty(shape_verts, dtype=a.dtype) - for i in range(self.nrow+1): - # perform basic interpolation (will be useful in all cases) - averts_basic[:, i, :] = array_at_verts_basic2d(a[:, i, :]) - - if self.is_regular_x and _is_regular_z and first_equal_xz: - # in this case, basic interpolation is the correct one - averts2d = averts_basic[:, i, :] - basic = True - - else: - if self.nlay == 1: - # in this case we need a 1d interpolation along x - averts1d = array_at_faces_1d(a[0, i, :], self.__delr) - averts2d = averts1d.reshape((1, self.ncol+1)) - averts2d = averts2d * np.ones((2, self.ncol+1)) - elif self.ncol == 1: - # in this case we need a 1d interpolation along z - delz1d = np.abs(np.diff(self.zverts_smooth[:, i, 0])) - averts1d = array_at_faces_1d(a[:, i, 0], delz1d) - averts2d = averts1d.reshape((self.nlay+1, 1)) - averts2d = averts2d * np.ones((self.nlay+1, 2)) - else: - # 2d interpolation - # flip z coordinates because RegularGridInterpolator - # requires increasing input coordinates - xzinput = (np.flip(zcenters), xcenters) - a2d = np.flip(a[:, i, :], axis=[0]) - interp_func = interp.RegularGridInterpolator(xzinput, - a2d, bounds_error=False, fill_value=np.nan) - xzoutput = np.empty((zoutput[:, i, :].size, 2)) - xzoutput[:, 0] = zoutput[:, i, :].ravel() - xzoutput[:, 1] = xoutput[:, i, :].ravel() - averts2d = interp_func(xzoutput) - averts2d = averts2d.reshape(zoutput[:, i, :].shape) - - averts[:, i, :] = averts2d - - elif a.shape == shape_ext_z: - # set array to NaN where inactive on both side - if self._idomain is not None: - inactive_ext_z = np.full(shape_ext_z, True) - inactive_ext_z[:-1, :, :] = inactive - inactive_ext_z[1:, :, :] = np.logical_and( - inactive_ext_z[1:, :, :], inactive) - a = np.where(inactive_ext_z, np.nan, a) - - averts = np.empty(shape_verts, dtype=a.dtype) - averts_basic = np.empty(shape_verts, dtype=a.dtype) - for k in range(self.nlay+1): - # perform basic interpolation (will be useful in all cases) - averts_basic[k, :, :] = array_at_verts_basic2d(a[k, :, :]) - - if self.is_regular_xy: - # in this case, basic interpolation is the correct one - averts2d = averts_basic[k, :, :] - basic = True - - else: - if self.nrow == 1: - # in this case we need a 1d interpolation along x - averts1d = array_at_faces_1d(a[k, 0, :], self.__delr) - averts2d = averts1d.reshape((1, self.ncol+1)) - averts2d = averts2d * np.ones((2, self.ncol+1)) - elif self.ncol == 1: - # in this case we need a 1d interpolation along y - averts1d = array_at_faces_1d(a[k, :, 0], self.__delc) - averts2d = averts1d.reshape((self.nrow+1, 1)) - averts2d = averts2d * np.ones((self.nrow+1, 2)) - else: - # 2d interpolation - # flip y coordinates because RegularGridInterpolator - # requires increasing input coordinates - xyinput = (np.flip(ycenters), xcenters) - a2d = np.flip(a[k, :, :], axis=[0]) - interp_func = interp.RegularGridInterpolator(xyinput, - a2d, bounds_error=False, fill_value=np.nan) - xyoutput = np.empty((youtput[k, :, :].size, 2)) - xyoutput[:, 0] = youtput[k, :, :].ravel() - xyoutput[:, 1] = xoutput[k, :, :].ravel() - averts2d = interp_func(xyoutput) - averts2d = averts2d.reshape(youtput[k, :, :].shape) - - averts[k, :, :] = averts2d - - if not basic: - # use basic interpolation for remaining NaNs at boundaries - where_nan = np.isnan(averts) - averts[where_nan] = averts_basic[where_nan] - - return averts - - def array_at_faces(self, a, direction, withnan=True): - 
""" - Computes values at the center of cell faces using linear interpolation. - - Parameters - ---------- - a : ndarray - Values at cell centers, shape (nlay, row, ncol). - direction : str, possible values are 'x', 'y' and 'z' - Direction in which values will be interpolated at cell faces. - withnan : bool - If True (default), the result value will be set to NaN where the - cell face sits between inactive cells. If False, not. - - Returns - ------- - afaces : ndarray - Array values interpolated at cell vertices, shape as input extended - by 1 along the specified direction. - - """ - # get the dimension that corresponds to the direction - dir_to_dim = {'x': 2, 'y': 1, 'z': 0} - dim = dir_to_dim[direction] - - # extended array with ghost cells on both sides having zero values - ghost_shape = list(a.shape) - ghost_shape[dim] += 2 - a_ghost = np.zeros(ghost_shape, dtype=a.dtype) - - # extended delta with ghost cells on both sides having zero values - delta_ghost = np.zeros(ghost_shape, dtype=a.dtype) - - # inactive bool array - if withnan and self._idomain is not None: - inactive = self._idomain == 0 - - if dim == 0: - # fill array with ghost cells - a_ghost[1:-1, :, :] = a - a_ghost[0, :, :] = a[0, :, :] - a_ghost[-1, :, :] = a[-1, :, :] - - # calculate weights - delta_ghost[1:-1, :, :] = self.delz - weight2 = delta_ghost[:-1, :, :] / (delta_ghost[:-1, :, :] + \ - delta_ghost[1:, :, :]) - weight1 = 1. - weight2 - - # interpolate - afaces = a_ghost[:-1, :, :]*weight1 + a_ghost[1:, :, :]*weight2 - - # assign NaN where idomain==0 on both sides - if withnan and self._idomain is not None: - inactive_faces = np.full(afaces.shape, True) - inactive_faces[:-1, :, :] = np.logical_and( - inactive_faces[:-1, :, :], inactive) - inactive_faces[1:, :, :] = np.logical_and( - inactive_faces[1:, :, :], inactive) - afaces[inactive_faces] = np.nan - - elif dim == 1: - # fill array with ghost cells - a_ghost[:, 1:-1, :] = a - a_ghost[:, 0, :] = a[:, 0, :] - a_ghost[:, -1, :] = a[:, -1, :] - - # calculate weights - delc = np.reshape(self.delc, (1, self.nrow, 1)) - delc_3D = delc * np.ones(a.shape) - delta_ghost[:, 1:-1, :] = delc_3D - weight2 = delta_ghost[:, :-1, :] / (delta_ghost[:, :-1, :] + \ - delta_ghost[:, 1:, :]) - weight1 = 1. - weight2 - - # interpolate - afaces = a_ghost[:, :-1, :]*weight1 + a_ghost[:, 1:, :]*weight2 - - # assign NaN where idomain==0 on both sides - if withnan and self._idomain is not None: - inactive_faces = np.full(afaces.shape, True) - inactive_faces[:, :-1, :] = np.logical_and( - inactive_faces[:, :-1, :], inactive) - inactive_faces[:, 1:, :] = np.logical_and( - inactive_faces[:, 1:, :], inactive) - afaces[inactive_faces] = np.nan - - elif dim == 2: - # fill array with ghost cells - a_ghost[:, :, 1:-1] = a - a_ghost[:, :, 0] = a[:, :, 0] - a_ghost[:, :, -1] = a[:, :, -1] - - # calculate weights - delr = np.reshape(self.delr, (1, 1, self.ncol)) - delr_3D = delr * np.ones(a.shape) - delta_ghost[:, :, 1:-1] = delr_3D - weight2 = delta_ghost[:, :, :-1] / (delta_ghost[:, :, :-1] + \ - delta_ghost[:, :, 1:]) - weight1 = 1. 
- weight2 - - # interpolate - afaces = a_ghost[:, :, :-1]*weight1 + a_ghost[:, :, 1:]*weight2 - - # assign NaN where idomain==0 on both sides - if withnan and self._idomain is not None: - inactive_faces = np.full(afaces.shape, True) - inactive_faces[:, :, :-1] = np.logical_and( - inactive_faces[:, :, :-1], inactive) - inactive_faces[:, :, 1:] = np.logical_and( - inactive_faces[:, :, 1:], inactive) - afaces[inactive_faces] = np.nan - - return afaces - -if __name__ == "__main__": - import matplotlib.pyplot as plt - delc = np.ones((10,)) * 1 - delr = np.ones((20,)) * 1 - - top = np.ones((10, 20)) * 2000 - botm = np.ones((1, 10, 20)) * 1100 - - t = StructuredGrid(delc, delr, top, botm, xoff=0, yoff=0, - angrot=45) - - #plt.scatter(np.ravel(t.xcenters), np.ravel(t.ycenters), c="b") - #t.plot_grid_lines() - #plt.show() - #plt.close() - - #delc = np.ones(10,) * 2 - #t.delc = delc - - #plt.scatter(np.ravel(t.xcenters), np.ravel(t.ycenters), c="b") - #t.plot_grid_lines() - #plt.show() - - t.use_ref_coords = False - x = t.xvertices - y = t.yvertices - xc = t.xcellcenters - yc = t.ycellcenters - #extent = t.extent - grid = t.grid_lines - - t.use_ref_coords = True - sr_x = t.xvertices - sr_y = t.yvertices - sr_xc = t.xcellcenters - sr_yc = t.ycellcenters - #sr_extent = t.extent - sr_grid = t.grid_lines - print(sr_grid) - #t.plot_grid_lines() - #plt.show() +import copy +import numpy as np +from .grid import Grid, CachedData + +def array_at_verts_basic2d(a): + """ + Computes values at cell vertices on 2d array using neighbor averaging. + + Parameters + ---------- + a : ndarray + Array values at cell centers, could be a slice in any orientation. + + Returns + ------- + averts : ndarray + Array values at cell vertices, shape (a.shape[0]+1, a.shape[1]+1). + """ + assert a.ndim == 2 + shape_verts2d = (a.shape[0]+1, a.shape[1]+1) + + # create a 3D array of size (nrow+1, ncol+1, 4) + averts3d = np.full(shape_verts2d + (4,), np.nan) + averts3d[:-1, :-1, 0] = a + averts3d[:-1, 1:, 1] = a + averts3d[1:, :-1, 2] = a + averts3d[1:, 1:, 3] = a + + # calculate the mean over the last axis, ignoring NaNs + averts = np.nanmean(averts3d, axis=2) + + return averts + +def array_at_faces_1d(a, delta): + """ + Interpolate array at cell faces of a 1d grid using linear interpolation. + + Parameters + ---------- + a : 1d ndarray + Values at cell centers. + delta : 1d ndarray + Grid steps. + + Returns + ------- + afaces : 1d ndarray + Array values interpolated at cell faces, shape as input extended by 1. + + """ + # extended array with ghost cells on both sides having zero values + ghost_shape = list(a.shape) + ghost_shape[0] += 2 + a_ghost = np.zeros(ghost_shape, dtype=a.dtype) + + # extended delta with ghost cells on both sides having zero values + delta_ghost = np.zeros(ghost_shape, dtype=a.dtype) + + # fill array with ghost cells + a_ghost[1:-1] = a + a_ghost[0] = a[0] + a_ghost[-1] = a[-1] + + # calculate weights + delta_ghost[1:-1] = delta + weight2 = delta_ghost[:-1] / (delta_ghost[:-1] + delta_ghost[1:]) + weight1 = 1. 
- weight2 + + # interpolate + afaces = a_ghost[:-1]*weight1 + a_ghost[1:]*weight2 + + return afaces + +class StructuredGrid(Grid): + """ + class for a structured model grid + + Parameters + ---------- + delc + delc array + delr + delr array + + Properties + ---------- + nlay + returns the number of model layers + nrow + returns the number of model rows + ncol + returns the number of model columns + delc + returns the delc array + delr + returns the delr array + xyedges + returns x-location points for the edges of the model grid and + y-location points for the edges of the model grid + + Methods + ---------- + get_cell_vertices(i, j) + returns vertices for a single cell at row, column i, j. + """ + def __init__(self, delc=None, delr=None, top=None, botm=None, idomain=None, + lenuni=None, epsg=None, proj4=None, prj=None, xoff=0.0, + yoff=0.0, angrot=0.0, nlay=None, nrow=None, ncol=None, + laycbd=None): + super(StructuredGrid, self).__init__('structured', top, botm, idomain, + lenuni, epsg, proj4, prj, xoff, + yoff, angrot) + if delc is not None: + self.__nrow = len(delc) + self.__delc = delc.astype(float) + else: + self.__nrow = nrow + self.__delc = delc + if delr is not None: + self.__ncol = len(delr) + self.__delr = delr.astype(float) + else: + self.__ncol = ncol + self.__delr = delr + if top is not None: + assert self.__nrow * self.__ncol == len(np.ravel(top)) + if botm is not None: + assert self.__nrow * self.__ncol == len(np.ravel(botm[0])) + if nlay is not None: + self.__nlay = nlay + else: + if laycbd is not None: + self.__nlay = len(botm) - np.sum(laycbd>0) + else: + self.__nlay = len(botm) + else: + self.__nlay = nlay + if laycbd is not None: + self.__laycbd = laycbd + else: + self.__laycbd = np.zeros(self.__nlay, dtype=int) + + #################### + # Properties + #################### + @property + def is_valid(self): + if self.__delc is not None and self.__delr is not None: + return True + return False + + @property + def is_complete(self): + if self.__delc is not None and self.__delr is not None and \ + super(StructuredGrid, self).is_complete: + return True + return False + + @property + def nlay(self): + return self.__nlay + + @property + def nrow(self): + return self.__nrow + + @property + def ncol(self): + return self.__ncol + + @property + def nnodes(self): + return self.__nlay * self.__nrow * self.__ncol + + @property + def shape(self): + return self.__nlay, self.__nrow, self.__ncol + + @property + def extent(self): + self._copy_cache = False + xyzgrid = self.xyzvertices + self._copy_cache = True + return (np.min(xyzgrid[0]), np.max(xyzgrid[0]), + np.min(xyzgrid[1]), np.max(xyzgrid[1])) + + @property + def delc(self): + return copy.deepcopy(self.__delc) + + @property + def delr(self): + return copy.deepcopy(self.__delr) + + @property + def delz(self): + cache_index = 'delz' + if cache_index not in self._cache_dict or \ + self._cache_dict[cache_index].out_of_date: + delz = self.top_botm[:-1, :, :] - self.top_botm[1:, :, :] + self._cache_dict[cache_index] = CachedData(delz) + if self._copy_cache: + return self._cache_dict[cache_index].data + else: + return self._cache_dict[cache_index].data_nocopy + + @property + def top_botm_withnan(self): + """ + Same as top_botm array but with NaN where idomain==0 both above and + below a cell. 
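To make the spacing conventions above concrete, here is a minimal usage sketch (illustrative only, not part of the patch; the import path is assumed from this file's location in the tree):

    import numpy as np
    from flopy.discretization.structuredgrid import StructuredGrid

    # delc is the row spacing (along y), delr is the column spacing (along x)
    delc = np.array([10., 10.])
    delr = np.array([5., 5., 5.])
    top = np.full((2, 3), 100.)
    botm = np.full((1, 2, 3), 90.)
    grid = StructuredGrid(delc=delc, delr=delr, top=top, botm=botm)
    print(grid.shape)  # (1, 2, 3), i.e. (nlay, nrow, ncol)
    print(grid.delz)   # thicknesses derived from top_botm, all 10.0 here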
+ """ + cache_index = 'top_botm_withnan' + if cache_index not in self._cache_dict or \ + self._cache_dict[cache_index].out_of_date: + is_inactive_above = np.full(self.top_botm.shape, True) + is_inactive_above[:-1, :, :] = self._idomain==0 + is_inactive_below = np.full(self.top_botm.shape, True) + is_inactive_below[1:, :, :] = self._idomain==0 + where_to_nan = np.logical_and(is_inactive_above, is_inactive_below) + top_botm_withnan = np.where(where_to_nan, np.nan, self.top_botm) + self._cache_dict[cache_index] = CachedData(top_botm_withnan) + if self._copy_cache: + return self._cache_dict[cache_index].data + else: + return self._cache_dict[cache_index].data_nocopy + + @property + def xyzvertices(self): + """ + Method to get all grid vertices in a layer + + Returns: + [] + 2D array + """ + cache_index = 'xyzgrid' + if cache_index not in self._cache_dict or \ + self._cache_dict[cache_index].out_of_date: + xedge = np.concatenate(([0.], np.add.accumulate(self.__delr))) + length_y = np.add.reduce(self.__delc) + yedge = np.concatenate(([length_y], length_y - + np.add.accumulate(self.delc))) + xgrid, ygrid = np.meshgrid(xedge, yedge) + zgrid, zcenter = self._zcoords() + if self._has_ref_coordinates: + # transform x and y + pass + xgrid, ygrid = self.get_coords(xgrid, ygrid) + if zgrid is not None: + self._cache_dict[cache_index] = \ + CachedData([xgrid, ygrid, zgrid]) + else: + self._cache_dict[cache_index] = \ + CachedData([xgrid, ygrid]) + + if self._copy_cache: + return self._cache_dict[cache_index].data + else: + return self._cache_dict[cache_index].data_nocopy + + @property + def xyedges(self): + """ + Return a list of two 1D numpy arrays: one with the cell edge x + coordinate (size = ncol+1) and the other with the cell edge y + coordinate (size = nrow+1) in model space - not offset or rotated. + """ + cache_index = 'xyedges' + if cache_index not in self._cache_dict or \ + self._cache_dict[cache_index].out_of_date: + xedge = np.concatenate(([0.], np.add.accumulate(self.__delr))) + length_y = np.add.reduce(self.__delc) + yedge = np.concatenate(([length_y], length_y - + np.add.accumulate(self.delc))) + self._cache_dict[cache_index] = \ + CachedData([xedge, yedge]) + if self._copy_cache: + return self._cache_dict[cache_index].data + else: + return self._cache_dict[cache_index].data_nocopy + + @property + def zedges(self): + """ + Return zedges for (column, row)==(0, 0). + """ + cache_index = 'zedges' + if cache_index not in self._cache_dict or \ + self._cache_dict[cache_index].out_of_date: + zedges = np.concatenate((np.array([self.top[0, 0]]), + self.botm[:, 0, 0])) + self._cache_dict[cache_index] = CachedData(zedges) + if self._copy_cache: + return self._cache_dict[cache_index].data + else: + return self._cache_dict[cache_index].data_nocopy + + @property + def zverts_smooth(self): + """ + Get a unique z of cell vertices using bilinear interpolation of top and + bottom elevation layers. + + Returns + ------- + zverts : ndarray, shape (nlay+1, nrow+1, ncol+1) + z of cell vertices. NaN values are assigned in accordance with + inactive cells defined by idomain. 
+ """ + cache_index = 'zverts_smooth' + if cache_index not in self._cache_dict or \ + self._cache_dict[cache_index].out_of_date: + zverts_smooth = self.array_at_verts(self.top_botm) + self._cache_dict[cache_index] = CachedData(zverts_smooth) + if self._copy_cache: + return self._cache_dict[cache_index].data + else: + return self._cache_dict[cache_index].data_nocopy + + @property + def xycenters(self): + """ + Return a list of two numpy one-dimensional float arrays for center x + and y coordinates in model space - not offset or rotated. + """ + cache_index = 'xycenters' + if cache_index not in self._cache_dict or \ + self._cache_dict[cache_index].out_of_date: + # get x centers + x = np.add.accumulate(self.__delr) - 0.5 * self.delr + # get y centers + Ly = np.add.reduce(self.__delc) + y = Ly - (np.add.accumulate(self.__delc) - 0.5 * + self.__delc) + # store in cache + self._cache_dict[cache_index] = CachedData([x, y]) + if self._copy_cache: + return self._cache_dict[cache_index].data + else: + return self._cache_dict[cache_index].data_nocopy + + @property + def xyzcellcenters(self): + """ + Return a list of three numpy float arrays: two two-dimensional arrays + for center x and y coordinates, and one three-dimensional array for + center z coordinates. Coordinates are given in real-world coordinates. + """ + cache_index = 'cellcenters' + if cache_index not in self._cache_dict or \ + self._cache_dict[cache_index].out_of_date: + # get x centers + x = np.add.accumulate(self.__delr) - 0.5 * self.delr + # get y centers + Ly = np.add.reduce(self.__delc) + y = Ly - (np.add.accumulate(self.__delc) - 0.5 * + self.__delc) + x_mesh, y_mesh = np.meshgrid(x, y) + if self.__nlay is not None: + # get z centers + z = np.empty((self.__nlay, self.__nrow, self.__ncol)) + z[0, :, :] = (self._top[:, :] + self._botm[0, :, :]) / 2. + ibs = np.arange(self.__nlay) + quasi3d = [cbd !=0 for cbd in self.__laycbd] + if np.any(quasi3d): + ibs[1:] = ibs[1:] + np.cumsum(quasi3d)[:self.__nlay - 1] + for l, ib in enumerate(ibs[1:], 1): + z[l, :, :] = (self._botm[ib - 1, :, :] + + self._botm[ib, :, :]) / 2. + else: + z = None + if self._has_ref_coordinates: + # transform x and y + x_mesh, y_mesh = self.get_coords(x_mesh, y_mesh) + # store in cache + self._cache_dict[cache_index] = CachedData([x_mesh, y_mesh, z]) + if self._copy_cache: + return self._cache_dict[cache_index].data + else: + return self._cache_dict[cache_index].data_nocopy + + @property + def grid_lines(self): + """ + Get the grid lines as a list + + """ + # get edges initially in model coordinates + use_ref_coords = self.use_ref_coords + self.use_ref_coords = False + xyedges = self.xyedges + self.use_ref_coords = use_ref_coords + + xmin = xyedges[0][0] + xmax = xyedges[0][-1] + ymin = xyedges[1][-1] + ymax = xyedges[1][0] + lines = [] + # Vertical lines + for j in range(self.ncol + 1): + x0 = xyedges[0][j] + x1 = x0 + y0 = ymin + y1 = ymax + lines.append([(x0, y0), (x1, y1)]) + + # horizontal lines + for i in range(self.nrow + 1): + x0 = xmin + x1 = xmax + y0 = xyedges[1][i] + y1 = y0 + lines.append([(x0, y0), (x1, y1)]) + + if self._has_ref_coordinates: + lines_trans = [] + for ln in lines: + lines_trans.append([self.get_coords(*ln[0]), + self.get_coords(*ln[1])]) + return lines_trans + return lines + + @property + def is_regular_x(self): + """ + Test whether the grid spacing is regular in the x direction. 
+ """ + cache_index = 'is_regular_x' + if cache_index not in self._cache_dict or \ + self._cache_dict[cache_index].out_of_date: + # relative tolerance to use in test + rel_tol = 1.e-5 + + # regularity test in x direction + rel_diff_x = (self.__delr - self.__delr[0]) / self.__delr[0] + is_regular_x = np.count_nonzero(np.abs(rel_diff_x) > rel_tol) == 0 + + self._cache_dict[cache_index] = CachedData(is_regular_x) + if self._copy_cache: + return self._cache_dict[cache_index].data + else: + return self._cache_dict[cache_index].data_nocopy + + @property + def is_regular_y(self): + """ + Test whether the grid spacing is regular in the y direction. + """ + cache_index = 'is_regular_y' + if cache_index not in self._cache_dict or \ + self._cache_dict[cache_index].out_of_date: + # relative tolerance to use in test + rel_tol = 1.e-5 + + # regularity test in y direction + rel_diff_y = (self.__delc - self.__delc[0]) / self.__delc[0] + is_regular_y = np.count_nonzero(np.abs(rel_diff_y) > rel_tol) == 0 + + self._cache_dict[cache_index] = CachedData(is_regular_y) + if self._copy_cache: + return self._cache_dict[cache_index].data + else: + return self._cache_dict[cache_index].data_nocopy + + @property + def is_regular_z(self): + """ + Test if the grid spacing is regular in z direction. + """ + cache_index = 'is_regular_z' + if cache_index not in self._cache_dict or \ + self._cache_dict[cache_index].out_of_date: + # relative tolerance to use in test + rel_tol = 1.e-5 + + # regularity test in z direction + rel_diff_thick0 = (self.delz[0, :, :] - self.delz[0, 0, 0]) \ + / self.delz[0, 0, 0] + failed = np.abs(rel_diff_thick0) > rel_tol + is_regular_z = np.count_nonzero(failed) == 0 + for k in range(1, self.nlay): + rel_diff_zk = (self.delz[k, :, :] - self.delz[0, :, :]) \ + / self.delz[0, :, :] + failed = np.abs(rel_diff_zk) > rel_tol + is_regular_z = is_regular_z and np.count_nonzero(failed) == 0 + + self._cache_dict[cache_index] = CachedData(is_regular_z) + if self._copy_cache: + return self._cache_dict[cache_index].data + else: + return self._cache_dict[cache_index].data_nocopy + + @property + def is_regular_xy(self): + """ + Test if the grid spacing is regular and equal in x and y directions. + """ + cache_index = 'is_regular_xy' + if cache_index not in self._cache_dict or \ + self._cache_dict[cache_index].out_of_date: + # relative tolerance to use in test + rel_tol = 1.e-5 + + # test if the first delta is equal in x and z + rel_diff_0 = (self.__delc[0] - self.__delr[0]) / self.__delr[0] + first_equal = np.abs(rel_diff_0) <= rel_tol + + # combine with regularity tests in x and z directions + is_regular_xy = first_equal and self.is_regular_x and \ + self.is_regular_y + + self._cache_dict[cache_index] = CachedData(is_regular_xy) + if self._copy_cache: + return self._cache_dict[cache_index].data + else: + return self._cache_dict[cache_index].data_nocopy + + @property + def is_regular_xz(self): + """ + Test if the grid spacing is regular and equal in x and z directions. 
+ """ + cache_index = 'is_regular_xz' + if cache_index not in self._cache_dict or \ + self._cache_dict[cache_index].out_of_date: + # relative tolerance to use in test + rel_tol = 1.e-5 + + # test if the first delta is equal in x and z + rel_diff_0 = (self.delz[0, 0, 0] - self.__delr[0]) / self.__delr[0] + first_equal = np.abs(rel_diff_0) <= rel_tol + + # combine with regularity tests in x and z directions + is_regular_xz = first_equal and self.is_regular_x and \ + self.is_regular_z + + self._cache_dict[cache_index] = CachedData(is_regular_xz) + if self._copy_cache: + return self._cache_dict[cache_index].data + else: + return self._cache_dict[cache_index].data_nocopy + + @property + def is_regular_yz(self): + """ + Test if the grid spacing is regular and equal in y and z directions. + """ + cache_index = 'is_regular_yz' + if cache_index not in self._cache_dict or \ + self._cache_dict[cache_index].out_of_date: + # relative tolerance to use in test + rel_tol = 1.e-5 + + # test if the first delta is equal in y and z + rel_diff_0 = (self.delz[0, 0, 0] - self.__delc[0]) / self.__delc[0] + first_equal = np.abs(rel_diff_0) <= rel_tol + + # combine with regularity tests in x and y directions + is_regular_yz = first_equal and self.is_regular_y and \ + self.is_regular_z + + self._cache_dict[cache_index] = CachedData(is_regular_yz) + if self._copy_cache: + return self._cache_dict[cache_index].data + else: + return self._cache_dict[cache_index].data_nocopy + + @property + def is_regular(self): + """ + Test if the grid spacing is regular and equal in x, y and z directions. + """ + cache_index = 'is_regular' + if cache_index not in self._cache_dict or \ + self._cache_dict[cache_index].out_of_date: + # relative tolerance to use in test + rel_tol = 1.e-5 + + # test if the first delta is equal in x and z + rel_diff_0 = (self.delz[0, 0, 0] - self.__delr[0]) / self.__delr[0] + first_equal = np.abs(rel_diff_0) <= rel_tol + + # combine with regularity tests in x, y and z directions + is_regular = first_equal and self.is_regular_z and \ + self.is_regular_xy + + self._cache_dict[cache_index] = CachedData(is_regular) + if self._copy_cache: + return self._cache_dict[cache_index].data + else: + return self._cache_dict[cache_index].data_nocopy + + @property + def is_rectilinear(self): + """ + Test whether the grid is rectilinear (it is always so in the x and + y directions, but not necessarily in the z direction). + """ + cache_index = 'is_rectilinear' + if cache_index not in self._cache_dict or \ + self._cache_dict[cache_index].out_of_date: + # relative tolerance to use in test + rel_tol = 1.e-5 + + # rectilinearity test in z direction + is_rect_z = True + for k in range(self.nlay): + rel_diff_zk = (self.delz[k, :, :] - self.delz[k, 0, 0]) \ + / self.delz[k, 0, 0] + failed = np.abs(rel_diff_zk) > rel_tol + is_rect_z = is_rect_z and np.count_nonzero(failed) == 0 + + self._cache_dict[cache_index] = CachedData(is_rect_z) + if self._copy_cache: + return self._cache_dict[cache_index].data + else: + return self._cache_dict[cache_index].data_nocopy + + ############### + ### Methods ### + ############### + def intersect(self, x, y, local=False, forgive=False): + """ + Get the row and column of a point with coordinates x and y + + When the point is on the edge of two cells, the cell with the lowest + row or column is returned. 
+
+        Parameters
+        ----------
+        x : float
+            The x-coordinate of the requested point
+        y : float
+            The y-coordinate of the requested point
+        local: bool (optional)
+            If True, x and y are in local coordinates (defaults to False)
+        forgive: bool (optional)
+            Forgive x, y arguments that fall outside the model grid and
+            return NaNs instead (defaults to False - will throw exception)
+
+        Returns
+        -------
+        row : int
+            The row number
+        col : int
+            The column number
+
+        """
+        # transform x and y to local coordinates
+        x, y = super(StructuredGrid, self).intersect(x, y, local, forgive)
+
+        # get the cell edges in local coordinates
+        xe, ye = self.xyedges
+
+        xcomp = x > xe
+        if np.all(xcomp) or not np.any(xcomp):
+            if forgive:
+                col = np.nan
+            else:
+                raise Exception(
+                    'x, y point given is outside of the model area')
+        else:
+            col = np.where(xcomp)[0][-1]
+
+        ycomp = y < ye
+        if np.all(ycomp) or not np.any(ycomp):
+            if forgive:
+                row = np.nan
+            else:
+                raise Exception(
+                    'x, y point given is outside of the model area')
+        else:
+            row = np.where(ycomp)[0][-1]
+        if np.any(np.isnan([row, col])):
+            row = col = np.nan
+        return row, col
+
+    def _cell_vert_list(self, i, j):
+        """Get vertices for a single cell or sequence of i, j locations."""
+        self._copy_cache = False
+        pts = []
+        xgrid, ygrid = self.xvertices, self.yvertices
+        pts.append([xgrid[i, j], ygrid[i, j]])
+        pts.append([xgrid[i + 1, j], ygrid[i + 1, j]])
+        pts.append([xgrid[i + 1, j + 1], ygrid[i + 1, j + 1]])
+        pts.append([xgrid[i, j + 1], ygrid[i, j + 1]])
+        pts.append([xgrid[i, j], ygrid[i, j]])
+        self._copy_cache = True
+        if np.isscalar(i):
+            return pts
+        else:
+            vrts = np.array(pts).transpose([2, 0, 1])
+            return [v.tolist() for v in vrts]
+
+    def get_cell_vertices(self, i, j):
+        """
+        Method to get a set of cell vertices for a single cell, used in the
+        shapefile export utilities.
+        :param i: (int) cell row number
+        :param j: (int) cell column number
+        :return: list of x, y cell vertices
+        """
+        self._copy_cache = False
+        cell_verts = [(self.xvertices[i, j], self.yvertices[i, j]),
+                      (self.xvertices[i, j+1], self.yvertices[i, j+1]),
+                      (self.xvertices[i+1, j+1], self.yvertices[i+1, j+1]),
+                      (self.xvertices[i+1, j], self.yvertices[i+1, j])]
+        self._copy_cache = True
+        return cell_verts
+
+    def plot(self, **kwargs):
+        """
+        Plot the grid lines.
+
+        Parameters
+        ----------
+        kwargs : ax, colors.  The remaining kwargs are passed into the
+            LineCollection constructor.
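A short sketch for the two helpers above (matplotlib is assumed to be installed; per the docstring, the colors keyword is handed through to the LineCollection):

    verts = grid.get_cell_vertices(0, 0)  # four (x, y) corners of cell (0, 0)
    lc = grid.plot(colors='grey')         # draws the grid, returns the
                                          # LineCollection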
+
+        Returns
+        -------
+        lc : matplotlib.collections.LineCollection
+
+        """
+        from ..plot import PlotMapView
+
+        mm = PlotMapView(modelgrid=self)
+        return mm.plot_grid(**kwargs)
+
+    # Importing
+    @classmethod
+    def from_gridspec(cls, gridspec_file, lenuni=0):
+        f = open(gridspec_file, 'r')
+        raw = f.readline().strip().split()
+        nrow = int(raw[0])
+        ncol = int(raw[1])
+        raw = f.readline().strip().split()
+        xul, yul, rot = float(raw[0]), float(raw[1]), float(raw[2])
+        delr = []
+        j = 0
+        while j < ncol:
+            raw = f.readline().strip().split()
+            for r in raw:
+                if '*' in r:
+                    rraw = r.split('*')
+                    for n in range(int(rraw[0])):
+                        delr.append(float(rraw[1]))
+                        j += 1
+                else:
+                    delr.append(float(r))
+                    j += 1
+        delc = []
+        i = 0
+        while i < nrow:
+            raw = f.readline().strip().split()
+            for r in raw:
+                if '*' in r:
+                    rraw = r.split('*')
+                    for n in range(int(rraw[0])):
+                        delc.append(float(rraw[1]))
+                        i += 1
+                else:
+                    delc.append(float(r))
+                    i += 1
+        f.close()
+        grd = cls(np.array(delc), np.array(delr), lenuni=lenuni)
+        xll = grd._xul_to_xll(xul)
+        yll = grd._yul_to_yll(yul)
+        grd.set_coord_info(xoff=xll, yoff=yll, angrot=rot)
+        return grd
+
+    # Exporting
+    def write_shapefile(self, filename='grid.shp', epsg=None, prj=None):
+        """
+        Write a shapefile of the grid with just the row and column attributes.
+        """
+        from ..export.shapefile_utils import write_grid_shapefile
+        if epsg is None and prj is None:
+            epsg = self.epsg
+        write_grid_shapefile(filename, self, array_dict={}, nan_val=-1.0e9,
+                             epsg=epsg, prj=prj)
+
+    def array_at_verts_basic(self, a):
+        """
+        Computes values at cell vertices using neighbor averaging.
+
+        Parameters
+        ----------
+        a : ndarray
+            Array values at cell centers.
+
+        Returns
+        -------
+        averts : ndarray
+            Array values at cell vertices, shape
+            (a.shape[0]+1, a.shape[1]+1, a.shape[2]+1). NaN values are
+            assigned in accordance with inactive cells defined by idomain.
+        """
+        assert a.ndim == 3
+        shape_verts = (a.shape[0]+1, a.shape[1]+1, a.shape[2]+1)
+
+        # set to NaN where idomain==0
+        a[self._idomain==0] = np.nan
+
+        # create a 4D array of size (nlay+1, nrow+1, ncol+1, 8)
+        averts4d = np.full(shape_verts + (8,), np.nan)
+        averts4d[:-1, :-1, :-1, 0] = a
+        averts4d[:-1, :-1, 1:, 1] = a
+        averts4d[:-1, 1:, :-1, 2] = a
+        averts4d[:-1, 1:, 1:, 3] = a
+        averts4d[1:, :-1, :-1, 4] = a
+        averts4d[1:, :-1, 1:, 5] = a
+        averts4d[1:, 1:, :-1, 6] = a
+        averts4d[1:, 1:, 1:, 7] = a
+
+        # calculate the mean over the last axis, ignoring NaNs
+        averts = np.nanmean(averts4d, axis=3)
+
+        return averts
+
+    def array_at_verts(self, a):
+        """
+        Interpolate array values at cell vertices.
+
+        Parameters
+        ----------
+        a : ndarray
+            Array values. Allowed shapes are: (nlay, nrow, ncol),
+            (nlay, nrow, ncol+1), (nlay, nrow+1, ncol) and
+            (nlay+1, nrow, ncol).
+            * When the shape is (nlay, nrow, ncol), input values are
+              considered at cell centers, and output values are computed by
+              trilinear interpolation.
+            * When the shape is extended in one direction, input values are
+              considered at the center of cell faces in this direction, and
+              output values are computed by bilinear interpolation in planes
+              defined by these cell faces.
+
+        Returns
+        -------
+        averts : ndarray
+            Array values interpolated at cell vertices, shape
+            (nlay+1, nrow+1, ncol+1).
+
+        Notes
+        -----
+        * Output values are smooth (continuous) even if top elevations or
+          bottom elevations are not constant across layers (i.e., in this
+          case, vertices of neighboring cells are implicitly merged).
+ * NaN values are assigned in accordance with inactive cells defined + by idomain. + """ + import scipy.interpolate as interp + + # define shapes + shape_ext_x = (self.nlay, self.nrow, self.ncol+1) + shape_ext_y = (self.nlay, self.nrow+1, self.ncol) + shape_ext_z = (self.nlay+1, self.nrow, self.ncol) + shape_verts = (self.nlay+1, self.nrow+1, self.ncol+1) + + # get inactive cells + if self._idomain is not None: + inactive = self._idomain == 0 + + # get local x and y cell center coordinates (1d arrays) + xcenters, ycenters = self.xycenters + + # get z center coordinates: make the grid rectilinear if it is not, + # in order to always use RegularGridInterpolator; in most cases this + # will give better results than with the non-structured interpolator + # LinearNDInterpolator (in addition, it will run faster) + zcenters = self.zcellcenters + if self._idomain is not None: + zcenters = np.where(inactive, np.nan, zcenters) + if not self.is_rectilinear or \ + np.count_nonzero(np.isnan(zcenters)) != 0: + zedges = np.nanmean(self.top_botm_withnan, axis=(1, 2)) + else: + zedges = self.top_botm_withnan[:, 0, 0] + zcenters = 0.5 * (zedges[1:] + zedges[:-1]) + + # test grid regularity in z + rel_tol = 1.e-5 + delz = np.diff(zedges) + rel_diff = (delz - delz[0]) / delz[0] + _is_regular_z = np.count_nonzero(np.abs(rel_diff) > rel_tol) == 0 + + # test equality of first grid spacing in x and z, and in y and z + first_equal_xz = np.abs(self.__delr[0] - delz[0]) / delz[0] <= rel_tol + first_equal_yz = np.abs(self.__delc[0] - delz[0]) / delz[0] <= rel_tol + + # get output coordinates (i.e. vertices) + xedges, yedges = self.xyedges + xedges = xedges.reshape((1, 1, self.ncol+1)) + xoutput = xedges * np.ones(shape_verts) + yedges = yedges.reshape((1, self.nrow+1, 1)) + youtput = yedges * np.ones(shape_verts) + zoutput = zedges.reshape((self.nlay+1, 1, 1)) + zoutput = zoutput * np.ones(shape_verts) + + # indicator of whether basic interpolation is used or not + basic = False + + if a.shape == self.shape: + # set array to NaN where inactive + if self._idomain is not None: + inactive = self._idomain == 0 + a = np.where(inactive, np.nan, a) + + # perform basic interpolation (this will be useful in all cases) + averts_basic = self.array_at_verts_basic(a) + + if self.is_regular_xy and _is_regular_z and first_equal_xz: + # in this case, basic interpolation is the correct one + averts = averts_basic + basic = True + + else: + if self.nlay == 1: + # in this case we need a 2d interpolation in the x, y plane + # flip y coordinates because RegularGridInterpolator + # requires increasing input coordinates + xyinput = (np.flip(ycenters), xcenters) + a = np.squeeze(np.flip(a, axis=[1])) + # interpolate + interp_func = interp.RegularGridInterpolator(xyinput, a, + bounds_error=False, fill_value=np.nan) + xyoutput = np.empty((youtput[0, :, :].size, 2)) + xyoutput[:, 0] = youtput[0, :, :].ravel() + xyoutput[:, 1] = xoutput[0, :, :].ravel() + averts2d = interp_func(xyoutput) + averts2d = averts2d.reshape((1, self.nrow+1, self.ncol+1)) + averts = averts2d * np.ones(shape_verts) + elif self.nrow == 1: + # in this case we need a 2d interpolation in the x, z plane + # flip z coordinates because RegularGridInterpolator + # requires increasing input coordinates + xzinput = (np.flip(zcenters), xcenters) + a = np.squeeze(np.flip(a, axis=[0])) + # interpolate + interp_func = interp.RegularGridInterpolator(xzinput, a, + bounds_error=False, fill_value=np.nan) + xzoutput = np.empty((zoutput[:, 0, :].size, 2)) + xzoutput[:, 0] = zoutput[:, 0, 
:].ravel() + xzoutput[:, 1] = xoutput[:, 0, :].ravel() + averts2d = interp_func(xzoutput) + averts2d = averts2d.reshape((self.nlay+1, 1, self.ncol+1)) + averts = averts2d * np.ones(shape_verts) + elif self.ncol == 1: + # in this case we need a 2d interpolation in the y, z plane + # flip y and z coordinates because RegularGridInterpolator + # requires increasing input coordinates + yzinput = (np.flip(zcenters), np.flip(ycenters)) + a = np.squeeze(np.flip(a, axis=[0, 1])) + # interpolate + interp_func = interp.RegularGridInterpolator(yzinput, a, + bounds_error=False, fill_value=np.nan) + yzoutput = np.empty((zoutput[:, :, 0].size, 2)) + yzoutput[:, 0] = zoutput[:, :, 0].ravel() + yzoutput[:, 1] = youtput[:, :, 0].ravel() + averts2d = interp_func(yzoutput) + averts2d = averts2d.reshape((self.nlay+1, self.nrow+1, 1)) + averts = averts2d * np.ones(shape_verts) + else: + # 3d interpolation + # flip y and z coordinates because RegularGridInterpolator + # requires increasing input coordinates + xyzinput = (np.flip(zcenters), np.flip(ycenters), xcenters) + a = np.flip(a, axis=[0, 1]) + # interpolate + interp_func = interp.RegularGridInterpolator(xyzinput, a, + bounds_error=False, fill_value=np.nan) + xyzoutput = np.empty((zoutput.size, 3)) + xyzoutput[:, 0] = zoutput.ravel() + xyzoutput[:, 1] = youtput.ravel() + xyzoutput[:, 2] = xoutput.ravel() + averts = interp_func(xyzoutput) + averts = averts.reshape(shape_verts) + + elif a.shape == shape_ext_x: + # set array to NaN where inactive on both side + if self._idomain is not None: + inactive_ext_x = np.full(shape_ext_x, True) + inactive_ext_x[:, :, :-1] = inactive + inactive_ext_x[:, :, 1:] = np.logical_and( + inactive_ext_x[:, :, 1:], inactive) + a = np.where(inactive_ext_x, np.nan, a) + + averts = np.empty(shape_verts, dtype=a.dtype) + averts_basic = np.empty(shape_verts, dtype=a.dtype) + for j in range(self.ncol+1): + # perform basic interpolation (will be useful in all cases) + averts_basic[:, :, j] = array_at_verts_basic2d(a[:, :, j]) + + if self.is_regular_y and _is_regular_z and first_equal_yz: + # in this case, basic interpolation is the correct one + averts2d = averts_basic[:, :, j] + basic = True + + else: + if self.nlay == 1: + # in this case we need a 1d interpolation along y + averts1d = array_at_faces_1d(a[0, :, j], self.__delc) + averts2d = averts1d.reshape((1, self.nrow+1)) + averts2d = averts2d * np.ones((2, self.nrow+1)) + elif self.nrow == 1: + # in this case we need a 1d interpolation along z + delz1d = np.abs(np.diff(self.zverts_smooth[:, 0, j])) + averts1d = array_at_faces_1d(a[:, 0, j], delz1d) + averts2d = averts1d.reshape((self.nlay+1, 1)) + averts2d = averts2d * np.ones((self.nlay+1, 2)) + else: + # 2d interpolation + # flip y and z coordinates because + # RegularGridInterpolator requires increasing input + # coordinates + yzinput = (np.flip(zcenters), np.flip(ycenters)) + a2d = np.flip(a[:, :, j], axis=[0, 1]) + interp_func = interp.RegularGridInterpolator(yzinput, + a2d, bounds_error=False, fill_value=np.nan) + yzoutput = np.empty((zoutput[:, :, j].size, 2)) + yzoutput[:, 0] = zoutput[:, :, j].ravel() + yzoutput[:, 1] = youtput[:, :, j].ravel() + averts2d = interp_func(yzoutput) + averts2d = averts2d.reshape(zoutput[:, :, j].shape) + + averts[:, :, j] = averts2d + + elif a.shape == shape_ext_y: + # set array to NaN where inactive on both side + if self._idomain is not None: + inactive_ext_y = np.full(shape_ext_y, True) + inactive_ext_y[:, :-1, :] = inactive + inactive_ext_y[:, 1:, :] = np.logical_and( + inactive_ext_y[:, 
1:, :], inactive) + a = np.where(inactive_ext_y, np.nan, a) + + averts = np.empty(shape_verts, dtype=a.dtype) + averts_basic = np.empty(shape_verts, dtype=a.dtype) + for i in range(self.nrow+1): + # perform basic interpolation (will be useful in all cases) + averts_basic[:, i, :] = array_at_verts_basic2d(a[:, i, :]) + + if self.is_regular_x and _is_regular_z and first_equal_xz: + # in this case, basic interpolation is the correct one + averts2d = averts_basic[:, i, :] + basic = True + + else: + if self.nlay == 1: + # in this case we need a 1d interpolation along x + averts1d = array_at_faces_1d(a[0, i, :], self.__delr) + averts2d = averts1d.reshape((1, self.ncol+1)) + averts2d = averts2d * np.ones((2, self.ncol+1)) + elif self.ncol == 1: + # in this case we need a 1d interpolation along z + delz1d = np.abs(np.diff(self.zverts_smooth[:, i, 0])) + averts1d = array_at_faces_1d(a[:, i, 0], delz1d) + averts2d = averts1d.reshape((self.nlay+1, 1)) + averts2d = averts2d * np.ones((self.nlay+1, 2)) + else: + # 2d interpolation + # flip z coordinates because RegularGridInterpolator + # requires increasing input coordinates + xzinput = (np.flip(zcenters), xcenters) + a2d = np.flip(a[:, i, :], axis=[0]) + interp_func = interp.RegularGridInterpolator(xzinput, + a2d, bounds_error=False, fill_value=np.nan) + xzoutput = np.empty((zoutput[:, i, :].size, 2)) + xzoutput[:, 0] = zoutput[:, i, :].ravel() + xzoutput[:, 1] = xoutput[:, i, :].ravel() + averts2d = interp_func(xzoutput) + averts2d = averts2d.reshape(zoutput[:, i, :].shape) + + averts[:, i, :] = averts2d + + elif a.shape == shape_ext_z: + # set array to NaN where inactive on both side + if self._idomain is not None: + inactive_ext_z = np.full(shape_ext_z, True) + inactive_ext_z[:-1, :, :] = inactive + inactive_ext_z[1:, :, :] = np.logical_and( + inactive_ext_z[1:, :, :], inactive) + a = np.where(inactive_ext_z, np.nan, a) + + averts = np.empty(shape_verts, dtype=a.dtype) + averts_basic = np.empty(shape_verts, dtype=a.dtype) + for k in range(self.nlay+1): + # perform basic interpolation (will be useful in all cases) + averts_basic[k, :, :] = array_at_verts_basic2d(a[k, :, :]) + + if self.is_regular_xy: + # in this case, basic interpolation is the correct one + averts2d = averts_basic[k, :, :] + basic = True + + else: + if self.nrow == 1: + # in this case we need a 1d interpolation along x + averts1d = array_at_faces_1d(a[k, 0, :], self.__delr) + averts2d = averts1d.reshape((1, self.ncol+1)) + averts2d = averts2d * np.ones((2, self.ncol+1)) + elif self.ncol == 1: + # in this case we need a 1d interpolation along y + averts1d = array_at_faces_1d(a[k, :, 0], self.__delc) + averts2d = averts1d.reshape((self.nrow+1, 1)) + averts2d = averts2d * np.ones((self.nrow+1, 2)) + else: + # 2d interpolation + # flip y coordinates because RegularGridInterpolator + # requires increasing input coordinates + xyinput = (np.flip(ycenters), xcenters) + a2d = np.flip(a[k, :, :], axis=[0]) + interp_func = interp.RegularGridInterpolator(xyinput, + a2d, bounds_error=False, fill_value=np.nan) + xyoutput = np.empty((youtput[k, :, :].size, 2)) + xyoutput[:, 0] = youtput[k, :, :].ravel() + xyoutput[:, 1] = xoutput[k, :, :].ravel() + averts2d = interp_func(xyoutput) + averts2d = averts2d.reshape(youtput[k, :, :].shape) + + averts[k, :, :] = averts2d + + if not basic: + # use basic interpolation for remaining NaNs at boundaries + where_nan = np.isnan(averts) + averts[where_nan] = averts_basic[where_nan] + + return averts + + def array_at_faces(self, a, direction, withnan=True): + 
""" + Computes values at the center of cell faces using linear interpolation. + + Parameters + ---------- + a : ndarray + Values at cell centers, shape (nlay, row, ncol). + direction : str, possible values are 'x', 'y' and 'z' + Direction in which values will be interpolated at cell faces. + withnan : bool + If True (default), the result value will be set to NaN where the + cell face sits between inactive cells. If False, not. + + Returns + ------- + afaces : ndarray + Array values interpolated at cell vertices, shape as input extended + by 1 along the specified direction. + + """ + # get the dimension that corresponds to the direction + dir_to_dim = {'x': 2, 'y': 1, 'z': 0} + dim = dir_to_dim[direction] + + # extended array with ghost cells on both sides having zero values + ghost_shape = list(a.shape) + ghost_shape[dim] += 2 + a_ghost = np.zeros(ghost_shape, dtype=a.dtype) + + # extended delta with ghost cells on both sides having zero values + delta_ghost = np.zeros(ghost_shape, dtype=a.dtype) + + # inactive bool array + if withnan and self._idomain is not None: + inactive = self._idomain == 0 + + if dim == 0: + # fill array with ghost cells + a_ghost[1:-1, :, :] = a + a_ghost[0, :, :] = a[0, :, :] + a_ghost[-1, :, :] = a[-1, :, :] + + # calculate weights + delta_ghost[1:-1, :, :] = self.delz + weight2 = delta_ghost[:-1, :, :] / (delta_ghost[:-1, :, :] + \ + delta_ghost[1:, :, :]) + weight1 = 1. - weight2 + + # interpolate + afaces = a_ghost[:-1, :, :]*weight1 + a_ghost[1:, :, :]*weight2 + + # assign NaN where idomain==0 on both sides + if withnan and self._idomain is not None: + inactive_faces = np.full(afaces.shape, True) + inactive_faces[:-1, :, :] = np.logical_and( + inactive_faces[:-1, :, :], inactive) + inactive_faces[1:, :, :] = np.logical_and( + inactive_faces[1:, :, :], inactive) + afaces[inactive_faces] = np.nan + + elif dim == 1: + # fill array with ghost cells + a_ghost[:, 1:-1, :] = a + a_ghost[:, 0, :] = a[:, 0, :] + a_ghost[:, -1, :] = a[:, -1, :] + + # calculate weights + delc = np.reshape(self.delc, (1, self.nrow, 1)) + delc_3D = delc * np.ones(a.shape) + delta_ghost[:, 1:-1, :] = delc_3D + weight2 = delta_ghost[:, :-1, :] / (delta_ghost[:, :-1, :] + \ + delta_ghost[:, 1:, :]) + weight1 = 1. - weight2 + + # interpolate + afaces = a_ghost[:, :-1, :]*weight1 + a_ghost[:, 1:, :]*weight2 + + # assign NaN where idomain==0 on both sides + if withnan and self._idomain is not None: + inactive_faces = np.full(afaces.shape, True) + inactive_faces[:, :-1, :] = np.logical_and( + inactive_faces[:, :-1, :], inactive) + inactive_faces[:, 1:, :] = np.logical_and( + inactive_faces[:, 1:, :], inactive) + afaces[inactive_faces] = np.nan + + elif dim == 2: + # fill array with ghost cells + a_ghost[:, :, 1:-1] = a + a_ghost[:, :, 0] = a[:, :, 0] + a_ghost[:, :, -1] = a[:, :, -1] + + # calculate weights + delr = np.reshape(self.delr, (1, 1, self.ncol)) + delr_3D = delr * np.ones(a.shape) + delta_ghost[:, :, 1:-1] = delr_3D + weight2 = delta_ghost[:, :, :-1] / (delta_ghost[:, :, :-1] + \ + delta_ghost[:, :, 1:]) + weight1 = 1. 
- weight2 + + # interpolate + afaces = a_ghost[:, :, :-1]*weight1 + a_ghost[:, :, 1:]*weight2 + + # assign NaN where idomain==0 on both sides + if withnan and self._idomain is not None: + inactive_faces = np.full(afaces.shape, True) + inactive_faces[:, :, :-1] = np.logical_and( + inactive_faces[:, :, :-1], inactive) + inactive_faces[:, :, 1:] = np.logical_and( + inactive_faces[:, :, 1:], inactive) + afaces[inactive_faces] = np.nan + + return afaces + +if __name__ == "__main__": + import matplotlib.pyplot as plt + delc = np.ones((10,)) * 1 + delr = np.ones((20,)) * 1 + + top = np.ones((10, 20)) * 2000 + botm = np.ones((1, 10, 20)) * 1100 + + t = StructuredGrid(delc, delr, top, botm, xoff=0, yoff=0, + angrot=45) + + #plt.scatter(np.ravel(t.xcenters), np.ravel(t.ycenters), c="b") + #t.plot_grid_lines() + #plt.show() + #plt.close() + + #delc = np.ones(10,) * 2 + #t.delc = delc + + #plt.scatter(np.ravel(t.xcenters), np.ravel(t.ycenters), c="b") + #t.plot_grid_lines() + #plt.show() + + t.use_ref_coords = False + x = t.xvertices + y = t.yvertices + xc = t.xcellcenters + yc = t.ycellcenters + #extent = t.extent + grid = t.grid_lines + + t.use_ref_coords = True + sr_x = t.xvertices + sr_y = t.yvertices + sr_xc = t.xcellcenters + sr_yc = t.ycellcenters + #sr_extent = t.extent + sr_grid = t.grid_lines + print(sr_grid) + #t.plot_grid_lines() + #plt.show() diff --git a/flopy/discretization/unstructuredgrid.py b/flopy/discretization/unstructuredgrid.py index af4235b47d..04e7d5a7f1 100644 --- a/flopy/discretization/unstructuredgrid.py +++ b/flopy/discretization/unstructuredgrid.py @@ -1,291 +1,291 @@ -import numpy as np -from .grid import Grid, CachedData - - -class UnstructuredGrid(Grid): - """ - Class for an unstructured model grid - - Parameters - ---------- - vertices - list of vertices that make up the grid - cell2d - list of cells and their vertices - - Properties - ---------- - vertices - returns list of vertices that make up the grid - cell2d - returns list of cells and their vertices - - Methods - ---------- - get_cell_vertices(cellid) - returns vertices for a single cell at cellid. 
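For contrast with the structured case, a minimal construction sketch for this class (illustrative values only; the full constructor signature appears just below, and the remaining geometry is assumed to be built lazily):

    import numpy as np
    from flopy.discretization.unstructuredgrid import UnstructuredGrid

    # a single triangular cell described by three shared vertices
    verts = [[0., 0.], [1., 0.], [0., 1.]]
    iverts = [[0, 1, 2]]
    xc = np.array([1. / 3.])
    yc = np.array([1. / 3.])
    ug = UnstructuredGrid(vertices=verts, iverts=iverts, xcenters=xc,
                          ycenters=yc, ncpl=[1])
    print(ug.nlay, ug.ncpl)  # 1 [1]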
- """ - def __init__(self, vertices=None, iverts=None, xcenters=None, ycenters=None, - top=None, botm=None, idomain=None, lenuni=None, - ncpl=None, epsg=None, proj4=None, prj=None, - xoff=0., yoff=0., angrot=0., layered=True, nodes=None): - super(UnstructuredGrid, self).__init__('unstructured', top, botm, - idomain, lenuni, epsg, proj4, - prj, xoff, yoff, angrot) - - self._vertices = vertices - self._iverts = iverts - self._top = top - self._botm = botm - self._ncpl = ncpl - self._layered = layered - self._xc = xcenters - self._yc = ycenters - self._nodes = nodes - - if iverts is not None: - if self.layered: - assert np.all([n == len(iverts) for n in ncpl]) - assert np.array(self.xcellcenters).shape[0] == self.ncpl[0] - assert np.array(self.ycellcenters).shape[0] == self.ncpl[0] - else: - msg = ('Length of iverts must equal ncpl.sum ' - '({} {})'.format(len(iverts), ncpl)) - assert len(iverts) == np.sum(ncpl), msg - assert np.array(self.xcellcenters).shape[0] == self.ncpl - assert np.array(self.ycellcenters).shape[0] == self.ncpl - - @property - def is_valid(self): - if self._nodes is not None: - return True - return False - - @property - def is_complete(self): - if self._nodes is not None and \ - super(UnstructuredGrid, self).is_complete: - return True - return False - - @property - def nlay(self): - if self.layered: - try: - return len(self.ncpl) - except TypeError: - return 1 - else: - return 1 - - @property - def layered(self): - return self._layered - - @property - def nnodes(self): - if self._nodes is not None: - return self._nodes - else: - return self.nlay * self.ncpl - - @property - def ncpl(self): - if self._ncpl is None: - if self._iverts is None: - return None - else: - return len(self._iverts) - return self._ncpl - - @property - def shape(self): - if self.ncpl is None: - return self.nnodes - if isinstance(self.ncpl, (list, np.ndarray)): - return self.nlay, self.ncpl[0] - else: - return self.nlay, self.ncpl - - @property - def extent(self): - self._copy_cache = False - xvertices = np.hstack(self.xvertices) - yvertices = np.hstack(self.yvertices) - self._copy_cache = True - return (np.min(xvertices), - np.max(xvertices), - np.min(yvertices), - np.max(yvertices)) - - @property - def grid_lines(self): - """ - Creates a series of grid line vertices for drawing - a model grid line collection - - Returns: - list: grid line vertices - """ - self._copy_cache = False - xgrid = self.xvertices - ygrid = self.yvertices - - lines = [] - for ncell, verts in enumerate(xgrid): - for ix, vert in enumerate(verts): - lines.append([(xgrid[ncell][ix - 1], ygrid[ncell][ix - 1]), - (xgrid[ncell][ix], ygrid[ncell][ix])]) - self._copy_cache = True - return lines - - @property - def xyzcellcenters(self): - """ - Method to get cell centers and set to grid - """ - cache_index = 'cellcenters' - if cache_index not in self._cache_dict or \ - self._cache_dict[cache_index].out_of_date: - self._build_grid_geometry_info() - if self._copy_cache: - return self._cache_dict[cache_index].data - else: - return self._cache_dict[cache_index].data_nocopy - - @property - def xyzvertices(self): - """ - Method to get model grid verticies - - Returns: - list of dimension ncpl by nvertices - """ - cache_index = 'xyzgrid' - if cache_index not in self._cache_dict or \ - self._cache_dict[cache_index].out_of_date: - self._build_grid_geometry_info() - if self._copy_cache: - return self._cache_dict[cache_index].data - else: - return self._cache_dict[cache_index].data_nocopy - - def intersect(self, x, y, local=False, forgive=False): 
- x, y = super(UnstructuredGrid, self).intersect(x, y, local, forgive) - raise Exception('Not implemented yet') - - def get_cell_vertices(self, cellid): - """ - Method to get a set of cell vertices for a single cell - used in the Shapefile export utilities - :param cellid: (int) cellid number - :return: list of x,y cell vertices - """ - self._copy_cache = False - cell_vert = list(zip(self.xvertices[cellid], - self.yvertices[cellid])) - self._copy_cache = True - return cell_vert - - def _build_grid_geometry_info(self): - cache_index_cc = 'cellcenters' - cache_index_vert = 'xyzgrid' - - vertexdict = {ix: list(v[-2:]) - for ix, v in enumerate(self._vertices)} - - xcenters = self._xc - ycenters = self._yc - xvertices = [] - yvertices = [] - - # build xy vertex and cell center info - for iverts in self._iverts: - - xcellvert = [] - ycellvert = [] - for ix in iverts: - xcellvert.append(vertexdict[ix][0]) - ycellvert.append(vertexdict[ix][1]) - - xvertices.append(xcellvert) - yvertices.append(ycellvert) - - zvertices, zcenters = self._zcoords() - - if self._has_ref_coordinates: - # transform x and y - xcenters, ycenters = self.get_coords(xcenters, ycenters) - xvertxform = [] - yvertxform = [] - # vertices are a list within a list - for xcellvertices, ycellvertices in zip(xvertices, yvertices): - xcellvertices, \ - ycellvertices = self.get_coords(xcellvertices, ycellvertices) - xvertxform.append(xcellvertices) - yvertxform.append(ycellvertices) - xvertices = xvertxform - yvertices = yvertxform - - self._cache_dict[cache_index_cc] = CachedData([xcenters, - ycenters, - zcenters]) - self._cache_dict[cache_index_vert] = CachedData([xvertices, - yvertices, - zvertices]) - - @classmethod - def from_argus_export(cls, fname, nlay=1): - """ - Create a new SpatialReferenceUnstructured grid from an Argus One - Trimesh file - - Parameters - ---------- - fname : string - File name - - nlay : int - Number of layers to create - - Returns - ------- - sru : flopy.utils.reference.SpatialReferenceUnstructured - - """ - from ..utils.geometry import get_polygon_centroid - f = open(fname, 'r') - line = f.readline() - ll = line.split() - ncells, nverts = ll[0:2] - ncells = int(ncells) - nverts = int(nverts) - verts = np.empty((nverts, 2), dtype=np.float) - xc = np.empty((ncells), dtype=np.float) - yc = np.empty((ncells), dtype=np.float) - - # read the vertices - f.readline() - for ivert in range(nverts): - line = f.readline() - ll = line.split() - c, iv, x, y = ll[0:4] - verts[ivert, 0] = x - verts[ivert, 1] = y - - # read the cell information and create iverts, xc, and yc - iverts = [] - for icell in range(ncells): - line = f.readline() - ll = line.split() - ivlist = [] - for ic in ll[2:5]: - ivlist.append(int(ic) - 1) - if ivlist[0] != ivlist[-1]: - ivlist.append(ivlist[0]) - iverts.append(ivlist) - xc[icell], yc[icell] = get_polygon_centroid(verts[ivlist, :]) - - # close file and return spatial reference - f.close() - return cls(verts, iverts, xc, yc, ncpl=np.array(nlay * [len(iverts)])) +import numpy as np +from .grid import Grid, CachedData + + +class UnstructuredGrid(Grid): + """ + Class for an unstructured model grid + + Parameters + ---------- + vertices + list of vertices that make up the grid + cell2d + list of cells and their vertices + + Properties + ---------- + vertices + returns list of vertices that make up the grid + cell2d + returns list of cells and their vertices + + Methods + ---------- + get_cell_vertices(cellid) + returns vertices for a single cell at cellid. 
+ """ + def __init__(self, vertices=None, iverts=None, xcenters=None, ycenters=None, + top=None, botm=None, idomain=None, lenuni=None, + ncpl=None, epsg=None, proj4=None, prj=None, + xoff=0., yoff=0., angrot=0., layered=True, nodes=None): + super(UnstructuredGrid, self).__init__('unstructured', top, botm, + idomain, lenuni, epsg, proj4, + prj, xoff, yoff, angrot) + + self._vertices = vertices + self._iverts = iverts + self._top = top + self._botm = botm + self._ncpl = ncpl + self._layered = layered + self._xc = xcenters + self._yc = ycenters + self._nodes = nodes + + if iverts is not None: + if self.layered: + assert np.all([n == len(iverts) for n in ncpl]) + assert np.array(self.xcellcenters).shape[0] == self.ncpl[0] + assert np.array(self.ycellcenters).shape[0] == self.ncpl[0] + else: + msg = ('Length of iverts must equal ncpl.sum ' + '({} {})'.format(len(iverts), ncpl)) + assert len(iverts) == np.sum(ncpl), msg + assert np.array(self.xcellcenters).shape[0] == self.ncpl + assert np.array(self.ycellcenters).shape[0] == self.ncpl + + @property + def is_valid(self): + if self._nodes is not None: + return True + return False + + @property + def is_complete(self): + if self._nodes is not None and \ + super(UnstructuredGrid, self).is_complete: + return True + return False + + @property + def nlay(self): + if self.layered: + try: + return len(self.ncpl) + except TypeError: + return 1 + else: + return 1 + + @property + def layered(self): + return self._layered + + @property + def nnodes(self): + if self._nodes is not None: + return self._nodes + else: + return self.nlay * self.ncpl + + @property + def ncpl(self): + if self._ncpl is None: + if self._iverts is None: + return None + else: + return len(self._iverts) + return self._ncpl + + @property + def shape(self): + if self.ncpl is None: + return self.nnodes + if isinstance(self.ncpl, (list, np.ndarray)): + return self.nlay, self.ncpl[0] + else: + return self.nlay, self.ncpl + + @property + def extent(self): + self._copy_cache = False + xvertices = np.hstack(self.xvertices) + yvertices = np.hstack(self.yvertices) + self._copy_cache = True + return (np.min(xvertices), + np.max(xvertices), + np.min(yvertices), + np.max(yvertices)) + + @property + def grid_lines(self): + """ + Creates a series of grid line vertices for drawing + a model grid line collection + + Returns: + list: grid line vertices + """ + self._copy_cache = False + xgrid = self.xvertices + ygrid = self.yvertices + + lines = [] + for ncell, verts in enumerate(xgrid): + for ix, vert in enumerate(verts): + lines.append([(xgrid[ncell][ix - 1], ygrid[ncell][ix - 1]), + (xgrid[ncell][ix], ygrid[ncell][ix])]) + self._copy_cache = True + return lines + + @property + def xyzcellcenters(self): + """ + Method to get cell centers and set to grid + """ + cache_index = 'cellcenters' + if cache_index not in self._cache_dict or \ + self._cache_dict[cache_index].out_of_date: + self._build_grid_geometry_info() + if self._copy_cache: + return self._cache_dict[cache_index].data + else: + return self._cache_dict[cache_index].data_nocopy + + @property + def xyzvertices(self): + """ + Method to get model grid verticies + + Returns: + list of dimension ncpl by nvertices + """ + cache_index = 'xyzgrid' + if cache_index not in self._cache_dict or \ + self._cache_dict[cache_index].out_of_date: + self._build_grid_geometry_info() + if self._copy_cache: + return self._cache_dict[cache_index].data + else: + return self._cache_dict[cache_index].data_nocopy + + def intersect(self, x, y, local=False, forgive=False): 
+ x, y = super(UnstructuredGrid, self).intersect(x, y, local, forgive) + raise Exception('Not implemented yet') + + def get_cell_vertices(self, cellid): + """ + Method to get a set of cell vertices for a single cell + used in the Shapefile export utilities + :param cellid: (int) cellid number + :return: list of x,y cell vertices + """ + self._copy_cache = False + cell_vert = list(zip(self.xvertices[cellid], + self.yvertices[cellid])) + self._copy_cache = True + return cell_vert + + def _build_grid_geometry_info(self): + cache_index_cc = 'cellcenters' + cache_index_vert = 'xyzgrid' + + vertexdict = {ix: list(v[-2:]) + for ix, v in enumerate(self._vertices)} + + xcenters = self._xc + ycenters = self._yc + xvertices = [] + yvertices = [] + + # build xy vertex and cell center info + for iverts in self._iverts: + + xcellvert = [] + ycellvert = [] + for ix in iverts: + xcellvert.append(vertexdict[ix][0]) + ycellvert.append(vertexdict[ix][1]) + + xvertices.append(xcellvert) + yvertices.append(ycellvert) + + zvertices, zcenters = self._zcoords() + + if self._has_ref_coordinates: + # transform x and y + xcenters, ycenters = self.get_coords(xcenters, ycenters) + xvertxform = [] + yvertxform = [] + # vertices are a list within a list + for xcellvertices, ycellvertices in zip(xvertices, yvertices): + xcellvertices, \ + ycellvertices = self.get_coords(xcellvertices, ycellvertices) + xvertxform.append(xcellvertices) + yvertxform.append(ycellvertices) + xvertices = xvertxform + yvertices = yvertxform + + self._cache_dict[cache_index_cc] = CachedData([xcenters, + ycenters, + zcenters]) + self._cache_dict[cache_index_vert] = CachedData([xvertices, + yvertices, + zvertices]) + + @classmethod + def from_argus_export(cls, fname, nlay=1): + """ + Create a new SpatialReferenceUnstructured grid from an Argus One + Trimesh file + + Parameters + ---------- + fname : string + File name + + nlay : int + Number of layers to create + + Returns + ------- + sru : flopy.utils.reference.SpatialReferenceUnstructured + + """ + from ..utils.geometry import get_polygon_centroid + f = open(fname, 'r') + line = f.readline() + ll = line.split() + ncells, nverts = ll[0:2] + ncells = int(ncells) + nverts = int(nverts) + verts = np.empty((nverts, 2), dtype=np.float) + xc = np.empty((ncells), dtype=np.float) + yc = np.empty((ncells), dtype=np.float) + + # read the vertices + f.readline() + for ivert in range(nverts): + line = f.readline() + ll = line.split() + c, iv, x, y = ll[0:4] + verts[ivert, 0] = x + verts[ivert, 1] = y + + # read the cell information and create iverts, xc, and yc + iverts = [] + for icell in range(ncells): + line = f.readline() + ll = line.split() + ivlist = [] + for ic in ll[2:5]: + ivlist.append(int(ic) - 1) + if ivlist[0] != ivlist[-1]: + ivlist.append(ivlist[0]) + iverts.append(ivlist) + xc[icell], yc[icell] = get_polygon_centroid(verts[ivlist, :]) + + # close file and return spatial reference + f.close() + return cls(verts, iverts, xc, yc, ncpl=np.array(nlay * [len(iverts)])) diff --git a/flopy/discretization/vertexgrid.py b/flopy/discretization/vertexgrid.py index 72b76ce71e..b405fa4be5 100644 --- a/flopy/discretization/vertexgrid.py +++ b/flopy/discretization/vertexgrid.py @@ -1,360 +1,360 @@ -import numpy as np -try: - from matplotlib.path import Path -except ImportError: - Path = None - -from .grid import Grid, CachedData -from ..utils.geometry import is_clockwise - - -class VertexGrid(Grid): - """ - class for a vertex model grid - - Parameters - ---------- - vertices - list of vertices that make up 
the grid - cell2d - list of cells and their vertices - - Properties - ---------- - vertices - returns list of vertices that make up the grid - cell2d - returns list of cells and their vertices - - Methods - ---------- - get_cell_vertices(cellid) - returns vertices for a single cell at cellid. - """ - - def __init__(self, vertices=None, cell2d=None, top=None, - botm=None, idomain=None, lenuni=None, epsg=None, proj4=None, - prj=None, xoff=0.0, yoff=0.0, angrot=0.0, - nlay=None, ncpl=None, cell1d=None): - super(VertexGrid, self).__init__('vertex', top, botm, idomain, lenuni, - epsg, proj4, prj, xoff, yoff, angrot) - self._vertices = vertices - self._cell1d = cell1d - self._cell2d = cell2d - self._top = top - self._botm = botm - self._idomain = idomain - if botm is None: - self._nlay = nlay - self._ncpl = ncpl - else: - self._nlay = None - self._ncpl = None - - @property - def is_valid(self): - if self._vertices is not None and (self._cell2d is not None or - self._cell1d is not None): - return True - return False - - @property - def is_complete(self): - if self._vertices is not None and (self._cell2d is not None or - self._cell1d is not None) and \ - super(VertexGrid, self).is_complete: - return True - return False - - @property - def nlay(self): - if self._cell1d is not None: - return 1 - elif self._botm is not None: - return len(self._botm) - else: - return self._nlay - - @property - def ncpl(self): - if self._cell1d is not None: - return len(self._cell1d) - if self._botm is not None: - return len(self._botm[0]) - else: - return self._ncpl - - @property - def nnodes(self): - return self.nlay * self.ncpl - - @property - def shape(self): - return self.nlay, self.ncpl - - @property - def extent(self): - self._copy_cache = False - xvertices = np.hstack(self.xvertices) - yvertices = np.hstack(self.yvertices) - self._copy_cache = True - return (np.min(xvertices), - np.max(xvertices), - np.min(yvertices), - np.max(yvertices)) - - @property - def grid_lines(self): - """ - Creates a series of grid line vertices for drawing - a model grid line collection - - Returns: - list: grid line vertices - """ - self._copy_cache = False - xgrid = self.xvertices - ygrid = self.yvertices - - lines = [] - for ncell, verts in enumerate(xgrid): - for ix, vert in enumerate(verts): - lines.append([(xgrid[ncell][ix - 1], ygrid[ncell][ix - 1]), - (xgrid[ncell][ix], ygrid[ncell][ix])]) - self._copy_cache = True - return lines - - @property - def xyzcellcenters(self): - """ - Method to get cell centers and set to grid - """ - cache_index = 'cellcenters' - if cache_index not in self._cache_dict or \ - self._cache_dict[cache_index].out_of_date: - self._build_grid_geometry_info() - if self._copy_cache: - return self._cache_dict[cache_index].data - else: - return self._cache_dict[cache_index].data_nocopy - - @property - def xyzvertices(self): - """ - Method to get all grid vertices in a layer, arranged per cell - - Returns: - list of size sum(nvertices per cell) - """ - cache_index = 'xyzgrid' - if cache_index not in self._cache_dict or \ - self._cache_dict[cache_index].out_of_date: - self._build_grid_geometry_info() - if self._copy_cache: - return self._cache_dict[cache_index].data - else: - return self._cache_dict[cache_index].data_nocopy - - def intersect(self, x, y, local=False, forgive=False): - """ - Get the CELL2D number of a point with coordinates x and y - - When the point is on the edge of two cells, the cell with the lowest - CELL2D number is returned. 
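-
-        Candidate cells are screened against their bounding box first; the
-        exact test then uses a matplotlib Path with a small radius (negative
-        for clockwise cell outlines, positive otherwise) so that points
-        lying exactly on a cell edge are still matched.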
-
-        Parameters
-        ----------
-        x : float
-            The x-coordinate of the requested point
-        y : float
-            The y-coordinate of the requested point
-        local: bool (optional)
-            If True, x and y are in local coordinates (defaults to False)
-        forgive: bool (optional)
-            Forgive x,y arguments that fall outside the model grid and
-            return NaNs instead (defaults to False - will throw exception)
-
-        Returns
-        -------
-        icell2d : int
-            The CELL2D number
-
-        """
-        if Path is None:
-            s = 'Could not import matplotlib.  Must install matplotlib ' + \
-                ' in order to use VertexGrid.intersect() method'
-            raise ImportError(s)
-
-        if local:
-            # transform x and y to real-world coordinates
-            x, y = super(VertexGrid, self).get_coords(x, y)
-        xv, yv, zv = self.xyzvertices
-        for icell2d in range(self.ncpl):
-            xa = np.array(xv[icell2d])
-            ya = np.array(yv[icell2d])
-            # x and y at least have to be within the bounding box of the cell
-            if np.any(x <= xa) and np.any(x >= xa) and \
-                    np.any(y <= ya) and np.any(y >= ya):
-                path = Path(np.stack((xa, ya)).transpose())
-                # use a small radius, so that the edge of the cell is included
-                if is_clockwise(xa, ya):
-                    radius = -1e-9
-                else:
-                    radius = 1e-9
-                if path.contains_point((x, y), radius=radius):
-                    return icell2d
-        if forgive:
-            icell2d = np.nan
-            return icell2d
-        raise Exception('x, y point given is outside of the model area')
-
-    def get_cell_vertices(self, cellid):
-        """
-        Method to get a set of cell vertices for a single cell
-        used in the Shapefile export utilities
-        :param cellid: (int) cellid number
-        :return: list of x,y cell vertices
-        """
-        self._copy_cache = False
-        cell_verts = list(zip(self.xvertices[cellid],
-                              self.yvertices[cellid]))
-        self._copy_cache = True
-        return cell_verts
-
-    def plot(self, **kwargs):
-        """
-        Plot the grid lines.
-
-        Parameters
-        ----------
-        kwargs : ax, colors.  The remaining kwargs are passed into the
-            LineCollection constructor.
- - Returns - ------- - lc : matplotlib.collections.LineCollection - - """ - from flopy.plot import PlotMapView - - mm = PlotMapView(modelgrid=self) - return mm.plot_grid(**kwargs) - - def _build_grid_geometry_info(self): - cache_index_cc = 'cellcenters' - cache_index_vert = 'xyzgrid' - - xcenters = [] - ycenters = [] - xvertices = [] - yvertices = [] - - if self._cell1d is not None: - zcenters = [] - zvertices = [] - vertexdict = {v[0]: [v[1], v[2], v[3]] - for v in self._vertices} - for cell1d in self._cell1d: - cell1d = tuple(cell1d) - xcenters.append(cell1d[1]) - ycenters.append(cell1d[2]) - zcenters.append(cell1d[3]) - - vert_number = [] - for i in cell1d[3:]: - if i is not None: - vert_number.append(int(i)) - - xcellvert = [] - ycellvert = [] - zcellvert = [] - for ix in vert_number: - xcellvert.append(vertexdict[ix][0]) - ycellvert.append(vertexdict[ix][1]) - zcellvert.append(vertexdict[ix][2]) - xvertices.append(xcellvert) - yvertices.append(ycellvert) - zvertices.append(zcellvert) - - else: - vertexdict = {v[0]: [v[1], v[2]] - for v in self._vertices} - # build xy vertex and cell center info - for cell2d in self._cell2d: - cell2d = tuple(cell2d) - xcenters.append(cell2d[1]) - ycenters.append(cell2d[2]) - - vert_number = [] - for i in cell2d[4:]: - if i is not None: - vert_number.append(int(i)) - - xcellvert = [] - ycellvert = [] - for ix in vert_number: - xcellvert.append(vertexdict[ix][0]) - ycellvert.append(vertexdict[ix][1]) - xvertices.append(xcellvert) - yvertices.append(ycellvert) - - # build z cell centers - zvertices, zcenters = self._zcoords() - - if self._has_ref_coordinates: - # transform x and y - xcenters, ycenters = self.get_coords(xcenters, ycenters) - xvertxform = [] - yvertxform = [] - # vertices are a list within a list - for xcellvertices, ycellvertices in zip(xvertices, yvertices): - xcellvertices, \ - ycellvertices = self.get_coords(xcellvertices, - ycellvertices) - xvertxform.append(xcellvertices) - yvertxform.append(ycellvertices) - xvertices = xvertxform - yvertices = yvertxform - - self._cache_dict[cache_index_cc] = CachedData([xcenters, - ycenters, - zcenters]) - self._cache_dict[cache_index_vert] = CachedData([xvertices, - yvertices, - zvertices]) - - -if __name__ == "__main__": - import os - import flopy as fp - - ws = "../../examples/data/mf6/test003_gwfs_disv" - name = "mfsim.nam" - - sim = fp.mf6.modflow.MFSimulation.load(sim_name=name, sim_ws=ws) - - print(sim.model_names) - ml = sim.get_model("gwf_1") - - dis = ml.dis - - t = VertexGrid(dis.vertices.array, dis.cell2d.array, top=dis.top.array, - botm=dis.botm.array, idomain=dis.idomain.array, - epsg=26715, xoff=0, yoff=0, angrot=45) - - sr_x = t.xvertices - sr_y = t.yvertices - sr_xc = t.xcellcenters - sr_yc = t.ycellcenters - sr_lc = t.grid_lines - sr_e = t.extent - - t.use_ref_coords = False - x = t.xvertices - y = t.yvertices - z = t.zvertices - xc = t.xcellcenters - yc = t.ycellcenters - zc = t.zcellcenters - lc = t.grid_lines - e = t.extent +import numpy as np +try: + from matplotlib.path import Path +except ImportError: + Path = None + +from .grid import Grid, CachedData +from ..utils.geometry import is_clockwise + + +class VertexGrid(Grid): + """ + class for a vertex model grid + + Parameters + ---------- + vertices + list of vertices that make up the grid + cell2d + list of cells and their vertices + + Properties + ---------- + vertices + returns list of vertices that make up the grid + cell2d + returns list of cells and their vertices + + Methods + ---------- + get_cell_vertices(cellid) + 
returns vertices for a single cell at cellid. + """ + + def __init__(self, vertices=None, cell2d=None, top=None, + botm=None, idomain=None, lenuni=None, epsg=None, proj4=None, + prj=None, xoff=0.0, yoff=0.0, angrot=0.0, + nlay=None, ncpl=None, cell1d=None): + super(VertexGrid, self).__init__('vertex', top, botm, idomain, lenuni, + epsg, proj4, prj, xoff, yoff, angrot) + self._vertices = vertices + self._cell1d = cell1d + self._cell2d = cell2d + self._top = top + self._botm = botm + self._idomain = idomain + if botm is None: + self._nlay = nlay + self._ncpl = ncpl + else: + self._nlay = None + self._ncpl = None + + @property + def is_valid(self): + if self._vertices is not None and (self._cell2d is not None or + self._cell1d is not None): + return True + return False + + @property + def is_complete(self): + if self._vertices is not None and (self._cell2d is not None or + self._cell1d is not None) and \ + super(VertexGrid, self).is_complete: + return True + return False + + @property + def nlay(self): + if self._cell1d is not None: + return 1 + elif self._botm is not None: + return len(self._botm) + else: + return self._nlay + + @property + def ncpl(self): + if self._cell1d is not None: + return len(self._cell1d) + if self._botm is not None: + return len(self._botm[0]) + else: + return self._ncpl + + @property + def nnodes(self): + return self.nlay * self.ncpl + + @property + def shape(self): + return self.nlay, self.ncpl + + @property + def extent(self): + self._copy_cache = False + xvertices = np.hstack(self.xvertices) + yvertices = np.hstack(self.yvertices) + self._copy_cache = True + return (np.min(xvertices), + np.max(xvertices), + np.min(yvertices), + np.max(yvertices)) + + @property + def grid_lines(self): + """ + Creates a series of grid line vertices for drawing + a model grid line collection + + Returns: + list: grid line vertices + """ + self._copy_cache = False + xgrid = self.xvertices + ygrid = self.yvertices + + lines = [] + for ncell, verts in enumerate(xgrid): + for ix, vert in enumerate(verts): + lines.append([(xgrid[ncell][ix - 1], ygrid[ncell][ix - 1]), + (xgrid[ncell][ix], ygrid[ncell][ix])]) + self._copy_cache = True + return lines + + @property + def xyzcellcenters(self): + """ + Method to get cell centers and set to grid + """ + cache_index = 'cellcenters' + if cache_index not in self._cache_dict or \ + self._cache_dict[cache_index].out_of_date: + self._build_grid_geometry_info() + if self._copy_cache: + return self._cache_dict[cache_index].data + else: + return self._cache_dict[cache_index].data_nocopy + + @property + def xyzvertices(self): + """ + Method to get all grid vertices in a layer, arranged per cell + + Returns: + list of size sum(nvertices per cell) + """ + cache_index = 'xyzgrid' + if cache_index not in self._cache_dict or \ + self._cache_dict[cache_index].out_of_date: + self._build_grid_geometry_info() + if self._copy_cache: + return self._cache_dict[cache_index].data + else: + return self._cache_dict[cache_index].data_nocopy + + def intersect(self, x, y, local=False, forgive=False): + """ + Get the CELL2D number of a point with coordinates x and y + + When the point is on the edge of two cells, the cell with the lowest + CELL2D number is returned. 
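+
+        Candidate cells are screened against their bounding box first; the
+        exact test then uses a matplotlib Path with a small radius (negative
+        for clockwise cell outlines, positive otherwise) so that points
+        lying exactly on a cell edge are still matched.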
+
+        Parameters
+        ----------
+        x : float
+            The x-coordinate of the requested point
+        y : float
+            The y-coordinate of the requested point
+        local: bool (optional)
+            If True, x and y are in local coordinates (defaults to False)
+        forgive: bool (optional)
+            Forgive x,y arguments that fall outside the model grid and
+            return NaNs instead (defaults to False - will throw exception)
+
+        Returns
+        -------
+        icell2d : int
+            The CELL2D number
+
+        """
+        if Path is None:
+            s = 'Could not import matplotlib.  Must install matplotlib ' + \
+                ' in order to use VertexGrid.intersect() method'
+            raise ImportError(s)
+
+        if local:
+            # transform x and y to real-world coordinates
+            x, y = super(VertexGrid, self).get_coords(x, y)
+        xv, yv, zv = self.xyzvertices
+        for icell2d in range(self.ncpl):
+            xa = np.array(xv[icell2d])
+            ya = np.array(yv[icell2d])
+            # x and y at least have to be within the bounding box of the cell
+            if np.any(x <= xa) and np.any(x >= xa) and \
+                    np.any(y <= ya) and np.any(y >= ya):
+                path = Path(np.stack((xa, ya)).transpose())
+                # use a small radius, so that the edge of the cell is included
+                if is_clockwise(xa, ya):
+                    radius = -1e-9
+                else:
+                    radius = 1e-9
+                if path.contains_point((x, y), radius=radius):
+                    return icell2d
+        if forgive:
+            icell2d = np.nan
+            return icell2d
+        raise Exception('x, y point given is outside of the model area')
+
+    def get_cell_vertices(self, cellid):
+        """
+        Method to get a set of cell vertices for a single cell
+        used in the Shapefile export utilities
+        :param cellid: (int) cellid number
+        :return: list of x,y cell vertices
+        """
+        self._copy_cache = False
+        cell_verts = list(zip(self.xvertices[cellid],
+                              self.yvertices[cellid]))
+        self._copy_cache = True
+        return cell_verts
+
+    def plot(self, **kwargs):
+        """
+        Plot the grid lines.
+
+        Parameters
+        ----------
+        kwargs : ax, colors.  The remaining kwargs are passed into the
+            LineCollection constructor.
+ + Returns + ------- + lc : matplotlib.collections.LineCollection + + """ + from flopy.plot import PlotMapView + + mm = PlotMapView(modelgrid=self) + return mm.plot_grid(**kwargs) + + def _build_grid_geometry_info(self): + cache_index_cc = 'cellcenters' + cache_index_vert = 'xyzgrid' + + xcenters = [] + ycenters = [] + xvertices = [] + yvertices = [] + + if self._cell1d is not None: + zcenters = [] + zvertices = [] + vertexdict = {v[0]: [v[1], v[2], v[3]] + for v in self._vertices} + for cell1d in self._cell1d: + cell1d = tuple(cell1d) + xcenters.append(cell1d[1]) + ycenters.append(cell1d[2]) + zcenters.append(cell1d[3]) + + vert_number = [] + for i in cell1d[3:]: + if i is not None: + vert_number.append(int(i)) + + xcellvert = [] + ycellvert = [] + zcellvert = [] + for ix in vert_number: + xcellvert.append(vertexdict[ix][0]) + ycellvert.append(vertexdict[ix][1]) + zcellvert.append(vertexdict[ix][2]) + xvertices.append(xcellvert) + yvertices.append(ycellvert) + zvertices.append(zcellvert) + + else: + vertexdict = {v[0]: [v[1], v[2]] + for v in self._vertices} + # build xy vertex and cell center info + for cell2d in self._cell2d: + cell2d = tuple(cell2d) + xcenters.append(cell2d[1]) + ycenters.append(cell2d[2]) + + vert_number = [] + for i in cell2d[4:]: + if i is not None: + vert_number.append(int(i)) + + xcellvert = [] + ycellvert = [] + for ix in vert_number: + xcellvert.append(vertexdict[ix][0]) + ycellvert.append(vertexdict[ix][1]) + xvertices.append(xcellvert) + yvertices.append(ycellvert) + + # build z cell centers + zvertices, zcenters = self._zcoords() + + if self._has_ref_coordinates: + # transform x and y + xcenters, ycenters = self.get_coords(xcenters, ycenters) + xvertxform = [] + yvertxform = [] + # vertices are a list within a list + for xcellvertices, ycellvertices in zip(xvertices, yvertices): + xcellvertices, \ + ycellvertices = self.get_coords(xcellvertices, + ycellvertices) + xvertxform.append(xcellvertices) + yvertxform.append(ycellvertices) + xvertices = xvertxform + yvertices = yvertxform + + self._cache_dict[cache_index_cc] = CachedData([xcenters, + ycenters, + zcenters]) + self._cache_dict[cache_index_vert] = CachedData([xvertices, + yvertices, + zvertices]) + + +if __name__ == "__main__": + import os + import flopy as fp + + ws = "../../examples/data/mf6/test003_gwfs_disv" + name = "mfsim.nam" + + sim = fp.mf6.modflow.MFSimulation.load(sim_name=name, sim_ws=ws) + + print(sim.model_names) + ml = sim.get_model("gwf_1") + + dis = ml.dis + + t = VertexGrid(dis.vertices.array, dis.cell2d.array, top=dis.top.array, + botm=dis.botm.array, idomain=dis.idomain.array, + epsg=26715, xoff=0, yoff=0, angrot=45) + + sr_x = t.xvertices + sr_y = t.yvertices + sr_xc = t.xcellcenters + sr_yc = t.ycellcenters + sr_lc = t.grid_lines + sr_e = t.extent + + t.use_ref_coords = False + x = t.xvertices + y = t.yvertices + z = t.zvertices + xc = t.xcellcenters + yc = t.ycellcenters + zc = t.zcellcenters + lc = t.grid_lines + e = t.extent diff --git a/flopy/export/__init__.py b/flopy/export/__init__.py index f827363849..bdac64c70a 100644 --- a/flopy/export/__init__.py +++ b/flopy/export/__init__.py @@ -1,7 +1,7 @@ - -#imports -from .netcdf import NetCdf -from . import utils -from . import shapefile_utils -from .netcdf import Logger -from . import metadata + +#imports +from .netcdf import NetCdf +from . import utils +from . import shapefile_utils +from .netcdf import Logger +from . 
import metadata diff --git a/flopy/export/shapefile_utils.py b/flopy/export/shapefile_utils.py index 0e8c348c73..4c05719c01 100755 --- a/flopy/export/shapefile_utils.py +++ b/flopy/export/shapefile_utils.py @@ -1,924 +1,924 @@ -""" -Module for exporting and importing flopy model attributes -""" -import copy -import shutil -import json -import numpy as np -import os -import warnings -from collections import OrderedDict - -from ..datbase import DataType, DataInterface -from ..utils import Util3d, SpatialReference - -# web address of spatial reference dot org -srefhttp = 'https://spatialreference.org' - - -def import_shapefile(): - try: - import shapefile as sf - return sf - except Exception as e: - raise Exception("io.to_shapefile(): error " + - "importing shapefile - try pip install pyshp") - - -def write_gridlines_shapefile(filename, mg): - """ - Write a polyline shapefile of the grid lines - a lightweight alternative - to polygons. - - Parameters - ---------- - filename : string - name of the shapefile to write - mg : model grid - - Returns - ------- - None - - """ - shapefile = import_shapefile() - wr = shapefile.Writer(filename, shapeType=shapefile.POLYLINE) - wr.field("number", "N", 18, 0) - if isinstance(mg, SpatialReference): - grid_lines = mg.get_grid_lines() - warnings.warn( - "SpatialReference has been deprecated. Use StructuredGrid" - " instead.", - category=DeprecationWarning) - else: - grid_lines = mg.grid_lines - for i, line in enumerate(grid_lines): - wr.line([line]) - wr.record(i) - - wr.close() - return - - -def write_grid_shapefile(filename, mg, array_dict, nan_val=np.nan, # -1.0e9, - epsg=None, prj=None): - """ - Method to write a shapefile of gridded input data - - Parameters - ---------- - filename : str - shapefile file name path - mg : flopy.discretization.Grid object - flopy model grid - array_dict : dict - dictionary of model input arrays - nan_val : float - value to fill nans - epsg : str, int - epsg code - prj : str - projection file name path - - Returns - ------- - None - - """ - shapefile = import_shapefile() - w = shapefile.Writer(filename, shapeType=shapefile.POLYGON) - w.autoBalance = 1 - - if isinstance(mg, SpatialReference): - verts = copy.deepcopy(mg.vertices) - warnings.warn( - "SpatialReference has been deprecated. 
Use StructuredGrid" - " instead.", - category=DeprecationWarning) - elif mg.grid_type == 'structured': - verts = [mg.get_cell_vertices(i, j) - for i in range(mg.nrow) - for j in range(mg.ncol)] - elif mg.grid_type == 'vertex': - verts = [mg.get_cell_vertices(cellid) - for cellid in range(mg.ncpl)] - else: - raise Exception('Grid type {} not supported.'.format(mg.grid_type)) - - # set up the attribute fields and arrays of attributes - if isinstance(mg, SpatialReference) or mg.grid_type == 'structured': - names = ['node', 'row', 'column'] + list(array_dict.keys()) - dtypes = [('node', np.dtype('int')), - ('row', np.dtype('int')), - ('column', np.dtype('int'))] + \ - [(enforce_10ch_limit([name])[0], array_dict[name].dtype) - for name in names[3:]] - node = list(range(1, mg.ncol * mg.nrow + 1)) - col = list(range(1, mg.ncol + 1)) * mg.nrow - row = sorted(list(range(1, mg.nrow + 1)) * mg.ncol) - at = np.vstack( - [node, row, col] + - [array_dict[name].ravel() for name in names[3:]]).transpose() - - names = enforce_10ch_limit(names) - - elif mg.grid_type == 'vertex': - names = ['node'] + list(array_dict.keys()) - dtypes = [('node', np.dtype('int'))] + \ - [(enforce_10ch_limit([name])[0], array_dict[name].dtype) - for name in names[1:]] - node = list(range(1, mg.ncpl + 1)) - at = np.vstack( - [node] + - [array_dict[name].ravel() for name in names[1:]]).transpose() - - names = enforce_10ch_limit(names) - - # flag nan values and explicitly set the dtypes - if at.dtype in [np.float, np.float32, np.float64]: - at[np.isnan(at)] = nan_val - at = np.array([tuple(i) for i in at], dtype=dtypes) - - # write field information - fieldinfo = {name: get_pyshp_field_info(dtype.name) for name, dtype in - dtypes} - for n in names: - w.field(n, *fieldinfo[n]) - - for i, r in enumerate(at): - # check if polygon is closed, if not close polygon for QGIS - if verts[i][-1] != verts[i][0]: - verts[i] = verts[i] + [verts[i][0]] - w.poly([verts[i]]) - w.record(*r) - - # close - w.close() - print('wrote {}'.format(filename)) - # write the projection file - write_prj(filename, mg, epsg, prj) - return - - -def model_attributes_to_shapefile(filename, ml, package_names=None, - array_dict=None, - **kwargs): - """ - Wrapper function for writing a shapefile of model data. If package_names - is not None, then search through the requested packages looking for arrays - that can be added to the shapefile as attributes - - Parameters - ---------- - filename : string - name of the shapefile to write - ml : flopy.mbase - model instance - package_names : list of package names (e.g. ["dis","lpf"]) - Packages to export data arrays to shapefile. (default is None) - array_dict : dict of {name:2D array} pairs - Additional 2D arrays to add as attributes to the shapefile. 
- (default is None) - - **kwargs : keyword arguments - modelgrid : fp.modflow.Grid object - if modelgrid is supplied, user supplied modelgrid is used in lieu - of the modelgrid attached to the modflow model object - epsg : int - epsg projection information - prj : str - user supplied prj file - - Returns - ------- - None - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> flopy.utils.model_attributes_to_shapefile('model.shp', m) - - """ - - if array_dict is None: - array_dict = {} - - if package_names is not None: - if not isinstance(package_names, list): - package_names = [package_names] - else: - package_names = [pak.name[0] for pak in ml.packagelist] - - if "modelgrid" in kwargs: - grid = kwargs.pop("modelgrid") - else: - grid = ml.modelgrid - - if grid.grid_type == 'USG-Unstructured': - raise Exception('Flopy does not support exporting to shapefile from ' - 'and MODFLOW-USG unstructured grid.') - horz_shape = grid.shape[1:] - for pname in package_names: - pak = ml.get_package(pname) - attrs = dir(pak) - if pak is not None: - if 'sr' in attrs: - attrs.remove('sr') - if 'start_datetime' in attrs: - attrs.remove('start_datetime') - for attr in attrs: - a = pak.__getattribute__(attr) - if a is None or not hasattr(a, - 'data_type') or a.name == 'thickness': - continue - if a.data_type == DataType.array2d and a.array.shape == horz_shape: - name = shape_attr_name(a.name, keep_layer=True) - # name = a.name.lower() - array_dict[name] = a.array - # elif isinstance(a, Util3d): - elif a.data_type == DataType.array3d: - # Not sure how best to check if an object has array data - try: - assert a.array is not None - except: - print( - 'Failed to get data for {} array, {} package'.format( - a.name, - pak.name[0])) - continue - if isinstance(a.name, list) and a.name[0] == 'thickness': - continue - for ilay in range(a.array.shape[0]): - try: - arr = a.array[ilay] - except: - arr = a[ilay] - - if isinstance(a, Util3d): - aname = shape_attr_name(a[ilay].name) - else: - aname = a.name - - if arr.shape == (1,) + horz_shape: - # fix for mf6 case. 
TODO: fix this in the mf6 code - arr = arr[0] - assert arr.shape == horz_shape - name = '{}_{}'.format(aname, ilay + 1) - array_dict[name] = arr - elif a.data_type == DataType.transient2d: # elif isinstance(a, Transient2d): - # Not sure how best to check if an object has array data - try: - assert a.array is not None - except: - print( - 'Failed to get data for {} array, {} package'.format( - a.name, - pak.name[0])) - continue - for kper in range(a.array.shape[0]): - name = '{}{}'.format( - shape_attr_name(a.name), kper + 1) - arr = a.array[kper][0] - assert arr.shape == horz_shape - array_dict[name] = arr - elif a.data_type == DataType.transientlist: # elif isinstance(a, MfList): - try: - list(a.masked_4D_arrays_itr()) - except: - continue - for name, array in a.masked_4D_arrays_itr(): - for kper in range(array.shape[0]): - for k in range(array.shape[1]): - n = shape_attr_name(name, length=4) - aname = "{}{}{}".format(n, k + 1, kper + 1) - arr = array[kper][k] - assert arr.shape == horz_shape - if np.all(np.isnan(arr)): - continue - array_dict[aname] = arr - elif isinstance(a, list): - for v in a: - if isinstance(a, DataInterface) and \ - v.data_type == DataType.array3d: - for ilay in range(a.model.modelgrid.nlay): - u2d = a[ilay] - name = '{}_{}'.format( - shape_attr_name(u2d.name), ilay + 1) - arr = u2d.array - assert arr.shape == horz_shape - array_dict[name] = arr - - # write data arrays to a shapefile - write_grid_shapefile(filename, grid, array_dict) - epsg = kwargs.get('epsg', None) - prj = kwargs.get('prj', None) - write_prj(filename, grid, epsg, prj) - - -def shape_attr_name(name, length=6, keep_layer=False): - """ - Function for to format an array name to a maximum of 10 characters to - conform with ESRI shapefile maximum attribute name length - - Parameters - ---------- - name : string - data array name - length : int - maximum length of string to return. Value passed to function is - overridden and set to 10 if keep_layer=True. (default is 6) - keep_layer : bool - Boolean that determines if layer number in name should be retained. - (default is False) - - - Returns - ------- - String - - Examples - -------- - - >>> import flopy - >>> name = flopy.utils.shape_attr_name('averylongstring') - >>> name - >>> 'averyl' - - """ - # kludges - if name == 'model_top': - name = 'top' - # replace spaces with "_" - n = name.lower().replace(' ', '_') - # exclude "_layer_X" portion of string - if keep_layer: - length = 10 - n = n.replace('_layer', '_') - else: - try: - idx = n.index('_layer') - n = n[:idx] - except: - pass - - if len(n) > length: - n = n[:length] - return n - - -def enforce_10ch_limit(names): - """Enforce 10 character limit for fieldnames. - Add suffix for duplicate names starting at 0. - - Parameters - ---------- - names : list of strings - - Returns - ------- - names : list of unique strings of len <= 10. - """ - names = [n[:5] + n[-4:] + '_' if len(n) > 10 else n - for n in names] - dups = {x: names.count(x) for x in names} - suffix = {n: list(range(cnt)) for n, cnt in dups.items() if cnt > 1} - for i, n in enumerate(names): - if dups[n] > 1: - names[i] = n[:9] + str(suffix[n].pop(0)) - return names - - -def get_pyshp_field_info(dtypename): - """Get pyshp dtype information for a given numpy dtype. 
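-
-    For example, an integer column maps to the pyshp field descriptor
-    ('N', 18, 0), the form expected by shapefile.Writer.field().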
- """ - fields = {'int': ('N', 18, 0), - ' 0 else None - proj = self.crs['proj'] - names = {'aea': 'albers_conical_equal_area', - 'aeqd': 'azimuthal_equidistant', - 'laea': 'lambert_azimuthal_equal_area', - 'longlat': 'latitude_longitude', - 'lcc': 'lambert_conformal_conic', - 'merc': 'mercator', - 'tmerc': 'transverse_mercator', - 'utm': 'transverse_mercator'} - attribs = {'grid_mapping_name': names[proj], - 'semi_major_axis': self.crs['a'], - 'inverse_flattening': self.crs['rf'], - 'standard_parallel': sp, - 'longitude_of_central_meridian': self.crs['lon_0'], - 'latitude_of_projection_origin': self.crs['lat_0'], - 'scale_factor_at_projection_origin': self.crs['k_0'], - 'false_easting': self.crs['x_0'], - 'false_northing': self.crs['y_0']} - return {k: v for k, v in attribs.items() if v is not None} - - @property - def proj4(self): - """ - Not implemented yet - """ - return None - - def parse_wkt(self): - - self.projcs = self._gettxt('PROJCS["', '"') - self.utm_zone = None - if self.projcs is not None and 'utm' in self.projcs.lower(): - self.utm_zone = self.projcs[-3:].lower().strip('n').strip('s') - self.geogcs = self._gettxt('GEOGCS["', '"') - self.datum = self._gettxt('DATUM["', '"') - tmp = self._getgcsparam('SPHEROID') - self.spheroid_name = tmp.pop(0) - self.semi_major_axis = tmp.pop(0) - self.inverse_flattening = tmp.pop(0) - self.primem = self._getgcsparam('PRIMEM') - self.gcs_unit = self._getgcsparam('UNIT') - self.projection = self._gettxt('PROJECTION["', '"') - self.latitude_of_origin = self._getvalue('latitude_of_origin') - self.central_meridian = self._getvalue('central_meridian') - self.standard_parallel_1 = self._getvalue('standard_parallel_1') - self.standard_parallel_2 = self._getvalue('standard_parallel_2') - self.scale_factor = self._getvalue('scale_factor') - self.false_easting = self._getvalue('false_easting') - self.false_northing = self._getvalue('false_northing') - self.projcs_unit = self._getprojcs_unit() - - def _gettxt(self, s1, s2): - s = self.wktstr.lower() - strt = s.find(s1.lower()) - if strt >= 0: # -1 indicates not found - strt += len(s1) - end = s[strt:].find(s2.lower()) + strt - return self.wktstr[strt:end] - - def _getvalue(self, k): - s = self.wktstr.lower() - strt = s.find(k.lower()) - if strt >= 0: - strt += len(k) - end = s[strt:].find(']') + strt - try: - return float(self.wktstr[strt:end].split(',')[1]) - except (IndexError, TypeError, ValueError, AttributeError): - pass - - def _getgcsparam(self, txt): - nvalues = 3 if txt.lower() == 'spheroid' else 2 - tmp = self._gettxt('{}["'.format(txt), ']') - if tmp is not None: - tmp = tmp.replace('"', '').split(',') - name = tmp[0:1] - values = list(map(float, tmp[1:nvalues])) - return name + values - else: - return [None] * nvalues - - def _getprojcs_unit(self): - if self.projcs is not None: - tmp = self.wktstr.lower().split('unit["')[-1] - uname, ufactor = tmp.strip().strip(']').split('",')[0:2] - ufactor = float(ufactor.split(']')[0].split()[0].split(',')[0]) - return uname, ufactor - return None, None - - @staticmethod - def getprj(epsg, addlocalreference=True, text='esriwkt'): - """ - Gets projection file (.prj) text for given epsg code from - spatialreference.org - See: https://www.epsg-registry.org/ - - Parameters - ---------- - epsg : int - epsg code for coordinate system - addlocalreference : boolean - adds the projection file text associated with epsg to a local - database, epsgref.json, located in the user's data directory. - Returns - ------- - prj : str - text for a projection (*.prj) file. 
- - """ - epsgfile = EpsgReference() - wktstr = epsgfile.get(epsg) - if wktstr is None: - wktstr = CRS.get_spatialreference(epsg, text=text) - if addlocalreference and wktstr is not None: - epsgfile.add(epsg, wktstr) - return wktstr - - @staticmethod - def get_spatialreference(epsg, text='esriwkt'): - """ - Gets text for given epsg code and text format from spatialreference.org - Fetches the reference text using the url: - https://spatialreference.org/ref/epsg/// - See: https://www.epsg-registry.org/ - - Parameters - ---------- - epsg : int - epsg code for coordinate system - text : str - string added to url - Returns - ------- - url : str - - """ - from flopy.utils.flopy_io import get_url_text - - epsg_categories = ['epsg', 'esri'] - for cat in epsg_categories: - url = '{}/ref/'.format(srefhttp) + \ - '{}/{}/{}/'.format(cat, epsg, text) - result = get_url_text(url) - if result is not None: - break - if result is not None: - return result.replace("\n", "") - elif result is None and text != 'epsg': - for cat in epsg_categories: - error_msg = 'No internet connection or ' + \ - 'epsg code {} '.format(epsg) + \ - 'not found at {}/ref/'.format(srefhttp) + \ - '{}/{}/{}'.format(cat, epsg, text) - print(error_msg) - # epsg code not listed on spatialreference.org - # may still work with pyproj - elif text == 'epsg': - return 'epsg:{}'.format(epsg) - - @staticmethod - def getproj4(epsg): - """ - Gets projection file (.prj) text for given epsg code from - spatialreference.org. See: https://www.epsg-registry.org/ - - Parameters - ---------- - epsg : int - epsg code for coordinate system - Returns - ------- - prj : str - text for a projection (*.prj) file. - """ - return CRS.get_spatialreference(epsg, text='proj4') - - -class EpsgReference: - """ - Sets up a local database of text representations of coordinate reference - systems, keyed by EPSG code. - - The database is epsgref.json, located in the user's data directory. If - optional 'appdirs' package is available, this is in the platform-dependent - user directory, otherwise in the user's 'HOME/.flopy' directory. 
- """ - - def __init__(self): - try: - from appdirs import user_data_dir - except ImportError: - user_data_dir = None - if user_data_dir: - datadir = user_data_dir('flopy') - else: - # if appdirs is not installed, use user's home directory - datadir = os.path.join(os.path.expanduser('~'), '.flopy') - if not os.path.isdir(datadir): - os.makedirs(datadir) - dbname = 'epsgref.json' - self.location = os.path.join(datadir, dbname) - - def to_dict(self): - """ - returns dict with EPSG code integer key, and WKT CRS text - """ - data = OrderedDict() - if os.path.exists(self.location): - with open(self.location, 'r') as f: - loaded_data = json.load(f, object_pairs_hook=OrderedDict) - # convert JSON key from str to EPSG integer - for key, value in loaded_data.items(): - try: - data[int(key)] = value - except ValueError: - data[key] = value - return data - - def _write(self, data): - with open(self.location, 'w') as f: - json.dump(data, f, indent=0) - f.write('\n') - - def reset(self, verbose=True): - if os.path.exists(self.location): - if verbose: - print('Resetting {}'.format(self.location)) - os.remove(self.location) - elif verbose: - print('{} does not exist, no reset required'.format(self.location)) - - def add(self, epsg, prj): - """ - add an epsg code to epsgref.json - """ - data = self.to_dict() - data[epsg] = prj - self._write(data) - - def get(self, epsg): - """ - returns prj from a epsg code, otherwise None if not found - """ - data = self.to_dict() - return data.get(epsg) - - def remove(self, epsg): - """ - removes an epsg entry from epsgref.json - """ - data = self.to_dict() - if epsg in data: - del data[epsg] - self._write(data) - - @staticmethod - def show(): - ep = EpsgReference() - prj = ep.to_dict() - for k, v in prj.items(): - print('{}:\n{}\n'.format(k, v)) +""" +Module for exporting and importing flopy model attributes +""" +import copy +import shutil +import json +import numpy as np +import os +import warnings +from collections import OrderedDict + +from ..datbase import DataType, DataInterface +from ..utils import Util3d, SpatialReference + +# web address of spatial reference dot org +srefhttp = 'https://spatialreference.org' + + +def import_shapefile(): + try: + import shapefile as sf + return sf + except Exception as e: + raise Exception("io.to_shapefile(): error " + + "importing shapefile - try pip install pyshp") + + +def write_gridlines_shapefile(filename, mg): + """ + Write a polyline shapefile of the grid lines - a lightweight alternative + to polygons. + + Parameters + ---------- + filename : string + name of the shapefile to write + mg : model grid + + Returns + ------- + None + + """ + shapefile = import_shapefile() + wr = shapefile.Writer(filename, shapeType=shapefile.POLYLINE) + wr.field("number", "N", 18, 0) + if isinstance(mg, SpatialReference): + grid_lines = mg.get_grid_lines() + warnings.warn( + "SpatialReference has been deprecated. 
Use StructuredGrid" + " instead.", + category=DeprecationWarning) + else: + grid_lines = mg.grid_lines + for i, line in enumerate(grid_lines): + wr.line([line]) + wr.record(i) + + wr.close() + return + + +def write_grid_shapefile(filename, mg, array_dict, nan_val=np.nan, # -1.0e9, + epsg=None, prj=None): + """ + Method to write a shapefile of gridded input data + + Parameters + ---------- + filename : str + shapefile file name path + mg : flopy.discretization.Grid object + flopy model grid + array_dict : dict + dictionary of model input arrays + nan_val : float + value to fill nans + epsg : str, int + epsg code + prj : str + projection file name path + + Returns + ------- + None + + """ + shapefile = import_shapefile() + w = shapefile.Writer(filename, shapeType=shapefile.POLYGON) + w.autoBalance = 1 + + if isinstance(mg, SpatialReference): + verts = copy.deepcopy(mg.vertices) + warnings.warn( + "SpatialReference has been deprecated. Use StructuredGrid" + " instead.", + category=DeprecationWarning) + elif mg.grid_type == 'structured': + verts = [mg.get_cell_vertices(i, j) + for i in range(mg.nrow) + for j in range(mg.ncol)] + elif mg.grid_type == 'vertex': + verts = [mg.get_cell_vertices(cellid) + for cellid in range(mg.ncpl)] + else: + raise Exception('Grid type {} not supported.'.format(mg.grid_type)) + + # set up the attribute fields and arrays of attributes + if isinstance(mg, SpatialReference) or mg.grid_type == 'structured': + names = ['node', 'row', 'column'] + list(array_dict.keys()) + dtypes = [('node', np.dtype('int')), + ('row', np.dtype('int')), + ('column', np.dtype('int'))] + \ + [(enforce_10ch_limit([name])[0], array_dict[name].dtype) + for name in names[3:]] + node = list(range(1, mg.ncol * mg.nrow + 1)) + col = list(range(1, mg.ncol + 1)) * mg.nrow + row = sorted(list(range(1, mg.nrow + 1)) * mg.ncol) + at = np.vstack( + [node, row, col] + + [array_dict[name].ravel() for name in names[3:]]).transpose() + + names = enforce_10ch_limit(names) + + elif mg.grid_type == 'vertex': + names = ['node'] + list(array_dict.keys()) + dtypes = [('node', np.dtype('int'))] + \ + [(enforce_10ch_limit([name])[0], array_dict[name].dtype) + for name in names[1:]] + node = list(range(1, mg.ncpl + 1)) + at = np.vstack( + [node] + + [array_dict[name].ravel() for name in names[1:]]).transpose() + + names = enforce_10ch_limit(names) + + # flag nan values and explicitly set the dtypes + if at.dtype in [np.float, np.float32, np.float64]: + at[np.isnan(at)] = nan_val + at = np.array([tuple(i) for i in at], dtype=dtypes) + + # write field information + fieldinfo = {name: get_pyshp_field_info(dtype.name) for name, dtype in + dtypes} + for n in names: + w.field(n, *fieldinfo[n]) + + for i, r in enumerate(at): + # check if polygon is closed, if not close polygon for QGIS + if verts[i][-1] != verts[i][0]: + verts[i] = verts[i] + [verts[i][0]] + w.poly([verts[i]]) + w.record(*r) + + # close + w.close() + print('wrote {}'.format(filename)) + # write the projection file + write_prj(filename, mg, epsg, prj) + return + + +def model_attributes_to_shapefile(filename, ml, package_names=None, + array_dict=None, + **kwargs): + """ + Wrapper function for writing a shapefile of model data. If package_names + is not None, then search through the requested packages looking for arrays + that can be added to the shapefile as attributes + + Parameters + ---------- + filename : string + name of the shapefile to write + ml : flopy.mbase + model instance + package_names : list of package names (e.g. 
["dis","lpf"]) + Packages to export data arrays to shapefile. (default is None) + array_dict : dict of {name:2D array} pairs + Additional 2D arrays to add as attributes to the shapefile. + (default is None) + + **kwargs : keyword arguments + modelgrid : fp.modflow.Grid object + if modelgrid is supplied, user supplied modelgrid is used in lieu + of the modelgrid attached to the modflow model object + epsg : int + epsg projection information + prj : str + user supplied prj file + + Returns + ------- + None + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> flopy.utils.model_attributes_to_shapefile('model.shp', m) + + """ + + if array_dict is None: + array_dict = {} + + if package_names is not None: + if not isinstance(package_names, list): + package_names = [package_names] + else: + package_names = [pak.name[0] for pak in ml.packagelist] + + if "modelgrid" in kwargs: + grid = kwargs.pop("modelgrid") + else: + grid = ml.modelgrid + + if grid.grid_type == 'USG-Unstructured': + raise Exception('Flopy does not support exporting to shapefile from ' + 'and MODFLOW-USG unstructured grid.') + horz_shape = grid.shape[1:] + for pname in package_names: + pak = ml.get_package(pname) + attrs = dir(pak) + if pak is not None: + if 'sr' in attrs: + attrs.remove('sr') + if 'start_datetime' in attrs: + attrs.remove('start_datetime') + for attr in attrs: + a = pak.__getattribute__(attr) + if a is None or not hasattr(a, + 'data_type') or a.name == 'thickness': + continue + if a.data_type == DataType.array2d and a.array.shape == horz_shape: + name = shape_attr_name(a.name, keep_layer=True) + # name = a.name.lower() + array_dict[name] = a.array + # elif isinstance(a, Util3d): + elif a.data_type == DataType.array3d: + # Not sure how best to check if an object has array data + try: + assert a.array is not None + except: + print( + 'Failed to get data for {} array, {} package'.format( + a.name, + pak.name[0])) + continue + if isinstance(a.name, list) and a.name[0] == 'thickness': + continue + for ilay in range(a.array.shape[0]): + try: + arr = a.array[ilay] + except: + arr = a[ilay] + + if isinstance(a, Util3d): + aname = shape_attr_name(a[ilay].name) + else: + aname = a.name + + if arr.shape == (1,) + horz_shape: + # fix for mf6 case. 
TODO: fix this in the mf6 code + arr = arr[0] + assert arr.shape == horz_shape + name = '{}_{}'.format(aname, ilay + 1) + array_dict[name] = arr + elif a.data_type == DataType.transient2d: # elif isinstance(a, Transient2d): + # Not sure how best to check if an object has array data + try: + assert a.array is not None + except: + print( + 'Failed to get data for {} array, {} package'.format( + a.name, + pak.name[0])) + continue + for kper in range(a.array.shape[0]): + name = '{}{}'.format( + shape_attr_name(a.name), kper + 1) + arr = a.array[kper][0] + assert arr.shape == horz_shape + array_dict[name] = arr + elif a.data_type == DataType.transientlist: # elif isinstance(a, MfList): + try: + list(a.masked_4D_arrays_itr()) + except: + continue + for name, array in a.masked_4D_arrays_itr(): + for kper in range(array.shape[0]): + for k in range(array.shape[1]): + n = shape_attr_name(name, length=4) + aname = "{}{}{}".format(n, k + 1, kper + 1) + arr = array[kper][k] + assert arr.shape == horz_shape + if np.all(np.isnan(arr)): + continue + array_dict[aname] = arr + elif isinstance(a, list): + for v in a: + if isinstance(a, DataInterface) and \ + v.data_type == DataType.array3d: + for ilay in range(a.model.modelgrid.nlay): + u2d = a[ilay] + name = '{}_{}'.format( + shape_attr_name(u2d.name), ilay + 1) + arr = u2d.array + assert arr.shape == horz_shape + array_dict[name] = arr + + # write data arrays to a shapefile + write_grid_shapefile(filename, grid, array_dict) + epsg = kwargs.get('epsg', None) + prj = kwargs.get('prj', None) + write_prj(filename, grid, epsg, prj) + + +def shape_attr_name(name, length=6, keep_layer=False): + """ + Function for to format an array name to a maximum of 10 characters to + conform with ESRI shapefile maximum attribute name length + + Parameters + ---------- + name : string + data array name + length : int + maximum length of string to return. Value passed to function is + overridden and set to 10 if keep_layer=True. (default is 6) + keep_layer : bool + Boolean that determines if layer number in name should be retained. + (default is False) + + + Returns + ------- + String + + Examples + -------- + + >>> import flopy + >>> name = flopy.utils.shape_attr_name('averylongstring') + >>> name + >>> 'averyl' + + """ + # kludges + if name == 'model_top': + name = 'top' + # replace spaces with "_" + n = name.lower().replace(' ', '_') + # exclude "_layer_X" portion of string + if keep_layer: + length = 10 + n = n.replace('_layer', '_') + else: + try: + idx = n.index('_layer') + n = n[:idx] + except: + pass + + if len(n) > length: + n = n[:length] + return n + + +def enforce_10ch_limit(names): + """Enforce 10 character limit for fieldnames. + Add suffix for duplicate names starting at 0. + + Parameters + ---------- + names : list of strings + + Returns + ------- + names : list of unique strings of len <= 10. + """ + names = [n[:5] + n[-4:] + '_' if len(n) > 10 else n + for n in names] + dups = {x: names.count(x) for x in names} + suffix = {n: list(range(cnt)) for n, cnt in dups.items() if cnt > 1} + for i, n in enumerate(names): + if dups[n] > 1: + names[i] = n[:9] + str(suffix[n].pop(0)) + return names + + +def get_pyshp_field_info(dtypename): + """Get pyshp dtype information for a given numpy dtype. 
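+
+    For example, an integer column maps to the pyshp field descriptor
+    ('N', 18, 0), the form expected by shapefile.Writer.field().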
+ """ + fields = {'int': ('N', 18, 0), + ' 0 else None + proj = self.crs['proj'] + names = {'aea': 'albers_conical_equal_area', + 'aeqd': 'azimuthal_equidistant', + 'laea': 'lambert_azimuthal_equal_area', + 'longlat': 'latitude_longitude', + 'lcc': 'lambert_conformal_conic', + 'merc': 'mercator', + 'tmerc': 'transverse_mercator', + 'utm': 'transverse_mercator'} + attribs = {'grid_mapping_name': names[proj], + 'semi_major_axis': self.crs['a'], + 'inverse_flattening': self.crs['rf'], + 'standard_parallel': sp, + 'longitude_of_central_meridian': self.crs['lon_0'], + 'latitude_of_projection_origin': self.crs['lat_0'], + 'scale_factor_at_projection_origin': self.crs['k_0'], + 'false_easting': self.crs['x_0'], + 'false_northing': self.crs['y_0']} + return {k: v for k, v in attribs.items() if v is not None} + + @property + def proj4(self): + """ + Not implemented yet + """ + return None + + def parse_wkt(self): + + self.projcs = self._gettxt('PROJCS["', '"') + self.utm_zone = None + if self.projcs is not None and 'utm' in self.projcs.lower(): + self.utm_zone = self.projcs[-3:].lower().strip('n').strip('s') + self.geogcs = self._gettxt('GEOGCS["', '"') + self.datum = self._gettxt('DATUM["', '"') + tmp = self._getgcsparam('SPHEROID') + self.spheroid_name = tmp.pop(0) + self.semi_major_axis = tmp.pop(0) + self.inverse_flattening = tmp.pop(0) + self.primem = self._getgcsparam('PRIMEM') + self.gcs_unit = self._getgcsparam('UNIT') + self.projection = self._gettxt('PROJECTION["', '"') + self.latitude_of_origin = self._getvalue('latitude_of_origin') + self.central_meridian = self._getvalue('central_meridian') + self.standard_parallel_1 = self._getvalue('standard_parallel_1') + self.standard_parallel_2 = self._getvalue('standard_parallel_2') + self.scale_factor = self._getvalue('scale_factor') + self.false_easting = self._getvalue('false_easting') + self.false_northing = self._getvalue('false_northing') + self.projcs_unit = self._getprojcs_unit() + + def _gettxt(self, s1, s2): + s = self.wktstr.lower() + strt = s.find(s1.lower()) + if strt >= 0: # -1 indicates not found + strt += len(s1) + end = s[strt:].find(s2.lower()) + strt + return self.wktstr[strt:end] + + def _getvalue(self, k): + s = self.wktstr.lower() + strt = s.find(k.lower()) + if strt >= 0: + strt += len(k) + end = s[strt:].find(']') + strt + try: + return float(self.wktstr[strt:end].split(',')[1]) + except (IndexError, TypeError, ValueError, AttributeError): + pass + + def _getgcsparam(self, txt): + nvalues = 3 if txt.lower() == 'spheroid' else 2 + tmp = self._gettxt('{}["'.format(txt), ']') + if tmp is not None: + tmp = tmp.replace('"', '').split(',') + name = tmp[0:1] + values = list(map(float, tmp[1:nvalues])) + return name + values + else: + return [None] * nvalues + + def _getprojcs_unit(self): + if self.projcs is not None: + tmp = self.wktstr.lower().split('unit["')[-1] + uname, ufactor = tmp.strip().strip(']').split('",')[0:2] + ufactor = float(ufactor.split(']')[0].split()[0].split(',')[0]) + return uname, ufactor + return None, None + + @staticmethod + def getprj(epsg, addlocalreference=True, text='esriwkt'): + """ + Gets projection file (.prj) text for given epsg code from + spatialreference.org + See: https://www.epsg-registry.org/ + + Parameters + ---------- + epsg : int + epsg code for coordinate system + addlocalreference : boolean + adds the projection file text associated with epsg to a local + database, epsgref.json, located in the user's data directory. + Returns + ------- + prj : str + text for a projection (*.prj) file. 
+ + """ + epsgfile = EpsgReference() + wktstr = epsgfile.get(epsg) + if wktstr is None: + wktstr = CRS.get_spatialreference(epsg, text=text) + if addlocalreference and wktstr is not None: + epsgfile.add(epsg, wktstr) + return wktstr + + @staticmethod + def get_spatialreference(epsg, text='esriwkt'): + """ + Gets text for given epsg code and text format from spatialreference.org + Fetches the reference text using the url: + https://spatialreference.org/ref/epsg/// + See: https://www.epsg-registry.org/ + + Parameters + ---------- + epsg : int + epsg code for coordinate system + text : str + string added to url + Returns + ------- + url : str + + """ + from flopy.utils.flopy_io import get_url_text + + epsg_categories = ['epsg', 'esri'] + for cat in epsg_categories: + url = '{}/ref/'.format(srefhttp) + \ + '{}/{}/{}/'.format(cat, epsg, text) + result = get_url_text(url) + if result is not None: + break + if result is not None: + return result.replace("\n", "") + elif result is None and text != 'epsg': + for cat in epsg_categories: + error_msg = 'No internet connection or ' + \ + 'epsg code {} '.format(epsg) + \ + 'not found at {}/ref/'.format(srefhttp) + \ + '{}/{}/{}'.format(cat, epsg, text) + print(error_msg) + # epsg code not listed on spatialreference.org + # may still work with pyproj + elif text == 'epsg': + return 'epsg:{}'.format(epsg) + + @staticmethod + def getproj4(epsg): + """ + Gets projection file (.prj) text for given epsg code from + spatialreference.org. See: https://www.epsg-registry.org/ + + Parameters + ---------- + epsg : int + epsg code for coordinate system + Returns + ------- + prj : str + text for a projection (*.prj) file. + """ + return CRS.get_spatialreference(epsg, text='proj4') + + +class EpsgReference: + """ + Sets up a local database of text representations of coordinate reference + systems, keyed by EPSG code. + + The database is epsgref.json, located in the user's data directory. If + optional 'appdirs' package is available, this is in the platform-dependent + user directory, otherwise in the user's 'HOME/.flopy' directory. 
+ """ + + def __init__(self): + try: + from appdirs import user_data_dir + except ImportError: + user_data_dir = None + if user_data_dir: + datadir = user_data_dir('flopy') + else: + # if appdirs is not installed, use user's home directory + datadir = os.path.join(os.path.expanduser('~'), '.flopy') + if not os.path.isdir(datadir): + os.makedirs(datadir) + dbname = 'epsgref.json' + self.location = os.path.join(datadir, dbname) + + def to_dict(self): + """ + returns dict with EPSG code integer key, and WKT CRS text + """ + data = OrderedDict() + if os.path.exists(self.location): + with open(self.location, 'r') as f: + loaded_data = json.load(f, object_pairs_hook=OrderedDict) + # convert JSON key from str to EPSG integer + for key, value in loaded_data.items(): + try: + data[int(key)] = value + except ValueError: + data[key] = value + return data + + def _write(self, data): + with open(self.location, 'w') as f: + json.dump(data, f, indent=0) + f.write('\n') + + def reset(self, verbose=True): + if os.path.exists(self.location): + if verbose: + print('Resetting {}'.format(self.location)) + os.remove(self.location) + elif verbose: + print('{} does not exist, no reset required'.format(self.location)) + + def add(self, epsg, prj): + """ + add an epsg code to epsgref.json + """ + data = self.to_dict() + data[epsg] = prj + self._write(data) + + def get(self, epsg): + """ + returns prj from a epsg code, otherwise None if not found + """ + data = self.to_dict() + return data.get(epsg) + + def remove(self, epsg): + """ + removes an epsg entry from epsgref.json + """ + data = self.to_dict() + if epsg in data: + del data[epsg] + self._write(data) + + @staticmethod + def show(): + ep = EpsgReference() + prj = ep.to_dict() + for k, v in prj.items(): + print('{}:\n{}\n'.format(k, v)) diff --git a/flopy/export/utils.py b/flopy/export/utils.py index f49be79f82..9268177066 100644 --- a/flopy/export/utils.py +++ b/flopy/export/utils.py @@ -1,1644 +1,1644 @@ -from __future__ import print_function -import json -import os -import numpy as np -from ..utils import HeadFile, CellBudgetFile, UcnFile, FormattedHeadFile, \ - ZBNetOutput -from ..mbase import BaseModel, ModelInterface -from ..pakbase import PackageInterface -from ..datbase import DataType, DataInterface, DataListInterface -from . import NetCdf, netcdf -from . import shapefile_utils -from . import vtk - - -NC_PRECISION_TYPE = {np.float64: "f8", np.float32: "f4", np.int: "i4", - np.int64: "i4", np.int32: "i4"} - -path = os.path.split(netcdf.__file__)[0] -with open(path + '/longnames.json') as f: - NC_LONG_NAMES = json.load(f) -with open(path + '/unitsformat.json') as f: - NC_UNITS_FORMAT = json.load(f) - - -def ensemble_helper(inputs_filename, outputs_filename, models, add_reals=True, - **kwargs): - """ - Helper to export an ensemble of model instances. Assumes - all models have same dis and reference information, only difference is - properties and boundary conditions. 
diff --git a/flopy/export/utils.py b/flopy/export/utils.py
index f49be79f82..9268177066 100644
--- a/flopy/export/utils.py
+++ b/flopy/export/utils.py
@@ -1,1644 +1,1644 @@
-from __future__ import print_function
-import json
-import os
-import numpy as np
-from ..utils import HeadFile, CellBudgetFile, UcnFile, FormattedHeadFile, \
-    ZBNetOutput
-from ..mbase import BaseModel, ModelInterface
-from ..pakbase import PackageInterface
-from ..datbase import DataType, DataInterface, DataListInterface
-from . import NetCdf, netcdf
-from . import shapefile_utils
-from . import vtk
-
-
-NC_PRECISION_TYPE = {np.float64: "f8", np.float32: "f4", np.int: "i4",
-                     np.int64: "i4", np.int32: "i4"}
-
-path = os.path.split(netcdf.__file__)[0]
-with open(path + '/longnames.json') as f:
-    NC_LONG_NAMES = json.load(f)
-with open(path + '/unitsformat.json') as f:
-    NC_UNITS_FORMAT = json.load(f)
-
-
-def ensemble_helper(inputs_filename, outputs_filename, models, add_reals=True,
-                    **kwargs):
-    """
-    Helper to export an ensemble of model instances. Assumes all models have
-    the same dis and reference information; the only differences are
-    properties and boundary conditions. Assumes model.nam.split('_')[-1] is
-    the realization suffix to use in the netcdf variable names.
-    """
-    f_in, f_out = None, None
-    for m in models[1:]:
-        assert m.get_nrow_ncol_nlay_nper() == models[
-            0].get_nrow_ncol_nlay_nper()
-    if inputs_filename is not None:
-        f_in = models[0].export(inputs_filename, **kwargs)
-        vdict = {}
-        vdicts = [models[0].export(vdict, **kwargs)]
-        i = 1
-        for m in models[1:]:
-            suffix = m.name.split('.')[0].split('_')[-1]
-            vdict = {}
-            m.export(vdict, **kwargs)
-            vdicts.append(vdict)
-            if add_reals:
-                f_in.append(vdict, suffix=suffix)
-            i += 1
-        mean, stdev = {}, {}
-        for vname in vdict.keys():
-            alist = []
-            for vd in vdicts:
-                alist.append(vd[vname])
-            alist = np.array(alist)
-            mean[vname] = alist.mean(axis=0)
-            stdev[vname] = alist.std(axis=0)
-            mean[vname][vdict[vname] == netcdf.FILLVALUE] = netcdf.FILLVALUE
-            stdev[vname][vdict[vname] == netcdf.FILLVALUE] = netcdf.FILLVALUE
-            mean[vname][np.isnan(vdict[vname])] = netcdf.FILLVALUE
-            stdev[vname][np.isnan(vdict[vname])] = netcdf.FILLVALUE
-
-        if i >= 2:
-            if not add_reals:
-                f_in.write()
-                f_in = NetCdf.empty_like(mean,
-                                         output_filename=inputs_filename)
-                f_in.append(mean, suffix="**mean**")
-                f_in.append(stdev, suffix="**stdev**")
-            else:
-                f_in.append(mean, suffix="**mean**")
-                f_in.append(stdev, suffix="**stdev**")
-        f_in.add_global_attributes({"namefile": ''})
-
-    if outputs_filename is not None:
-        f_out = output_helper(outputs_filename, models[0],
-                              models[0].load_results(as_dict=True), **kwargs)
-        vdict = {}
-        vdicts = [output_helper(vdict, models[0], models[0]. \
-                                load_results(as_dict=True), **kwargs)]
-        i = 1
-        for m in models[1:]:
-            suffix = m.name.split('.')[0].split('_')[-1]
-            oudic = m.load_results(as_dict=True)
-            vdict = {}
-            output_helper(vdict, m, oudic, **kwargs)
-            vdicts.append(vdict)
-            if add_reals:
-                f_out.append(vdict, suffix=suffix)
-            i += 1
-
-        mean, stdev = {}, {}
-        for vname in vdict.keys():
-            alist = []
-            for vd in vdicts:
-                alist.append(vd[vname])
-            alist = np.array(alist)
-            mean[vname] = alist.mean(axis=0)
-            stdev[vname] = alist.std(axis=0)
-            mean[vname][np.isnan(vdict[vname])] = netcdf.FILLVALUE
-            stdev[vname][np.isnan(vdict[vname])] = netcdf.FILLVALUE
-            mean[vname][vdict[vname] == netcdf.FILLVALUE] = netcdf.FILLVALUE
-            stdev[vname][vdict[vname] == netcdf.FILLVALUE] = netcdf.FILLVALUE
-        if i >= 2:
-            if not add_reals:
-                f_out.write()
-                f_out = NetCdf.empty_like(mean,
-                                          output_filename=outputs_filename)
-                f_out.append(mean, suffix="**mean**")
-                f_out.append(stdev, suffix="**stdev**")
-
-            else:
-                f_out.append(mean, suffix="**mean**")
-                f_out.append(stdev, suffix="**stdev**")
-        f_out.add_global_attributes({"namefile": ''})
-    return f_in, f_out
-
-
-def _add_output_nc_variable(f, times, shape3d, out_obj, var_name, logger=None,
-                            text='', mask_vals=(), mask_array3d=None):
-    if logger:
-        logger.log("creating array for {0}".format(
-            var_name))
-
-    array = np.zeros((len(times), shape3d[0], shape3d[1], shape3d[2]),
-                     dtype=np.float32)
-    array[:] = np.NaN
-
-    if isinstance(out_obj, ZBNetOutput):
-        a = np.asarray(out_obj.zone_array, dtype=np.float32)
-        if mask_array3d is not None:
-            a[mask_array3d] = np.NaN
-        for i, _ in enumerate(times):
-            array[i, :, :, :] = a
-
-    else:
-        for i, t in enumerate(times):
-            if t in out_obj.recordarray["totim"]:
-                try:
-                    if text:
-                        a = out_obj.get_data(totim=t, full3D=True, text=text)
-                        if isinstance(a, list):
-                            a = a[0]
-                    else:
-                        a = out_obj.get_data(totim=t)
-                except Exception as e:
-                    estr = "error getting data for {0} at time" \
-                           "
{1}:{2}".format(var_name + - text.decode().strip().lower(), - t, str(e)) - if logger: - logger.warn(estr) - else: - print(estr) - continue - if mask_array3d is not None and a.shape == mask_array3d.shape: - a[mask_array3d] = np.NaN - try: - array[i, :, :, :] = a.astype(np.float32) - except Exception as e: - estr = "error assigning {0} data to array for time" \ - " {1}:{2}".format(var_name + - text.decode().strip().lower(), - t, str(e)) - if logger: - logger.warn(estr) - else: - print(estr) - continue - - if logger: - logger.log("creating array for {0}".format( - var_name)) - - for mask_val in mask_vals: - array[np.where(array == mask_val)] = np.NaN - mx, mn = np.nanmax(array), np.nanmin(array) - array[np.isnan(array)] = netcdf.FILLVALUE - - if isinstance(f, dict): - if text: - var_name = text.decode().strip().lower() - f[var_name] = array - return f - - units = None - if var_name in NC_UNITS_FORMAT: - units = NC_UNITS_FORMAT[var_name].format( - f.grid_units, f.time_units) - precision_str = "f4" - - if text: - var_name = text.decode().strip().lower() - attribs = {"long_name": var_name} - attribs["coordinates"] = "time layer latitude longitude" - attribs["min"] = mn - attribs["max"] = mx - if units is not None: - attribs["units"] = units - try: - dim_tuple = ("time",) + f.dimension_names - var = f.create_variable(var_name, attribs, - precision_str=precision_str, - dimensions=dim_tuple) - except Exception as e: - estr = "error creating variable {0}:\n{1}".format( - var_name, str(e)) - if logger: - logger.lraise(estr) - else: - raise Exception(estr) - - try: - var[:] = array - except Exception as e: - estr = "error setting array to variable {0}:\n{1}".format( - var_name, str(e)) - if logger: - logger.lraise(estr) - else: - raise Exception(estr) - - -def _add_output_nc_zonebudget_variable(f, array, var_name, flux, - logger=None): - """ - Method to add zonebudget output data to netcdf file - - Parameters - ---------- - f : NetCdf object - array : np.ndarray - zonebudget output budget group array - var_name : str - variable name - flux : bool - flag for flux data or volumetric data - logger : None or Logger - logger instance - - """ - if logger: - logger.log("creating array for {}".format(var_name)) - - mn = np.min(array) - mx = np.max(array) - - precision_str = "f4" - if flux: - units = "{}^3/{}".format(f.grid_units, f.time_units) - else: - units = "{}^3".format(f.grid_units) - attribs = {"long_name": var_name} - attribs["coordinates"] = "time zone" - attribs["min"] = mn - attribs["max"] = mx - attribs['units'] = units - dim_tuple = ('time', "zone") - - var = f.create_group_variable('zonebudget', var_name, attribs, - precision_str, dim_tuple) - - var[:] = array - -def output_helper(f, ml, oudic, **kwargs): - """ - Export model outputs using the model spatial reference info. 
- - Parameters - ---------- - f : str - filename for output - must have .shp or .nc extension - ml : flopy.mbase.ModelInterface derived type - oudic : dict - output_filename,flopy datafile/cellbudgetfile instance - **kwargs : keyword arguments - modelgrid : flopy.discretizaiton.Grid - user supplied model grid instance that will be used for export - in lieu of the models model grid instance - mflay : int - zero based model layer which can be used in shapefile exporting - kper : int - zero based stress period which can be used for shapefile exporting - - Returns - ------- - None - Note: - ---- - casts down double precision to single precision for netCDF files - - """ - assert isinstance(ml, (BaseModel, ModelInterface)) - assert len(oudic.keys()) > 0 - logger = kwargs.pop("logger", None) - stride = kwargs.pop("stride", 1) - forgive = kwargs.pop("forgive", False) - kwargs.pop("suffix", None) - mask_vals = [] - mflay = kwargs.pop('mflay', None) - kper = kwargs.pop('kper', None) - if "masked_vals" in kwargs: - mask_vals = kwargs.pop("masked_vals") - if len(kwargs) > 0 and logger is not None: - str_args = ','.join(kwargs) - logger.warn("unused kwargs: " + str_args) - - zonebud = None - zbkey = None - for key, value in oudic.items(): - if isinstance(value, ZBNetOutput): - zbkey = key - break - - if zbkey is not None: - zonebud = oudic.pop(zbkey) - - # ISSUE - need to round the totims in each output file instance so - # that they will line up - for key in oudic.keys(): - out = oudic[key] - times = [float("{0:15.6f}".format(t)) for t in - out.recordarray["totim"]] - out.recordarray["totim"] = times - - times = [] - for filename, df in oudic.items(): - for t in df.recordarray["totim"]: - if t not in times: - times.append(t) - - if zonebud is not None and not oudic: - if isinstance(f, NetCdf): - times = f.time_values_arg - else: - times = zonebud.time - - assert len(times) > 0 - times.sort() - - # rectify times - only use times that are common to every output file - common_times = [] - skipped_times = [] - for t in times: - keep = True - for filename, df in oudic.items(): - if isinstance(df, ZBNetOutput): - continue - if t not in df.recordarray["totim"]: - keep = False - break - if keep: - common_times.append(t) - else: - skipped_times.append(t) - - assert len(common_times) > 0 - if len(skipped_times) > 0: - if logger: - logger.warn("the following output times are not common to all" + \ - " output files and are being skipped:\n" + \ - "{0}".format(skipped_times)) - else: - print("the following output times are not common to all" + \ - " output files and are being skipped:\n" + \ - "{0}".format(skipped_times)) - times = [t for t in common_times[::stride]] - if isinstance(f, str) and f.lower().endswith(".nc"): - f = NetCdf(f, ml, time_values=times, logger=logger, - forgive=forgive, **kwargs) - elif isinstance(f, NetCdf): - otimes = list(f.nc.variables["time"][:]) - assert otimes == times - if isinstance(f, NetCdf) or isinstance(f, dict): - shape3d = (ml.modelgrid.nlay, ml.modelgrid.nrow, ml.modelgrid.ncol) - mask_array3d = None - if ml.hdry is not None: - mask_vals.append(ml.hdry) - if ml.hnoflo is not None: - mask_vals.append(ml.hnoflo) - - if ml.modelgrid.idomain is not None: - mask_array3d = ml.modelgrid.idomain == 0 - - for filename, out_obj in oudic.items(): - filename = filename.lower() - - if isinstance(out_obj, UcnFile): - _add_output_nc_variable(f, times, shape3d, out_obj, - "concentration", logger=logger, - mask_vals=mask_vals, - mask_array3d=mask_array3d) - - elif isinstance(out_obj, 
HeadFile): - _add_output_nc_variable(f, times, shape3d, out_obj, - out_obj.text.decode(), logger=logger, - mask_vals=mask_vals, - mask_array3d=mask_array3d) - - elif isinstance(out_obj, FormattedHeadFile): - _add_output_nc_variable(f, times, shape3d, out_obj, - out_obj.text, logger=logger, - mask_vals=mask_vals, - mask_array3d=mask_array3d) - - elif isinstance(out_obj, CellBudgetFile): - var_name = "cell_by_cell_flow" - for text in out_obj.textlist: - _add_output_nc_variable(f, times, shape3d, out_obj, - var_name, logger=logger, text=text, - mask_vals=mask_vals, - mask_array3d=mask_array3d) - - else: - estr = "unrecognized file extension:{0}".format(filename) - if logger: - logger.lraise(estr) - else: - raise Exception(estr) - - if zonebud is not None: - try: - f.initialize_group("zonebudget", - dimensions=('time', 'zone'), - dimension_data={'time': zonebud.time, - 'zone': zonebud.zones}) - except AttributeError: - pass - - for text, array in zonebud.arrays.items(): - _add_output_nc_zonebudget_variable(f, array, text, - zonebud.flux, - logger) - - # write the zone array to standard output - _add_output_nc_variable(f, times, shape3d, zonebud, - "budget_zones", logger=logger, - mask_vals=mask_vals, - mask_array3d=mask_array3d) - - elif isinstance(f, str) and f.endswith('.shp'): - attrib_dict = {} - for _, out_obj in oudic.items(): - - if isinstance(out_obj, HeadFile) or \ - isinstance(out_obj, FormattedHeadFile) or \ - isinstance(out_obj, UcnFile): - if isinstance(out_obj, UcnFile): - attrib_name = 'conc' - else: - attrib_name = 'head' - plotarray = np.atleast_3d(out_obj.get_alldata() - .transpose()).transpose() - - for per in range(plotarray.shape[0]): - for k in range(plotarray.shape[1]): - if kper is not None and per != kper: - continue - if mflay is not None and k != mflay: - continue - name = attrib_name + '{}_{}'.format(per, k) - attrib_dict[name] = plotarray[per][k] - - elif isinstance(out_obj, CellBudgetFile): - names = out_obj.get_unique_record_names(decode=True) - - for attrib_name in names: - plotarray = np.atleast_3d(out_obj.get_data( - text=attrib_name, - full3D=True)) - - attrib_name = attrib_name.strip() - if attrib_name == "FLOW RIGHT FACE": - attrib_name = 'FRF' - elif attrib_name == "FLOW FRONT FACE": - attrib_name = "FFF" - elif attrib_name == "FLOW LOWER FACE": - attrib_name = "FLF" - else: - pass - for per in range(plotarray.shape[0]): - for k in range(plotarray.shape[1]): - if kper is not None and per != kper: - continue - if mflay is not None and k != mflay: - continue - name = attrib_name + '{}_{}'.format(per, k) - attrib_dict[name] = plotarray[per][k] - - if attrib_dict: - shapefile_utils.write_grid_shapefile(f, ml.modelgrid, attrib_dict) - - else: - if logger: - logger.lraise("unrecognized export argument:{0}".format(f)) - else: - raise NotImplementedError("unrecognized export argument" + - ":{0}".format(f)) - return f - - -def model_export(f, ml, fmt=None, **kwargs): - """ - Method to export a model to a shapefile or netcdf file - - Parameters - ---------- - f : str - file name (".nc" for netcdf or ".shp" for shapefile) - or dictionary of .... - ml : flopy.modflow.mbase.ModelInterface object - flopy model object - fmt : str - output format flag. 
'vtk' will export to vtk - **kwargs : keyword arguments - modelgrid: flopy.discretization.Grid - user supplied modelgrid object which will supercede the built - in modelgrid object - epsg : int - epsg projection code - prj : str - prj file name - if fmt is set to 'vtk', parameters of vtk.export_model - - """ - assert isinstance(ml, ModelInterface) - package_names = kwargs.get("package_names", None) - if package_names is None: - package_names = [pak.name[0] for pak in ml.packagelist] - - if isinstance(f, str) and f.lower().endswith(".nc"): - f = NetCdf(f, ml, **kwargs) - - if isinstance(f, str) and f.lower().endswith(".shp"): - shapefile_utils.model_attributes_to_shapefile(f, ml, - package_names=package_names, - **kwargs) - - elif isinstance(f, NetCdf): - - for pak in ml.packagelist: - if pak.name[0] in package_names: - f = package_export(f, pak, **kwargs) - assert f is not None - return f - - elif isinstance(f, dict): - for pak in ml.packagelist: - f = package_export(f, pak, **kwargs) - - elif fmt == 'vtk': - # call vtk model export - nanval = kwargs.get('nanval', -1e20) - smooth = kwargs.get('smooth', False) - point_scalars = kwargs.get('point_scalars', False) - vtk_grid_type = kwargs.get('vtk_grid_type', 'auto') - true2d = kwargs.get('true2d', False) - binary = kwargs.get('binary', False) - kpers = kwargs.get('kpers', None) - vtk.export_model(ml, f, package_names=package_names, nanval=nanval, - smooth=smooth, point_scalars=point_scalars, - vtk_grid_type=vtk_grid_type, true2d=true2d, - binary=binary, kpers=kpers) - - else: - raise NotImplementedError("unrecognized export argument:{0}".format(f)) - - return f - - -def package_export(f, pak, fmt=None, **kwargs): - """ - Method to export a package to shapefile or netcdf - - Parameters - ---------- - f : str - output file name (ends in .shp for shapefile or .nc for netcdf) - pak : flopy.pakbase.Package object - package to export - fmt : str - output format flag. 
'vtk' will export to vtk - ** kwargs : keword arguments - modelgrid: flopy.discretization.Grid - user supplied modelgrid object which will supercede the built - in modelgrid object - epsg : int - epsg projection code - prj : str - prj file name - if fmt is set to 'vtk', parameters of vtk.export_package - - Returns - ------- - f : NetCdf object or None - - """ - assert isinstance(pak, PackageInterface) - if isinstance(f, str) and f.lower().endswith(".nc"): - f = NetCdf(f, pak.parent, **kwargs) - - if isinstance(f, str) and f.lower().endswith(".shp"): - shapefile_utils.model_attributes_to_shapefile(f, pak.parent, - package_names=pak.name, - **kwargs) - - elif isinstance(f, NetCdf) or isinstance(f, dict): - for a in pak.data_list: - if isinstance(a, DataInterface): - if a.array is not None: - if a.data_type == DataType.array2d \ - and len(a.array.shape) == 2 \ - and a.array.shape[1] > 0: - try: - f = array2d_export(f, a, **kwargs) - except: - f.logger.warn( - "error adding {0} as variable".format(a.name)) - elif a.data_type == DataType.array3d: - f = array3d_export(f, a, **kwargs) - elif a.data_type == DataType.transient2d: - f = transient2d_export(f, a, **kwargs) - elif a.data_type == DataType.transientlist: - f = mflist_export(f, a, **kwargs) - elif isinstance(a, list): - for v in a: - if isinstance(a, DataInterface) and \ - v.data_type == DataType.array3d: - f = array3d_export(f, v, **kwargs) - return f - - elif fmt == 'vtk': - # call vtk array export to folder - nanval = kwargs.get('nanval', -1e20) - smooth = kwargs.get('smooth', False) - point_scalars = kwargs.get('point_scalars', False) - vtk_grid_type = kwargs.get('vtk_grid_type', 'auto') - true2d = kwargs.get('true2d', False) - binary = kwargs.get('binary', False) - kpers = kwargs.get('kpers', None) - vtk.export_package(pak.parent, pak.name, f, nanval=nanval, - smooth=smooth, point_scalars=point_scalars, - vtk_grid_type=vtk_grid_type, true2d=true2d, - binary=binary, kpers=kpers) - - else: - raise NotImplementedError("unrecognized export argument:{0}".format(f)) - - -def generic_array_export(f, array, var_name="generic_array", - dimensions=("time", "layer", "y", "x"), - precision_str="f4", units="unitless", **kwargs): - """ - Method to export a generic array to NetCdf - - Parameters - ---------- - f : str - filename or existing export instance type (NetCdf only for now) - array : np.ndarray - var_name : str - variable name - dimensions : tuple - netcdf dimensions - precision_str : str - binary precision string, default "f4" - units : string - units of array data - **kwargs : keyword arguments - model : flopy.modflow.mbase - flopy model object - - """ - if isinstance(f, str) and f.lower().endswith(".nc"): - assert "model" in kwargs.keys(), "creating a new netCDF using " \ - "generic_array_helper requires a " \ - "'model' kwarg" - assert isinstance(kwargs["model"], BaseModel) - f = NetCdf(f, kwargs.pop("model"), **kwargs) - - assert array.ndim == len(dimensions), "generic_array_helper() " + \ - "array.ndim != dimensions" - coords_dims = {"time": "time", "layer": "layer", "y": "latitude", - "x": "longitude"} - coords = ' '.join([coords_dims[d] for d in dimensions]) - mn = kwargs.pop("min", -1.0e+9) - mx = kwargs.pop("max", 1.0e+9) - long_name = kwargs.pop("long_name", var_name) - if len(kwargs) > 0: - f.logger.warn("generic_array_helper(): unrecognized kwargs:" + \ - ",".join(kwargs.keys())) - attribs = {"long_name": long_name} - attribs["coordinates"] = coords - attribs["units"] = units - attribs["min"] = mn - attribs["max"] = mx - if 
np.isnan(attribs["min"]) or np.isnan(attribs["max"]): - raise Exception("error processing {0}: all NaNs".format(var_name)) - try: - var = f.create_variable(var_name, attribs, precision_str=precision_str, - dimensions=dimensions) - except Exception as e: - estr = "error creating variable {0}:\n{1}".format(var_name, str(e)) - f.logger.warn(estr) - raise Exception(estr) - try: - var[:] = array - except Exception as e: - estr = "error setting array to variable {0}:\n{1}".format(var_name, - str(e)) - f.logger.warn(estr) - raise Exception(estr) - return f - - -def mflist_export(f, mfl, **kwargs): - """ - export helper for MfList instances - - Parameters - ----------- - f : str - filename or existing export instance type (NetCdf only for now) - mfl : MfList instance - **kwargs : keyword arguments - modelgrid : flopy.discretization.Grid - model grid instance which will supercede the flopy.model.modelgrid - - """ - if not isinstance(mfl, (DataListInterface, DataInterface)): - err = "mflist_helper only helps instances that support " \ - "DataListInterface" - raise AssertionError(err) - - modelgrid = mfl.model.modelgrid - if "modelgrid" in kwargs: - modelgrid = kwargs.pop("modelgrid") - - if isinstance(f, str) and f.lower().endswith(".nc"): - f = NetCdf(f, mfl.model, **kwargs) - - if isinstance(f, str) and f.lower().endswith(".shp"): - sparse = kwargs.get("sparse", False) - kper = kwargs.get("kper", 0) - squeeze = kwargs.get("squeeze", True) - - if modelgrid is None: - raise Exception("MfList.to_shapefile: ModelGrid is not set") - elif modelgrid.grid_type == 'USG-Unstructured': - raise Exception('Flopy does not support exporting to shapefile ' - 'from a MODFLOW-USG unstructured grid.') - - if kper is None: - keys = mfl.data.keys() - keys.sort() - else: - keys = [kper] - if not sparse: - array_dict = {} - for kk in keys: - arrays = mfl.to_array(kk) - for name, array in arrays.items(): - for k in range(array.shape[0]): - # aname = name+"{0:03d}_{1:02d}".format(kk, k) - n = shapefile_utils.shape_attr_name(name, length=4) - aname = "{}{}{}".format(n, k + 1, int(kk) + 1) - array_dict[aname] = array[k] - shapefile_utils.write_grid_shapefile(f, modelgrid, array_dict) - else: - from ..export.shapefile_utils import recarray2shp - from ..utils.geometry import Polygon - - df = mfl.get_dataframe(squeeze=squeeze) - if 'kper' in kwargs or df is None: - ra = mfl[kper] - verts = np.array(modelgrid.get_cell_vertices(ra.i, ra.j)) - elif df is not None: - verts = np.array([modelgrid.get_cell_vertices(i, - df.j.values[ix]) - for ix, i in enumerate(df.i.values)]) - ra = df.to_records(index=False) - epsg = kwargs.get('epsg', None) - prj = kwargs.get('prj', None) - polys = np.array([Polygon(v) for v in verts]) - recarray2shp(ra, geoms=polys, shpname=f, - mg=modelgrid, epsg=epsg, prj=prj) - - elif isinstance(f, NetCdf) or isinstance(f, dict): - base_name = mfl.package.name[0].lower() - # f.log("getting 4D masked arrays for {0}".format(base_name)) - # m4d = mfl.masked_4D_arrays - # f.log("getting 4D masked arrays for {0}".format(base_name)) - - # for name, array in m4d.items(): - for name, array in mfl.masked_4D_arrays_itr(): - var_name = base_name + '_' + name - if isinstance(f, dict): - f[var_name] = array - continue - f.log("processing {0} attribute".format(name)) - - units = None - if var_name in NC_UNITS_FORMAT: - units = NC_UNITS_FORMAT[var_name].format(f.grid_units, - f.time_units) - precision_str = NC_PRECISION_TYPE[mfl.dtype[name].type] - if var_name in NC_LONG_NAMES: - attribs = {"long_name": 
NC_LONG_NAMES[var_name]} - else: - attribs = {"long_name": var_name} - attribs["coordinates"] = "time layer latitude longitude" - attribs["min"] = np.nanmin(array) - attribs["max"] = np.nanmax(array) - if np.isnan(attribs["min"]) or np.isnan(attribs["max"]): - raise Exception( - "error processing {0}: all NaNs".format(var_name)) - - if units is not None: - attribs["units"] = units - try: - dim_tuple = ("time",) + f.dimension_names - var = f.create_variable(var_name, attribs, - precision_str=precision_str, - dimensions=dim_tuple) - except Exception as e: - estr = "error creating variable {0}:\n{1}".format(var_name, - str(e)) - f.logger.warn(estr) - raise Exception(estr) - - array[np.isnan(array)] = f.fillvalue - try: - var[:] = array - except Exception as e: - estr = "error setting array to variable {0}:\n{1}".format( - var_name, str(e)) - f.logger.warn(estr) - raise Exception(estr) - f.log("processing {0} attribute".format(name)) - - return f - else: - raise NotImplementedError("unrecognized export argument:{0}".format(f)) - - -def transient2d_export(f, t2d, fmt=None, **kwargs): - """ - export helper for Transient2d instances - - Parameters - ----------- - f : str - filename or existing export instance type (NetCdf only for now) - t2d : Transient2d instance - fmt : str - output format flag. 'vtk' will export to vtk - **kwargs : keyword arguments - min_valid : minimum valid value - max_valid : maximum valid value - modelgrid : flopy.discretization.Grid - model grid instance which will supercede the flopy.model.modelgrid - if fmt is set to 'vtk', parameters of vtk.export_transient - - """ - - if not isinstance(t2d, DataInterface): - err = "transient2d_helper only helps instances that support " \ - "DataInterface" - raise AssertionError(err) - - min_valid = kwargs.get("min_valid", -1.0e+9) - max_valid = kwargs.get("max_valid", 1.0e+9) - - modelgrid = t2d.model.modelgrid - if 'modelgrid' in kwargs: - modelgrid = kwargs.pop("modelgrid") - - if isinstance(f, str) and f.lower().endswith(".nc"): - f = NetCdf(f, t2d.model, **kwargs) - - if isinstance(f, str) and f.lower().endswith(".shp"): - array_dict = {} - for kper in range(t2d.model.modeltime.nper): - u2d = t2d[kper] - name = '{}_{}'.format(shapefile_utils.shape_attr_name(u2d.name), - kper + 1) - array_dict[name] = u2d.array - shapefile_utils.write_grid_shapefile(f, modelgrid, array_dict) - - elif isinstance(f, NetCdf) or isinstance(f, dict): - # mask the array is defined by any row col with at lease - # one active cell - mask = None - if modelgrid.idomain is not None: - ibnd = np.abs(modelgrid.idomain).sum(axis=0) - mask = ibnd == 0 - - # f.log("getting 4D array for {0}".format(t2d.name_base)) - array = t2d.array - # f.log("getting 4D array for {0}".format(t2d.name_base)) - with np.errstate(invalid="ignore"): - if array.dtype not in [int, np.int, np.int32, np.int64]: - if mask is not None: - array[:, 0, mask] = np.NaN - array[array <= min_valid] = np.NaN - array[array >= max_valid] = np.NaN - mx, mn = np.nanmax(array), np.nanmin(array) - else: - mx, mn = np.nanmax(array), np.nanmin(array) - array[array <= min_valid] = netcdf.FILLVALUE - array[array >= max_valid] = netcdf.FILLVALUE - # if t2d.model.bas6 is not None: - # array[:, 0, t2d.model.bas6.ibound.array[0] == 0] = \ - # f.fillvalue - # elif t2d.model.btn is not None: - # array[:, 0, t2d.model.btn.icbund.array[0] == 0] = \ - # f.fillvalue - - var_name = t2d.name.replace('_', '') - if isinstance(f, dict): - array[array == netcdf.FILLVALUE] = np.NaN - f[var_name] = array - return f - - 
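-        # swap NaNs for the netCDF fill value, resolve units and precision
-        # from the unitsformat/longnames lookups, and attach min/max
-        # metadata before writing the array into the (time, layer, y, x)
-        # netCDF variable below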
-        array[np.isnan(array)] = f.fillvalue
-        units = "unitless"
-
-        if var_name in NC_UNITS_FORMAT:
-            units = NC_UNITS_FORMAT[var_name].format(f.grid_units,
-                                                     f.time_units)
-        try:
-            precision_str = NC_PRECISION_TYPE[t2d.dtype]
-        except:
-            precision_str = NC_PRECISION_TYPE[t2d.dtype.type]
-        if var_name in NC_LONG_NAMES:
-            attribs = {"long_name": NC_LONG_NAMES[var_name]}
-        else:
-            attribs = {"long_name": var_name}
-        attribs["coordinates"] = "time layer latitude longitude"
-        attribs["units"] = units
-        attribs["min"] = mn
-        attribs["max"] = mx
-        if np.isnan(attribs["min"]) or np.isnan(attribs["max"]):
-            raise Exception("error processing {0}: all NaNs".format(var_name))
-        try:
-            dim_tuple = ("time",) + f.dimension_names
-            var = f.create_variable(var_name, attribs,
-                                    precision_str=precision_str,
-                                    dimensions=dim_tuple)
-        except Exception as e:
-            estr = "error creating variable {0}:\n{1}".format(var_name,
-                                                              str(e))
-            f.logger.warn(estr)
-            raise Exception(estr)
-        try:
-            var[:, 0] = array
-        except Exception as e:
-            estr = "error setting array to variable {0}:\n{1}".format(
-                var_name, str(e))
-            f.logger.warn(estr)
-            raise Exception(estr)
-        return f
-
-    elif fmt == 'vtk':
-        name = kwargs.get('name', t2d.name)
-        nanval = kwargs.get('nanval', -1e20)
-        smooth = kwargs.get('smooth', False)
-        point_scalars = kwargs.get('point_scalars', False)
-        vtk_grid_type = kwargs.get('vtk_grid_type', 'auto')
-        true2d = kwargs.get('true2d', False)
-        binary = kwargs.get('binary', False)
-        kpers = kwargs.get('kpers', None)
-        vtk.export_transient(t2d.model, t2d.array, f, name, nanval=nanval,
-                             smooth=smooth, point_scalars=point_scalars,
-                             array2d=True, vtk_grid_type=vtk_grid_type,
-                             true2d=true2d, binary=binary, kpers=kpers)
-    else:
-        raise NotImplementedError("unrecognized export argument:{0}".format(f))
-
-
-def array3d_export(f, u3d, fmt=None, **kwargs):
-    """
-    export helper for Util3d instances
-
-    Parameters
-    ----------
-    f : str
-        filename or existing export instance type (NetCdf only for now)
-    u3d : Util3d instance
-    fmt : str
-        output format flag.
'vtk' will export to vtk - **kwargs : keyword arguments - min_valid : minimum valid value - max_valid : maximum valid value - modelgrid : flopy.discretization.Grid - model grid instance which will supercede the flopy.model.modelgrid - if fmt is set to 'vtk', parameters of vtk.export_array - - """ - - assert isinstance(u3d, DataInterface), "array3d_export only helps " \ - "instances that support " \ - "DataInterface" - - min_valid = kwargs.get("min_valid", -1.0e+9) - max_valid = kwargs.get("max_valid", 1.0e+9) - - modelgrid = u3d.model.modelgrid - if "modelgrid" in kwargs: - modelgrid = kwargs.pop("modelgrid") - - if isinstance(f, str) and f.lower().endswith(".nc"): - f = NetCdf(f, u3d.model, **kwargs) - - if isinstance(f, str) and f.lower().endswith(".shp"): - array_dict = {} - for ilay in range(modelgrid.nlay): - u2d = u3d[ilay] - if isinstance(u2d, np.ndarray): - dname = u3d.name - array = u2d - else: - dname = u2d.name - array = u2d.array - name = '{}_{}'.format( - shapefile_utils.shape_attr_name(dname), ilay + 1) - array_dict[name] = array - shapefile_utils.write_grid_shapefile(f, modelgrid, array_dict) - - elif isinstance(f, NetCdf) or isinstance(f, dict): - var_name = u3d.name - if isinstance(var_name, list) or isinstance(var_name, tuple): - var_name = var_name[0] - var_name = var_name.replace(' ', '_').lower() - # f.log("getting 3D array for {0}".format(var_name)) - array = u3d.array - - # this is for the crappy vcont in bcf6 - # if isinstance(f,NetCdf) and array.shape != f.shape: - # f.log("broadcasting 3D array for {0}".format(var_name)) - # full_array = np.empty(f.shape) - # full_array[:] = np.NaN - # full_array[:array.shape[0]] = array - # array = full_array - # f.log("broadcasting 3D array for {0}".format(var_name)) - # f.log("getting 3D array for {0}".format(var_name)) - # - mask = None - if modelgrid.idomain is not None and "ibound" not in var_name: - mask = modelgrid.idomain == 0 - - if mask is not None and array.shape != mask.shape: - # f.log("broadcasting 3D array for {0}".format(var_name)) - full_array = np.empty(mask.shape) - full_array[:] = np.NaN - full_array[:array.shape[0]] = array - array = full_array - # f.log("broadcasting 3D array for {0}".format(var_name)) - - # runtime warning issued in some cases - need to track down cause - # happens when NaN is already in array - with np.errstate(invalid="ignore"): - if array.dtype not in [int, np.int, np.int32, np.int64]: - # if u3d.model.modelgrid.bas6 is not None and "ibound" not - # in var_name: - # array[u3d.model.modelgrid.bas6.ibound.array == 0] = - # np.NaN - # elif u3d.model.btn is not None and 'icbund' not in var_name: - # array[u3d.model.modelgrid.btn.icbund.array == 0] = np.NaN - if mask is not None: - array[mask] = np.NaN - array[array <= min_valid] = np.NaN - array[array >= max_valid] = np.NaN - mx, mn = np.nanmax(array), np.nanmin(array) - else: - mx, mn = np.nanmax(array), np.nanmin(array) - if mask is not None: - array[mask] = netcdf.FILLVALUE - array[array <= min_valid] = netcdf.FILLVALUE - array[array >= max_valid] = netcdf.FILLVALUE - if modelgrid.idomain is not None and "ibound" not in var_name: - array[modelgrid.idomain == 0] = netcdf.FILLVALUE - - if isinstance(f, dict): - f[var_name] = array - return f - - array[np.isnan(array)] = f.fillvalue - units = "unitless" - if var_name in NC_UNITS_FORMAT: - units = NC_UNITS_FORMAT[var_name].format(f.grid_units, - f.time_units) - precision_str = NC_PRECISION_TYPE[u3d.dtype] - if var_name in NC_LONG_NAMES: - attribs = {"long_name": NC_LONG_NAMES[var_name]} - 
else: - attribs = {"long_name": var_name} - attribs["coordinates"] = "layer latitude longitude" - attribs["units"] = units - attribs["min"] = mn - attribs["max"] = mx - if np.isnan(attribs["min"]) or np.isnan(attribs["max"]): - raise Exception("error processing {0}: all NaNs".format(var_name)) - try: - var = f.create_variable(var_name, attribs, - precision_str=precision_str, - dimensions=f.dimension_names) - except Exception as e: - estr = "error creating variable {0}:\n{1}".format(var_name, str(e)) - f.logger.warn(estr) - raise Exception(estr) - try: - var[:] = array - except Exception as e: - estr = "error setting array to variable {0}:\n{1}".format(var_name, - str(e)) - f.logger.warn(estr) - raise Exception(estr) - return f - - elif fmt == 'vtk': - # call vtk array export to folder - name = kwargs.get('name', u3d.name) - nanval = kwargs.get('nanval', -1e20) - smooth = kwargs.get('smooth', False) - point_scalars = kwargs.get('point_scalars', False) - vtk_grid_type = kwargs.get('vtk_grid_type', 'auto') - true2d = kwargs.get('true2d', False) - binary = kwargs.get('binary', False) - if isinstance(name, list) or isinstance(name, tuple): - name = name[0] - - vtk.export_array(u3d.model, u3d.array, f, name, nanval=nanval, - smooth=smooth, point_scalars=point_scalars, - vtk_grid_type=vtk_grid_type, true2d=true2d, - binary=binary) - - else: - raise NotImplementedError("unrecognized export argument:{0}".format(f)) - - -def array2d_export(f, u2d, fmt=None, **kwargs): - """ - export helper for Util2d instances - - Parameters - ---------- - f : str - filename or existing export instance type (NetCdf only for now) - u2d : Util2d instance - fmt : str - output format flag. 'vtk' will export to vtk - **kwargs : keyword arguments - min_valid : minimum valid value - max_valid : maximum valid value - modelgrid : flopy.discretization.Grid - model grid instance which will supercede the flopy.model.modelgrid - if fmt is set to 'vtk', parameters of vtk.export_array - - """ - assert isinstance(u2d, DataInterface), "util2d_helper only helps " \ - "instances that support " \ - "DataInterface" - assert len(u2d.array.shape) == 2, "util2d_helper only supports 2D arrays" - - min_valid = kwargs.get("min_valid", -1.0e+9) - max_valid = kwargs.get("max_valid", 1.0e+9) - - modelgrid = u2d.model.modelgrid - if "modelgrid" in kwargs: - modelgrid = kwargs.pop("modelgrid") - - if isinstance(f, str) and f.lower().endswith(".nc"): - f = NetCdf(f, u2d.model, **kwargs) - - if isinstance(f, str) and f.lower().endswith(".shp"): - name = shapefile_utils.shape_attr_name(u2d.name, keep_layer=True) - shapefile_utils.write_grid_shapefile(f, modelgrid, - {name: u2d.array}) - return - - elif isinstance(f, str) and f.lower().endswith(".asc"): - export_array(modelgrid, f, u2d.array, **kwargs) - return - - elif isinstance(f, NetCdf) or isinstance(f, dict): - - # try to mask the array - assume layer 1 ibound is a good mask - # f.log("getting 2D array for {0}".format(u2d.name)) - array = u2d.array - # f.log("getting 2D array for {0}".format(u2d.name)) - - with np.errstate(invalid="ignore"): - if array.dtype not in [int, np.int, np.int32, np.int64]: - if modelgrid.idomain is not None and \ - "ibound" not in u2d.name.lower() and \ - "idomain" not in u2d.name.lower(): - array[modelgrid.idomain[0, :, :] == 0] = np.NaN - array[array <= min_valid] = np.NaN - array[array >= max_valid] = np.NaN - mx, mn = np.nanmax(array), np.nanmin(array) - else: - mx, mn = np.nanmax(array), np.nanmin(array) - array[array <= min_valid] = netcdf.FILLVALUE - array[array 
>= max_valid] = netcdf.FILLVALUE - if modelgrid.idomain is not None and \ - "ibound" not in u2d.name.lower() and \ - "idomain" not in u2d.name.lower() and \ - "icbund" not in u2d.name.lower(): - array[modelgrid.idomain[0, :, :] == 0] = \ - netcdf.FILLVALUE - var_name = u2d.name - if isinstance(f, dict): - f[var_name] = array - return f - - array[np.isnan(array)] = f.fillvalue - units = "unitless" - - if var_name in NC_UNITS_FORMAT: - units = NC_UNITS_FORMAT[var_name].format(f.grid_units, - f.time_units) - precision_str = NC_PRECISION_TYPE[u2d.dtype] - if var_name in NC_LONG_NAMES: - attribs = {"long_name": NC_LONG_NAMES[var_name]} - else: - attribs = {"long_name": var_name} - attribs["coordinates"] = "latitude longitude" - attribs["units"] = units - attribs["min"] = mn - attribs["max"] = mx - if np.isnan(attribs["min"]) or np.isnan(attribs["max"]): - raise Exception("error processing {0}: all NaNs".format(var_name)) - try: - var = f.create_variable(var_name, attribs, - precision_str=precision_str, - dimensions=f.dimension_names[1:]) - except Exception as e: - estr = "error creating variable {0}:\n{1}".format(var_name, str(e)) - f.logger.warn(estr) - raise Exception(estr) - try: - var[:] = array - except Exception as e: - estr = "error setting array to variable {0}:\n{1}".format(var_name, - str(e)) - f.logger.warn(estr) - raise Exception(estr) - return f - - elif fmt == 'vtk': - - # call vtk array export to folder - name = kwargs.get('name', u2d.name) - nanval = kwargs.get('nanval', -1e20) - smooth = kwargs.get('smooth', False) - point_scalars = kwargs.get('point_scalars', False) - vtk_grid_type = kwargs.get('vtk_grid_type', 'auto') - true2d = kwargs.get('true2d', False) - binary = kwargs.get('binary', False) - vtk.export_array(u2d.model, u2d.array, f, name, nanval=nanval, - smooth=smooth, point_scalars=point_scalars, - array2d=True, vtk_grid_type=vtk_grid_type, - true2d=true2d, binary=binary) - - else: - raise NotImplementedError("unrecognized export argument:{0}".format(f)) - - -def export_array(modelgrid, filename, a, nodata=-9999, - fieldname='value', **kwargs): - """ - Write a numpy array to Arc Ascii grid or shapefile with the model - reference. - - Parameters - ---------- - modelgrid : flopy.discretization.StructuredGrid object - model grid - filename : str - Path of output file. Export format is determined by - file extention. - '.asc' Arc Ascii grid - '.tif' GeoTIFF (requries rasterio package) - '.shp' Shapefile - a : 2D numpy.ndarray - Array to export - nodata : scalar - Value to assign to np.nan entries (default -9999) - fieldname : str - Attribute field name for array values (shapefile export only). - (default 'values') - kwargs: - keyword arguments to np.savetxt (ascii) - rasterio.open (GeoTIFF) - or flopy.export.shapefile_utils.write_grid_shapefile2 - - Notes - ----- - Rotated grids will be either be unrotated prior to export, - using scipy.ndimage.rotate (Arc Ascii format) or rotation will be - included in their transform property (GeoTiff format). In either case - the pixels will be displayed in the (unrotated) projected geographic - coordinate system, so the pixels will no longer align exactly with the - model grid (as displayed from a shapefile, for example). A key difference - between Arc Ascii and GeoTiff (besides disk usage) is that the - unrotated Arc Ascii will have a different grid size, whereas the GeoTiff - will have the same number of rows and pixels as the original. 
- - """ - - if filename.lower().endswith(".asc"): - if len(np.unique(modelgrid.delr)) != \ - len(np.unique(modelgrid.delc)) != 1 \ - or modelgrid.delr[0] != modelgrid.delc[0]: - raise ValueError('Arc ascii arrays require a uniform grid.') - - xoffset, yoffset = modelgrid.xoffset, modelgrid.yoffset - cellsize = modelgrid.delr[0] - fmt = kwargs.get('fmt', '%.18e') - a = a.copy() - a[np.isnan(a)] = nodata - if modelgrid.angrot != 0: - try: - from scipy.ndimage import rotate - except ImportError: - rotate = None - print('scipy package required to export rotated grid.') - - if rotate is not None: - a = rotate(a, modelgrid.angrot, cval=nodata) - height_rot, width_rot = a.shape - xmin, ymin, xmax, ymax = modelgrid.extent - dx = (xmax - xmin) / width_rot - dy = (ymax - ymin) / height_rot - cellsize = np.max((dx, dy)) - xoffset, yoffset = xmin, ymin - - filename = '.'.join( - filename.split('.')[:-1]) + '.asc' # enforce .asc ending - nrow, ncol = a.shape - a[np.isnan(a)] = nodata - txt = 'ncols {:d}\n'.format(ncol) - txt += 'nrows {:d}\n'.format(nrow) - txt += 'xllcorner {:f}\n'.format(xoffset) - txt += 'yllcorner {:f}\n'.format(yoffset) - txt += 'cellsize {}\n'.format(cellsize) - # ensure that nodata fmt consistent w values - txt += 'NODATA_value {}\n'.format(fmt) % (nodata) - with open(filename, 'w') as output: - output.write(txt) - with open(filename, 'ab') as output: - np.savetxt(output, a, **kwargs) - print('wrote {}'.format(filename)) - - elif filename.lower().endswith(".tif"): - if len(np.unique(modelgrid.delr)) != \ - len(np.unique(modelgrid.delc)) != 1 \ - or modelgrid.delr[0] != modelgrid.delc[0]: - raise ValueError('GeoTIFF export require a uniform grid.') - try: - import rasterio - from rasterio import Affine - except ImportError: - print('GeoTIFF export requires the rasterio package.') - return - dxdy = modelgrid.delc[0] - # because this is only implemented for a structured grid, - # we can get the xul and yul coordinate from modelgrid.xvertices(0, 0) - verts = modelgrid.get_cell_vertices(0, 0) - xul, yul = verts[0] - trans = Affine.translation(xul, yul) * \ - Affine.rotation(modelgrid.angrot) * \ - Affine.scale(dxdy, -dxdy) - - # third dimension is the number of bands - a = a.copy() - if len(a.shape) == 2: - a = np.reshape(a, (1, a.shape[0], a.shape[1])) - if a.dtype.name == 'int64': - a = a.astype('int32') - dtype = rasterio.int32 - elif a.dtype.name == 'int32': - dtype = rasterio.int32 - elif a.dtype.name == 'float64': - dtype = rasterio.float64 - elif a.dtype.name == 'float32': - dtype = rasterio.float32 - else: - msg = 'ERROR: invalid dtype "{}"'.format(a.dtype.name) - raise TypeError(msg) - - meta = {'count': a.shape[0], - 'width': a.shape[2], - 'height': a.shape[1], - 'nodata': nodata, - 'dtype': dtype, - 'driver': 'GTiff', - 'crs': modelgrid.proj4, - 'transform': trans - } - meta.update(kwargs) - with rasterio.open(filename, 'w', **meta) as dst: - dst.write(a) - print('wrote {}'.format(filename)) - - elif filename.lower().endswith(".shp"): - from ..export.shapefile_utils import write_grid_shapefile - epsg = kwargs.get('epsg', None) - prj = kwargs.get('prj', None) - if epsg is None and prj is None: - epsg = modelgrid.epsg - write_grid_shapefile(filename, modelgrid, array_dict={fieldname: a}, - nan_val=nodata, - epsg=epsg, prj=prj) - - -def export_contours(modelgrid, filename, contours, - fieldname='level', epsg=None, prj=None, - **kwargs): - """ - Convert matplotlib contour plot object to shapefile. 
- - Parameters - ---------- - modelgrid : flopy.discretization.Grid - flopy modelgrid instance - filename : str - path of output shapefile - contours : matplotlib.contour.QuadContourSet or list of them - (object returned by matplotlib.pyplot.contour) - fieldname : str - gis attribute table field name - epsg : int - EPSG code. See https://www.epsg-registry.org/ or spatialreference.org - prj : str - Existing projection file to be used with new shapefile. - **kwargs : key-word arguments to flopy.export.shapefile_utils.recarray2shp - - Returns - ------- - df : dataframe of shapefile contents - - """ - from ..utils.geometry import LineString - from .shapefile_utils import recarray2shp - - if not isinstance(contours, list): - contours = [contours] - - if epsg is None: - epsg = modelgrid.epsg - if prj is None: - prj = modelgrid.proj4 - - geoms = [] - level = [] - for ctr in contours: - levels = ctr.levels - for i, c in enumerate(ctr.collections): - paths = c.get_paths() - geoms += [LineString(p.vertices) for p in paths] - level += list(np.ones(len(paths)) * levels[i]) - - # convert the dictionary to a recarray - ra = np.array(level, - dtype=[(fieldname, float)]).view(np.recarray) - - recarray2shp(ra, geoms, filename, epsg=epsg, prj=prj, **kwargs) - return - - -def export_contourf(filename, contours, fieldname='level', epsg=None, - prj=None, **kwargs): - """ - Write matplotlib filled contours to shapefile. This utility requires - that shapely is installed. - - Parameters - ---------- - filename : str - name of output shapefile (e.g. myshp.shp) - contours : matplotlib.contour.QuadContourSet or list of them - (object returned by matplotlib.pyplot.contourf) - fieldname : str - Name of shapefile attribute field to contain the contour level. The - fieldname column in the attribute table will contain the lower end of - the range represented by the polygon. Default is 'level'. - epsg : int - EPSG code. See https://www.epsg-registry.org/ or spatialreference.org - prj : str - Existing projection file to be used with new shapefile. 
- - **kwargs : keyword arguments to flopy.export.shapefile_utils.recarray2shp - - Returns - ------- - None - - Examples - -------- - >>> import flopy - >>> import matplotlib.pyplot as plt - >>> from flopy.export.utils import export_contourf - >>> a = np.random.random((10, 10)) - >>> cs = plt.contourf(a) - >>> export_contourf('myfilledcontours.shp', cs) - - """ - - try: - from shapely import geometry - except (ImportError, ModuleNotFoundError): - raise ImportError('export_contourf requires python shapely package') - - from ..utils.geometry import Polygon - from .shapefile_utils import recarray2shp - - shapelygeoms = [] - level = [] - - if not isinstance(contours, list): - contours = [contours] - - for c in contours: - levels = c.levels - for idx, col in enumerate(c.collections): - # Loop through all polygons that have the same intensity level - for contour_path in col.get_paths(): - # Create the polygon for this intensity level - # The first polygon in the path is the main one, the following - # ones are "holes" - for ncp, cp in enumerate(contour_path.to_polygons()): - x = cp[:, 0] - y = cp[:, 1] - new_shape = geometry.Polygon([(i[0], i[1]) - for i in zip(x, y)]) - if ncp == 0: - poly = new_shape - else: - # Remove the holes if there are any - poly = poly.difference(new_shape) - - # store shapely geometry object - shapelygeoms.append(poly) - level.append(levels[idx]) - - geoms = [] - for shpgeom in shapelygeoms: - xa, ya = shpgeom.exterior.coords.xy - interiors = [s.coords for s in shpgeom.interiors] - pg = Polygon([(x, y) for x, y in zip(xa, ya)], interiors=interiors) - geoms += [pg] - - print('Writing {} polygons'.format(len(level))) - - # Create recarray - ra = np.array(level, dtype=[(fieldname, float)]).view(np.recarray) - - recarray2shp(ra, geoms, filename, epsg=epsg, prj=prj, **kwargs) - return - - -def export_array_contours(modelgrid, filename, a, - fieldname='level', - interval=None, - levels=None, - maxlevels=1000, - epsg=None, - prj=None, - **kwargs): - """ - Contour an array using matplotlib; write shapefile of contours. - - Parameters - ---------- - modelgrid : flopy.discretization.Grid object - model grid object - filename : str - Path of output file with '.shp' extention. - a : 2D numpy array - Array to contour - fieldname : str - gis field name - interval : float - interval to calculate levels from - levels : list - list of contour levels - maxlevels : int - maximum number of contour levels - epsg : int - EPSG code. See https://www.epsg-registry.org/ or spatialreference.org - prj : str - Existing projection file to be used with new shapefile. 
- **kwargs : keyword arguments to flopy.export.shapefile_utils.recarray2shp - - """ - import matplotlib.pyplot as plt - - if epsg is None: - epsg = modelgrid.epsg - if prj is None: - prj = modelgrid.proj4 - - if interval is not None: - imin = np.nanmin(a) - imax = np.nanmax(a) - nlevels = np.round(np.abs(imax - imin) / interval, 2) - msg = '{:.0f} levels at interval of {} > maxlevels={}'.format( - nlevels, - interval, - maxlevels) - assert nlevels < maxlevels, msg - levels = np.arange(imin, imax, interval) - ax = plt.subplots()[-1] - ctr = contour_array(modelgrid, ax, a, levels=levels) - export_contours(modelgrid, filename, ctr, fieldname, epsg, prj, **kwargs) - plt.close() - - -def contour_array(modelgrid, ax, a, **kwargs): - """ - Create a QuadMesh plot of the specified array using pcolormesh - - Parameters - ---------- - modelgrid : flopy.discretization.Grid object - modelgrid object - ax : matplotlib.axes.Axes - ax to add the contours - - a : np.ndarray - array to contour - - Returns - ------- - contour_set : ContourSet - - """ - from ..plot import PlotMapView - - kwargs['ax'] = ax - pmv = PlotMapView(modelgrid=modelgrid) - contour_set = pmv.contour_array(a=a, **kwargs) - - return contour_set +from __future__ import print_function +import json +import os +import numpy as np +from ..utils import HeadFile, CellBudgetFile, UcnFile, FormattedHeadFile, \ + ZBNetOutput +from ..mbase import BaseModel, ModelInterface +from ..pakbase import PackageInterface +from ..datbase import DataType, DataInterface, DataListInterface +from . import NetCdf, netcdf +from . import shapefile_utils +from . import vtk + + +NC_PRECISION_TYPE = {np.float64: "f8", np.float32: "f4", np.int: "i4", + np.int64: "i4", np.int32: "i4"} + +path = os.path.split(netcdf.__file__)[0] +with open(path + '/longnames.json') as f: + NC_LONG_NAMES = json.load(f) +with open(path + '/unitsformat.json') as f: + NC_UNITS_FORMAT = json.load(f) + + +def ensemble_helper(inputs_filename, outputs_filename, models, add_reals=True, + **kwargs): + """ + Helper to export an ensemble of model instances. Assumes + all models have same dis and reference information, only difference is + properties and boundary conditions. 
Assumes model.nam.split('_')[-1] is + the realization suffix to use in the netcdf variable names + """ + f_in, f_out = None, None + for m in models[1:]: + assert m.get_nrow_ncol_nlay_nper() == models[ + 0].get_nrow_ncol_nlay_nper() + if inputs_filename is not None: + f_in = models[0].export(inputs_filename, **kwargs) + vdict = {} + vdicts = [models[0].export(vdict, **kwargs)] + i = 1 + for m in models[1:]: + suffix = m.name.split('.')[0].split('_')[-1] + vdict = {} + m.export(vdict, **kwargs) + vdicts.append(vdict) + if add_reals: + f_in.append(vdict, suffix=suffix) + i += 1 + mean, stdev = {}, {} + for vname in vdict.keys(): + alist = [] + for vd in vdicts: + alist.append(vd[vname]) + alist = np.array(alist) + mean[vname] = alist.mean(axis=0) + stdev[vname] = alist.std(axis=0) + mean[vname][vdict[vname] == netcdf.FILLVALUE] = netcdf.FILLVALUE + stdev[vname][vdict[vname] == netcdf.FILLVALUE] = netcdf.FILLVALUE + mean[vname][np.isnan(vdict[vname])] = netcdf.FILLVALUE + stdev[vname][np.isnan(vdict[vname])] = netcdf.FILLVALUE + + if i >= 2: + if not add_reals: + f_in.write() + f_in = NetCdf.empty_like(mean, output_filename=inputs_filename) + f_in.append(mean, suffix="**mean**") + f_in.append(stdev, suffix="**stdev**") + else: + f_in.append(mean, suffix="**mean**") + f_in.append(stdev, suffix="**stdev**") + f_in.add_global_attributes({"namefile": ''}) + + if outputs_filename is not None: + f_out = output_helper(outputs_filename, models[0], + models[0].load_results(as_dict=True), **kwargs) + vdict = {} + vdicts = [output_helper(vdict, models[0], models[0]. \ + load_results(as_dict=True), **kwargs)] + i = 1 + for m in models[1:]: + suffix = m.name.split('.')[0].split('_')[-1] + oudic = m.load_results(as_dict=True) + vdict = {} + output_helper(vdict, m, oudic, **kwargs) + vdicts.append(vdict) + if add_reals: + f_out.append(vdict, suffix=suffix) + i += 1 + + mean, stdev = {}, {} + for vname in vdict.keys(): + alist = [] + for vd in vdicts: + alist.append(vd[vname]) + alist = np.array(alist) + mean[vname] = alist.mean(axis=0) + stdev[vname] = alist.std(axis=0) + mean[vname][np.isnan(vdict[vname])] = netcdf.FILLVALUE + stdev[vname][np.isnan(vdict[vname])] = netcdf.FILLVALUE + mean[vname][vdict[vname] == netcdf.FILLVALUE] = netcdf.FILLVALUE + stdev[vname][vdict[vname] == netcdf.FILLVALUE] = netcdf.FILLVALUE + if i >= 2: + if not add_reals: + f_out.write() + f_out = NetCdf.empty_like(mean, + output_filename=outputs_filename) + f_out.append(mean, suffix="**mean**") + f_out.append(stdev, suffix="**stdev**") + + else: + f_out.append(mean, suffix="**mean**") + f_out.append(stdev, suffix="**stdev**") + f_out.add_global_attributes({"namefile": ''}) + return f_in, f_out + + +def _add_output_nc_variable(f, times, shape3d, out_obj, var_name, logger=None, + text='', mask_vals=(), mask_array3d=None): + if logger: + logger.log("creating array for {0}".format( + var_name)) + + array = np.zeros((len(times), shape3d[0], shape3d[1], shape3d[2]), + dtype=np.float32) + array[:] = np.NaN + + if isinstance(out_obj, ZBNetOutput): + a = np.asarray(out_obj.zone_array, dtype=np.float32) + if mask_array3d is not None: + a[mask_array3d] = np.NaN + for i, _ in enumerate(times): + array[i, :, :, :] = a + + else: + for i, t in enumerate(times): + if t in out_obj.recordarray["totim"]: + try: + if text: + a = out_obj.get_data(totim=t, full3D=True, text=text) + if isinstance(a, list): + a = a[0] + else: + a = out_obj.get_data(totim=t) + except Exception as e: + estr = "error getting data for {0} at time" \ + " 
{1}:{2}".format(var_name + + text.decode().strip().lower(), + t, str(e)) + if logger: + logger.warn(estr) + else: + print(estr) + continue + if mask_array3d is not None and a.shape == mask_array3d.shape: + a[mask_array3d] = np.NaN + try: + array[i, :, :, :] = a.astype(np.float32) + except Exception as e: + estr = "error assigning {0} data to array for time" \ + " {1}:{2}".format(var_name + + text.decode().strip().lower(), + t, str(e)) + if logger: + logger.warn(estr) + else: + print(estr) + continue + + if logger: + logger.log("creating array for {0}".format( + var_name)) + + for mask_val in mask_vals: + array[np.where(array == mask_val)] = np.NaN + mx, mn = np.nanmax(array), np.nanmin(array) + array[np.isnan(array)] = netcdf.FILLVALUE + + if isinstance(f, dict): + if text: + var_name = text.decode().strip().lower() + f[var_name] = array + return f + + units = None + if var_name in NC_UNITS_FORMAT: + units = NC_UNITS_FORMAT[var_name].format( + f.grid_units, f.time_units) + precision_str = "f4" + + if text: + var_name = text.decode().strip().lower() + attribs = {"long_name": var_name} + attribs["coordinates"] = "time layer latitude longitude" + attribs["min"] = mn + attribs["max"] = mx + if units is not None: + attribs["units"] = units + try: + dim_tuple = ("time",) + f.dimension_names + var = f.create_variable(var_name, attribs, + precision_str=precision_str, + dimensions=dim_tuple) + except Exception as e: + estr = "error creating variable {0}:\n{1}".format( + var_name, str(e)) + if logger: + logger.lraise(estr) + else: + raise Exception(estr) + + try: + var[:] = array + except Exception as e: + estr = "error setting array to variable {0}:\n{1}".format( + var_name, str(e)) + if logger: + logger.lraise(estr) + else: + raise Exception(estr) + + +def _add_output_nc_zonebudget_variable(f, array, var_name, flux, + logger=None): + """ + Method to add zonebudget output data to netcdf file + + Parameters + ---------- + f : NetCdf object + array : np.ndarray + zonebudget output budget group array + var_name : str + variable name + flux : bool + flag for flux data or volumetric data + logger : None or Logger + logger instance + + """ + if logger: + logger.log("creating array for {}".format(var_name)) + + mn = np.min(array) + mx = np.max(array) + + precision_str = "f4" + if flux: + units = "{}^3/{}".format(f.grid_units, f.time_units) + else: + units = "{}^3".format(f.grid_units) + attribs = {"long_name": var_name} + attribs["coordinates"] = "time zone" + attribs["min"] = mn + attribs["max"] = mx + attribs['units'] = units + dim_tuple = ('time', "zone") + + var = f.create_group_variable('zonebudget', var_name, attribs, + precision_str, dim_tuple) + + var[:] = array + +def output_helper(f, ml, oudic, **kwargs): + """ + Export model outputs using the model spatial reference info. 
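+
+    A minimal usage sketch (assuming ``m`` is a loaded flopy model and
+    'heads.hds' is a binary head file it produced):
+
+    >>> import flopy
+    >>> from flopy.export.utils import output_helper
+    >>> hds = flopy.utils.HeadFile('heads.hds')
+    >>> output_helper('outputs.nc', m, {'heads.hds': hds})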
+
+    Parameters
+    ----------
+    f : str
+        filename for output - must have .shp or .nc extension
+    ml : flopy.mbase.ModelInterface derived type
+    oudic : dict
+        dictionary of output filename : flopy datafile/cellbudgetfile
+        instance pairs
+    **kwargs : keyword arguments
+        modelgrid : flopy.discretization.Grid
+            user supplied model grid instance that will be used for export
+            in lieu of the model's model grid instance
+        mflay : int
+            zero-based model layer which can be used in shapefile exporting
+        kper : int
+            zero-based stress period which can be used for shapefile exporting
+
+    Returns
+    -------
+    None
+
+    Notes
+    -----
+    casts down double precision to single precision for netCDF files
+
+    """
+    assert isinstance(ml, (BaseModel, ModelInterface))
+    assert len(oudic.keys()) > 0
+    logger = kwargs.pop("logger", None)
+    stride = kwargs.pop("stride", 1)
+    forgive = kwargs.pop("forgive", False)
+    kwargs.pop("suffix", None)
+    mask_vals = []
+    mflay = kwargs.pop('mflay', None)
+    kper = kwargs.pop('kper', None)
+    if "masked_vals" in kwargs:
+        mask_vals = kwargs.pop("masked_vals")
+    if len(kwargs) > 0 and logger is not None:
+        str_args = ','.join(kwargs)
+        logger.warn("unused kwargs: " + str_args)
+
+    zonebud = None
+    zbkey = None
+    for key, value in oudic.items():
+        if isinstance(value, ZBNetOutput):
+            zbkey = key
+            break
+
+    if zbkey is not None:
+        zonebud = oudic.pop(zbkey)
+
+    # ISSUE - need to round the totims in each output file instance so
+    # that they will line up
+    for key in oudic.keys():
+        out = oudic[key]
+        times = [float("{0:15.6f}".format(t)) for t in
+                 out.recordarray["totim"]]
+        out.recordarray["totim"] = times
+
+    times = []
+    for filename, df in oudic.items():
+        for t in df.recordarray["totim"]:
+            if t not in times:
+                times.append(t)
+
+    if zonebud is not None and not oudic:
+        if isinstance(f, NetCdf):
+            times = f.time_values_arg
+        else:
+            times = zonebud.time
+
+    assert len(times) > 0
+    times.sort()
+
+    # rectify times - only use times that are common to every output file
+    common_times = []
+    skipped_times = []
+    for t in times:
+        keep = True
+        for filename, df in oudic.items():
+            if isinstance(df, ZBNetOutput):
+                continue
+            if t not in df.recordarray["totim"]:
+                keep = False
+                break
+        if keep:
+            common_times.append(t)
+        else:
+            skipped_times.append(t)
+
+    assert len(common_times) > 0
+    if len(skipped_times) > 0:
+        if logger:
+            logger.warn("the following output times are not common to all" + \
+                        " output files and are being skipped:\n" + \
+                        "{0}".format(skipped_times))
+        else:
+            print("the following output times are not common to all" + \
+                  " output files and are being skipped:\n" + \
+                  "{0}".format(skipped_times))
+    times = [t for t in common_times[::stride]]
+    if isinstance(f, str) and f.lower().endswith(".nc"):
+        f = NetCdf(f, ml, time_values=times, logger=logger,
+                   forgive=forgive, **kwargs)
+    elif isinstance(f, NetCdf):
+        otimes = list(f.nc.variables["time"][:])
+        assert otimes == times
+    if isinstance(f, NetCdf) or isinstance(f, dict):
+        shape3d = (ml.modelgrid.nlay, ml.modelgrid.nrow, ml.modelgrid.ncol)
+        mask_array3d = None
+        if ml.hdry is not None:
+            mask_vals.append(ml.hdry)
+        if ml.hnoflo is not None:
+            mask_vals.append(ml.hnoflo)
+
+        if ml.modelgrid.idomain is not None:
+            mask_array3d = ml.modelgrid.idomain == 0
+
+        for filename, out_obj in oudic.items():
+            filename = filename.lower()
+
+            if isinstance(out_obj, UcnFile):
+                _add_output_nc_variable(f, times, shape3d, out_obj,
+                                        "concentration", logger=logger,
+                                        mask_vals=mask_vals,
+                                        mask_array3d=mask_array3d)
+
+            elif isinstance(out_obj,
HeadFile): + _add_output_nc_variable(f, times, shape3d, out_obj, + out_obj.text.decode(), logger=logger, + mask_vals=mask_vals, + mask_array3d=mask_array3d) + + elif isinstance(out_obj, FormattedHeadFile): + _add_output_nc_variable(f, times, shape3d, out_obj, + out_obj.text, logger=logger, + mask_vals=mask_vals, + mask_array3d=mask_array3d) + + elif isinstance(out_obj, CellBudgetFile): + var_name = "cell_by_cell_flow" + for text in out_obj.textlist: + _add_output_nc_variable(f, times, shape3d, out_obj, + var_name, logger=logger, text=text, + mask_vals=mask_vals, + mask_array3d=mask_array3d) + + else: + estr = "unrecognized file extension:{0}".format(filename) + if logger: + logger.lraise(estr) + else: + raise Exception(estr) + + if zonebud is not None: + try: + f.initialize_group("zonebudget", + dimensions=('time', 'zone'), + dimension_data={'time': zonebud.time, + 'zone': zonebud.zones}) + except AttributeError: + pass + + for text, array in zonebud.arrays.items(): + _add_output_nc_zonebudget_variable(f, array, text, + zonebud.flux, + logger) + + # write the zone array to standard output + _add_output_nc_variable(f, times, shape3d, zonebud, + "budget_zones", logger=logger, + mask_vals=mask_vals, + mask_array3d=mask_array3d) + + elif isinstance(f, str) and f.endswith('.shp'): + attrib_dict = {} + for _, out_obj in oudic.items(): + + if isinstance(out_obj, HeadFile) or \ + isinstance(out_obj, FormattedHeadFile) or \ + isinstance(out_obj, UcnFile): + if isinstance(out_obj, UcnFile): + attrib_name = 'conc' + else: + attrib_name = 'head' + plotarray = np.atleast_3d(out_obj.get_alldata() + .transpose()).transpose() + + for per in range(plotarray.shape[0]): + for k in range(plotarray.shape[1]): + if kper is not None and per != kper: + continue + if mflay is not None and k != mflay: + continue + name = attrib_name + '{}_{}'.format(per, k) + attrib_dict[name] = plotarray[per][k] + + elif isinstance(out_obj, CellBudgetFile): + names = out_obj.get_unique_record_names(decode=True) + + for attrib_name in names: + plotarray = np.atleast_3d(out_obj.get_data( + text=attrib_name, + full3D=True)) + + attrib_name = attrib_name.strip() + if attrib_name == "FLOW RIGHT FACE": + attrib_name = 'FRF' + elif attrib_name == "FLOW FRONT FACE": + attrib_name = "FFF" + elif attrib_name == "FLOW LOWER FACE": + attrib_name = "FLF" + else: + pass + for per in range(plotarray.shape[0]): + for k in range(plotarray.shape[1]): + if kper is not None and per != kper: + continue + if mflay is not None and k != mflay: + continue + name = attrib_name + '{}_{}'.format(per, k) + attrib_dict[name] = plotarray[per][k] + + if attrib_dict: + shapefile_utils.write_grid_shapefile(f, ml.modelgrid, attrib_dict) + + else: + if logger: + logger.lraise("unrecognized export argument:{0}".format(f)) + else: + raise NotImplementedError("unrecognized export argument" + + ":{0}".format(f)) + return f + + +def model_export(f, ml, fmt=None, **kwargs): + """ + Method to export a model to a shapefile or netcdf file + + Parameters + ---------- + f : str + file name (".nc" for netcdf or ".shp" for shapefile) + or dictionary of .... + ml : flopy.modflow.mbase.ModelInterface object + flopy model object + fmt : str + output format flag. 
'vtk' will export to vtk + **kwargs : keyword arguments + modelgrid: flopy.discretization.Grid + user supplied modelgrid object which will supercede the built + in modelgrid object + epsg : int + epsg projection code + prj : str + prj file name + if fmt is set to 'vtk', parameters of vtk.export_model + + """ + assert isinstance(ml, ModelInterface) + package_names = kwargs.get("package_names", None) + if package_names is None: + package_names = [pak.name[0] for pak in ml.packagelist] + + if isinstance(f, str) and f.lower().endswith(".nc"): + f = NetCdf(f, ml, **kwargs) + + if isinstance(f, str) and f.lower().endswith(".shp"): + shapefile_utils.model_attributes_to_shapefile(f, ml, + package_names=package_names, + **kwargs) + + elif isinstance(f, NetCdf): + + for pak in ml.packagelist: + if pak.name[0] in package_names: + f = package_export(f, pak, **kwargs) + assert f is not None + return f + + elif isinstance(f, dict): + for pak in ml.packagelist: + f = package_export(f, pak, **kwargs) + + elif fmt == 'vtk': + # call vtk model export + nanval = kwargs.get('nanval', -1e20) + smooth = kwargs.get('smooth', False) + point_scalars = kwargs.get('point_scalars', False) + vtk_grid_type = kwargs.get('vtk_grid_type', 'auto') + true2d = kwargs.get('true2d', False) + binary = kwargs.get('binary', False) + kpers = kwargs.get('kpers', None) + vtk.export_model(ml, f, package_names=package_names, nanval=nanval, + smooth=smooth, point_scalars=point_scalars, + vtk_grid_type=vtk_grid_type, true2d=true2d, + binary=binary, kpers=kpers) + + else: + raise NotImplementedError("unrecognized export argument:{0}".format(f)) + + return f + + +def package_export(f, pak, fmt=None, **kwargs): + """ + Method to export a package to shapefile or netcdf + + Parameters + ---------- + f : str + output file name (ends in .shp for shapefile or .nc for netcdf) + pak : flopy.pakbase.Package object + package to export + fmt : str + output format flag. 
'vtk' will export to vtk
+    **kwargs : keyword arguments
+        modelgrid: flopy.discretization.Grid
+            user supplied modelgrid object which will supersede the built
+            in modelgrid object
+        epsg : int
+            epsg projection code
+        prj : str
+            prj file name
+        if fmt is set to 'vtk', parameters of vtk.export_package
+
+    Returns
+    -------
+    f : NetCdf object or None
+
+    """
+    assert isinstance(pak, PackageInterface)
+    if isinstance(f, str) and f.lower().endswith(".nc"):
+        f = NetCdf(f, pak.parent, **kwargs)
+
+    if isinstance(f, str) and f.lower().endswith(".shp"):
+        shapefile_utils.model_attributes_to_shapefile(f, pak.parent,
+                                                      package_names=pak.name,
+                                                      **kwargs)
+
+    elif isinstance(f, NetCdf) or isinstance(f, dict):
+        for a in pak.data_list:
+            if isinstance(a, DataInterface):
+                if a.array is not None:
+                    if a.data_type == DataType.array2d \
+                            and len(a.array.shape) == 2 \
+                            and a.array.shape[1] > 0:
+                        try:
+                            f = array2d_export(f, a, **kwargs)
+                        except Exception:
+                            f.logger.warn(
+                                "error adding {0} as variable".format(a.name))
+                    elif a.data_type == DataType.array3d:
+                        f = array3d_export(f, a, **kwargs)
+                    elif a.data_type == DataType.transient2d:
+                        f = transient2d_export(f, a, **kwargs)
+                    elif a.data_type == DataType.transientlist:
+                        f = mflist_export(f, a, **kwargs)
+            elif isinstance(a, list):
+                for v in a:
+                    if isinstance(v, DataInterface) and \
+                            v.data_type == DataType.array3d:
+                        f = array3d_export(f, v, **kwargs)
+        return f
+
+    elif fmt == 'vtk':
+        # call vtk array export to folder
+        nanval = kwargs.get('nanval', -1e20)
+        smooth = kwargs.get('smooth', False)
+        point_scalars = kwargs.get('point_scalars', False)
+        vtk_grid_type = kwargs.get('vtk_grid_type', 'auto')
+        true2d = kwargs.get('true2d', False)
+        binary = kwargs.get('binary', False)
+        kpers = kwargs.get('kpers', None)
+        vtk.export_package(pak.parent, pak.name, f, nanval=nanval,
+                           smooth=smooth, point_scalars=point_scalars,
+                           vtk_grid_type=vtk_grid_type, true2d=true2d,
+                           binary=binary, kpers=kpers)
+
+    else:
+        raise NotImplementedError("unrecognized export argument:{0}".format(f))
+
+
+def generic_array_export(f, array, var_name="generic_array",
+                         dimensions=("time", "layer", "y", "x"),
+                         precision_str="f4", units="unitless", **kwargs):
+    """
+    Method to export a generic array to NetCdf
+
+    Parameters
+    ----------
+    f : str
+        filename or existing export instance type (NetCdf only for now)
+    array : np.ndarray
+    var_name : str
+        variable name
+    dimensions : tuple
+        netcdf dimensions
+    precision_str : str
+        binary precision string, default "f4"
+    units : string
+        units of array data
+    **kwargs : keyword arguments
+        model : flopy.modflow.mbase
+            flopy model object
+
+    """
+    if isinstance(f, str) and f.lower().endswith(".nc"):
+        assert "model" in kwargs.keys(), "creating a new netCDF using " \
+                                         "generic_array_helper requires a " \
+                                         "'model' kwarg"
+        assert isinstance(kwargs["model"], BaseModel)
+        f = NetCdf(f, kwargs.pop("model"), **kwargs)
+
+    assert array.ndim == len(dimensions), "generic_array_helper() " + \
+                                          "array.ndim != dimensions"
+    coords_dims = {"time": "time", "layer": "layer", "y": "latitude",
+                   "x": "longitude"}
+    coords = ' '.join([coords_dims[d] for d in dimensions])
+    mn = kwargs.pop("min", -1.0e+9)
+    mx = kwargs.pop("max", 1.0e+9)
+    long_name = kwargs.pop("long_name", var_name)
+    if len(kwargs) > 0:
+        f.logger.warn("generic_array_helper(): unrecognized kwargs:" + \
+                      ",".join(kwargs.keys()))
+    attribs = {"long_name": long_name}
+    attribs["coordinates"] = coords
+    attribs["units"] = units
+    attribs["min"] = mn
+    attribs["max"] = mx
+    if
np.isnan(attribs["min"]) or np.isnan(attribs["max"]): + raise Exception("error processing {0}: all NaNs".format(var_name)) + try: + var = f.create_variable(var_name, attribs, precision_str=precision_str, + dimensions=dimensions) + except Exception as e: + estr = "error creating variable {0}:\n{1}".format(var_name, str(e)) + f.logger.warn(estr) + raise Exception(estr) + try: + var[:] = array + except Exception as e: + estr = "error setting array to variable {0}:\n{1}".format(var_name, + str(e)) + f.logger.warn(estr) + raise Exception(estr) + return f + + +def mflist_export(f, mfl, **kwargs): + """ + export helper for MfList instances + + Parameters + ----------- + f : str + filename or existing export instance type (NetCdf only for now) + mfl : MfList instance + **kwargs : keyword arguments + modelgrid : flopy.discretization.Grid + model grid instance which will supercede the flopy.model.modelgrid + + """ + if not isinstance(mfl, (DataListInterface, DataInterface)): + err = "mflist_helper only helps instances that support " \ + "DataListInterface" + raise AssertionError(err) + + modelgrid = mfl.model.modelgrid + if "modelgrid" in kwargs: + modelgrid = kwargs.pop("modelgrid") + + if isinstance(f, str) and f.lower().endswith(".nc"): + f = NetCdf(f, mfl.model, **kwargs) + + if isinstance(f, str) and f.lower().endswith(".shp"): + sparse = kwargs.get("sparse", False) + kper = kwargs.get("kper", 0) + squeeze = kwargs.get("squeeze", True) + + if modelgrid is None: + raise Exception("MfList.to_shapefile: ModelGrid is not set") + elif modelgrid.grid_type == 'USG-Unstructured': + raise Exception('Flopy does not support exporting to shapefile ' + 'from a MODFLOW-USG unstructured grid.') + + if kper is None: + keys = mfl.data.keys() + keys.sort() + else: + keys = [kper] + if not sparse: + array_dict = {} + for kk in keys: + arrays = mfl.to_array(kk) + for name, array in arrays.items(): + for k in range(array.shape[0]): + # aname = name+"{0:03d}_{1:02d}".format(kk, k) + n = shapefile_utils.shape_attr_name(name, length=4) + aname = "{}{}{}".format(n, k + 1, int(kk) + 1) + array_dict[aname] = array[k] + shapefile_utils.write_grid_shapefile(f, modelgrid, array_dict) + else: + from ..export.shapefile_utils import recarray2shp + from ..utils.geometry import Polygon + + df = mfl.get_dataframe(squeeze=squeeze) + if 'kper' in kwargs or df is None: + ra = mfl[kper] + verts = np.array(modelgrid.get_cell_vertices(ra.i, ra.j)) + elif df is not None: + verts = np.array([modelgrid.get_cell_vertices(i, + df.j.values[ix]) + for ix, i in enumerate(df.i.values)]) + ra = df.to_records(index=False) + epsg = kwargs.get('epsg', None) + prj = kwargs.get('prj', None) + polys = np.array([Polygon(v) for v in verts]) + recarray2shp(ra, geoms=polys, shpname=f, + mg=modelgrid, epsg=epsg, prj=prj) + + elif isinstance(f, NetCdf) or isinstance(f, dict): + base_name = mfl.package.name[0].lower() + # f.log("getting 4D masked arrays for {0}".format(base_name)) + # m4d = mfl.masked_4D_arrays + # f.log("getting 4D masked arrays for {0}".format(base_name)) + + # for name, array in m4d.items(): + for name, array in mfl.masked_4D_arrays_itr(): + var_name = base_name + '_' + name + if isinstance(f, dict): + f[var_name] = array + continue + f.log("processing {0} attribute".format(name)) + + units = None + if var_name in NC_UNITS_FORMAT: + units = NC_UNITS_FORMAT[var_name].format(f.grid_units, + f.time_units) + precision_str = NC_PRECISION_TYPE[mfl.dtype[name].type] + if var_name in NC_LONG_NAMES: + attribs = {"long_name": 
NC_LONG_NAMES[var_name]}
+            else:
+                attribs = {"long_name": var_name}
+            attribs["coordinates"] = "time layer latitude longitude"
+            attribs["min"] = np.nanmin(array)
+            attribs["max"] = np.nanmax(array)
+            if np.isnan(attribs["min"]) or np.isnan(attribs["max"]):
+                raise Exception(
+                    "error processing {0}: all NaNs".format(var_name))
+
+            if units is not None:
+                attribs["units"] = units
+            try:
+                dim_tuple = ("time",) + f.dimension_names
+                var = f.create_variable(var_name, attribs,
+                                        precision_str=precision_str,
+                                        dimensions=dim_tuple)
+            except Exception as e:
+                estr = "error creating variable {0}:\n{1}".format(var_name,
+                                                                  str(e))
+                f.logger.warn(estr)
+                raise Exception(estr)
+
+            array[np.isnan(array)] = f.fillvalue
+            try:
+                var[:] = array
+            except Exception as e:
+                estr = "error setting array to variable {0}:\n{1}".format(
+                    var_name, str(e))
+                f.logger.warn(estr)
+                raise Exception(estr)
+            f.log("processing {0} attribute".format(name))
+
+        return f
+    else:
+        raise NotImplementedError("unrecognized export argument:{0}".format(f))
+
+
+def transient2d_export(f, t2d, fmt=None, **kwargs):
+    """
+    export helper for Transient2d instances
+
+    Parameters
+    -----------
+    f : str
+        filename or existing export instance type (NetCdf only for now)
+    t2d : Transient2d instance
+    fmt : str
+        output format flag. 'vtk' will export to vtk
+    **kwargs : keyword arguments
+        min_valid : minimum valid value
+        max_valid : maximum valid value
+        modelgrid : flopy.discretization.Grid
+            model grid instance which will supersede the flopy.model.modelgrid
+        if fmt is set to 'vtk', parameters of vtk.export_transient
+
+    """
+
+    if not isinstance(t2d, DataInterface):
+        err = "transient2d_helper only helps instances that support " \
+              "DataInterface"
+        raise AssertionError(err)
+
+    min_valid = kwargs.get("min_valid", -1.0e+9)
+    max_valid = kwargs.get("max_valid", 1.0e+9)
+
+    modelgrid = t2d.model.modelgrid
+    if 'modelgrid' in kwargs:
+        modelgrid = kwargs.pop("modelgrid")
+
+    if isinstance(f, str) and f.lower().endswith(".nc"):
+        f = NetCdf(f, t2d.model, **kwargs)
+
+    if isinstance(f, str) and f.lower().endswith(".shp"):
+        array_dict = {}
+        for kper in range(t2d.model.modeltime.nper):
+            u2d = t2d[kper]
+            name = '{}_{}'.format(shapefile_utils.shape_attr_name(u2d.name),
+                                  kper + 1)
+            array_dict[name] = u2d.array
+        shapefile_utils.write_grid_shapefile(f, modelgrid, array_dict)
+
+    elif isinstance(f, NetCdf) or isinstance(f, dict):
+        # mask row/col locations that do not have at least one active
+        # cell in any layer
+        mask = None
+        if modelgrid.idomain is not None:
+            ibnd = np.abs(modelgrid.idomain).sum(axis=0)
+            mask = ibnd == 0
+
+        # f.log("getting 4D array for {0}".format(t2d.name_base))
+        array = t2d.array
+        # f.log("getting 4D array for {0}".format(t2d.name_base))
+        with np.errstate(invalid="ignore"):
+            if array.dtype not in [int, np.int, np.int32, np.int64]:
+                if mask is not None:
+                    array[:, 0, mask] = np.NaN
+                array[array <= min_valid] = np.NaN
+                array[array >= max_valid] = np.NaN
+                mx, mn = np.nanmax(array), np.nanmin(array)
+            else:
+                mx, mn = np.nanmax(array), np.nanmin(array)
+                array[array <= min_valid] = netcdf.FILLVALUE
+                array[array >= max_valid] = netcdf.FILLVALUE
+                # if t2d.model.bas6 is not None:
+                #     array[:, 0, t2d.model.bas6.ibound.array[0] == 0] = \
+                #         f.fillvalue
+                # elif t2d.model.btn is not None:
+                #     array[:, 0, t2d.model.btn.icbund.array[0] == 0] = \
+                #         f.fillvalue
+
+        var_name = t2d.name.replace('_', '')
+        if isinstance(f, dict):
+            array[array == netcdf.FILLVALUE] = np.NaN
+            f[var_name] = array
+            return f
+
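+        # note: NaNs (masked or out-of-range cells) are swapped for the
+        # netCDF fill value below, before the variable is written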
+        array[np.isnan(array)] = f.fillvalue
+        units = "unitless"
+
+        if var_name in NC_UNITS_FORMAT:
+            units = NC_UNITS_FORMAT[var_name].format(f.grid_units,
+                                                     f.time_units)
+        try:
+            precision_str = NC_PRECISION_TYPE[t2d.dtype]
+        except KeyError:
+            precision_str = NC_PRECISION_TYPE[t2d.dtype.type]
+        if var_name in NC_LONG_NAMES:
+            attribs = {"long_name": NC_LONG_NAMES[var_name]}
+        else:
+            attribs = {"long_name": var_name}
+        attribs["coordinates"] = "time layer latitude longitude"
+        attribs["units"] = units
+        attribs["min"] = mn
+        attribs["max"] = mx
+        if np.isnan(attribs["min"]) or np.isnan(attribs["max"]):
+            raise Exception("error processing {0}: all NaNs".format(var_name))
+        try:
+            dim_tuple = ("time",) + f.dimension_names
+            var = f.create_variable(var_name, attribs,
+                                    precision_str=precision_str,
+                                    dimensions=dim_tuple)
+        except Exception as e:
+            estr = "error creating variable {0}:\n{1}".format(var_name, str(e))
+            f.logger.warn(estr)
+            raise Exception(estr)
+        try:
+            var[:, 0] = array
+        except Exception as e:
+            estr = "error setting array to variable {0}:\n{1}".format(var_name,
+                                                                      str(e))
+            f.logger.warn(estr)
+            raise Exception(estr)
+        return f
+
+    elif fmt == 'vtk':
+        name = kwargs.get('name', t2d.name)
+        nanval = kwargs.get('nanval', -1e20)
+        smooth = kwargs.get('smooth', False)
+        point_scalars = kwargs.get('point_scalars', False)
+        vtk_grid_type = kwargs.get('vtk_grid_type', 'auto')
+        true2d = kwargs.get('true2d', False)
+        binary = kwargs.get('binary', False)
+        kpers = kwargs.get('kpers', None)
+        vtk.export_transient(t2d.model, t2d.array, f, name, nanval=nanval,
+                             smooth=smooth, point_scalars=point_scalars,
+                             array2d=True, vtk_grid_type=vtk_grid_type,
+                             true2d=true2d, binary=binary, kpers=kpers)
+    else:
+        raise NotImplementedError("unrecognized export argument:{0}".format(f))
+
+
+def array3d_export(f, u3d, fmt=None, **kwargs):
+    """
+    export helper for Util3d instances
+
+    Parameters
+    -----------
+    f : str
+        filename or existing export instance type (NetCdf only for now)
+    u3d : Util3d instance
+    fmt : str
+        output format flag.
'vtk' will export to vtk + **kwargs : keyword arguments + min_valid : minimum valid value + max_valid : maximum valid value + modelgrid : flopy.discretization.Grid + model grid instance which will supercede the flopy.model.modelgrid + if fmt is set to 'vtk', parameters of vtk.export_array + + """ + + assert isinstance(u3d, DataInterface), "array3d_export only helps " \ + "instances that support " \ + "DataInterface" + + min_valid = kwargs.get("min_valid", -1.0e+9) + max_valid = kwargs.get("max_valid", 1.0e+9) + + modelgrid = u3d.model.modelgrid + if "modelgrid" in kwargs: + modelgrid = kwargs.pop("modelgrid") + + if isinstance(f, str) and f.lower().endswith(".nc"): + f = NetCdf(f, u3d.model, **kwargs) + + if isinstance(f, str) and f.lower().endswith(".shp"): + array_dict = {} + for ilay in range(modelgrid.nlay): + u2d = u3d[ilay] + if isinstance(u2d, np.ndarray): + dname = u3d.name + array = u2d + else: + dname = u2d.name + array = u2d.array + name = '{}_{}'.format( + shapefile_utils.shape_attr_name(dname), ilay + 1) + array_dict[name] = array + shapefile_utils.write_grid_shapefile(f, modelgrid, array_dict) + + elif isinstance(f, NetCdf) or isinstance(f, dict): + var_name = u3d.name + if isinstance(var_name, list) or isinstance(var_name, tuple): + var_name = var_name[0] + var_name = var_name.replace(' ', '_').lower() + # f.log("getting 3D array for {0}".format(var_name)) + array = u3d.array + + # this is for the crappy vcont in bcf6 + # if isinstance(f,NetCdf) and array.shape != f.shape: + # f.log("broadcasting 3D array for {0}".format(var_name)) + # full_array = np.empty(f.shape) + # full_array[:] = np.NaN + # full_array[:array.shape[0]] = array + # array = full_array + # f.log("broadcasting 3D array for {0}".format(var_name)) + # f.log("getting 3D array for {0}".format(var_name)) + # + mask = None + if modelgrid.idomain is not None and "ibound" not in var_name: + mask = modelgrid.idomain == 0 + + if mask is not None and array.shape != mask.shape: + # f.log("broadcasting 3D array for {0}".format(var_name)) + full_array = np.empty(mask.shape) + full_array[:] = np.NaN + full_array[:array.shape[0]] = array + array = full_array + # f.log("broadcasting 3D array for {0}".format(var_name)) + + # runtime warning issued in some cases - need to track down cause + # happens when NaN is already in array + with np.errstate(invalid="ignore"): + if array.dtype not in [int, np.int, np.int32, np.int64]: + # if u3d.model.modelgrid.bas6 is not None and "ibound" not + # in var_name: + # array[u3d.model.modelgrid.bas6.ibound.array == 0] = + # np.NaN + # elif u3d.model.btn is not None and 'icbund' not in var_name: + # array[u3d.model.modelgrid.btn.icbund.array == 0] = np.NaN + if mask is not None: + array[mask] = np.NaN + array[array <= min_valid] = np.NaN + array[array >= max_valid] = np.NaN + mx, mn = np.nanmax(array), np.nanmin(array) + else: + mx, mn = np.nanmax(array), np.nanmin(array) + if mask is not None: + array[mask] = netcdf.FILLVALUE + array[array <= min_valid] = netcdf.FILLVALUE + array[array >= max_valid] = netcdf.FILLVALUE + if modelgrid.idomain is not None and "ibound" not in var_name: + array[modelgrid.idomain == 0] = netcdf.FILLVALUE + + if isinstance(f, dict): + f[var_name] = array + return f + + array[np.isnan(array)] = f.fillvalue + units = "unitless" + if var_name in NC_UNITS_FORMAT: + units = NC_UNITS_FORMAT[var_name].format(f.grid_units, + f.time_units) + precision_str = NC_PRECISION_TYPE[u3d.dtype] + if var_name in NC_LONG_NAMES: + attribs = {"long_name": NC_LONG_NAMES[var_name]} + 
else: + attribs = {"long_name": var_name} + attribs["coordinates"] = "layer latitude longitude" + attribs["units"] = units + attribs["min"] = mn + attribs["max"] = mx + if np.isnan(attribs["min"]) or np.isnan(attribs["max"]): + raise Exception("error processing {0}: all NaNs".format(var_name)) + try: + var = f.create_variable(var_name, attribs, + precision_str=precision_str, + dimensions=f.dimension_names) + except Exception as e: + estr = "error creating variable {0}:\n{1}".format(var_name, str(e)) + f.logger.warn(estr) + raise Exception(estr) + try: + var[:] = array + except Exception as e: + estr = "error setting array to variable {0}:\n{1}".format(var_name, + str(e)) + f.logger.warn(estr) + raise Exception(estr) + return f + + elif fmt == 'vtk': + # call vtk array export to folder + name = kwargs.get('name', u3d.name) + nanval = kwargs.get('nanval', -1e20) + smooth = kwargs.get('smooth', False) + point_scalars = kwargs.get('point_scalars', False) + vtk_grid_type = kwargs.get('vtk_grid_type', 'auto') + true2d = kwargs.get('true2d', False) + binary = kwargs.get('binary', False) + if isinstance(name, list) or isinstance(name, tuple): + name = name[0] + + vtk.export_array(u3d.model, u3d.array, f, name, nanval=nanval, + smooth=smooth, point_scalars=point_scalars, + vtk_grid_type=vtk_grid_type, true2d=true2d, + binary=binary) + + else: + raise NotImplementedError("unrecognized export argument:{0}".format(f)) + + +def array2d_export(f, u2d, fmt=None, **kwargs): + """ + export helper for Util2d instances + + Parameters + ---------- + f : str + filename or existing export instance type (NetCdf only for now) + u2d : Util2d instance + fmt : str + output format flag. 'vtk' will export to vtk + **kwargs : keyword arguments + min_valid : minimum valid value + max_valid : maximum valid value + modelgrid : flopy.discretization.Grid + model grid instance which will supercede the flopy.model.modelgrid + if fmt is set to 'vtk', parameters of vtk.export_array + + """ + assert isinstance(u2d, DataInterface), "util2d_helper only helps " \ + "instances that support " \ + "DataInterface" + assert len(u2d.array.shape) == 2, "util2d_helper only supports 2D arrays" + + min_valid = kwargs.get("min_valid", -1.0e+9) + max_valid = kwargs.get("max_valid", 1.0e+9) + + modelgrid = u2d.model.modelgrid + if "modelgrid" in kwargs: + modelgrid = kwargs.pop("modelgrid") + + if isinstance(f, str) and f.lower().endswith(".nc"): + f = NetCdf(f, u2d.model, **kwargs) + + if isinstance(f, str) and f.lower().endswith(".shp"): + name = shapefile_utils.shape_attr_name(u2d.name, keep_layer=True) + shapefile_utils.write_grid_shapefile(f, modelgrid, + {name: u2d.array}) + return + + elif isinstance(f, str) and f.lower().endswith(".asc"): + export_array(modelgrid, f, u2d.array, **kwargs) + return + + elif isinstance(f, NetCdf) or isinstance(f, dict): + + # try to mask the array - assume layer 1 ibound is a good mask + # f.log("getting 2D array for {0}".format(u2d.name)) + array = u2d.array + # f.log("getting 2D array for {0}".format(u2d.name)) + + with np.errstate(invalid="ignore"): + if array.dtype not in [int, np.int, np.int32, np.int64]: + if modelgrid.idomain is not None and \ + "ibound" not in u2d.name.lower() and \ + "idomain" not in u2d.name.lower(): + array[modelgrid.idomain[0, :, :] == 0] = np.NaN + array[array <= min_valid] = np.NaN + array[array >= max_valid] = np.NaN + mx, mn = np.nanmax(array), np.nanmin(array) + else: + mx, mn = np.nanmax(array), np.nanmin(array) + array[array <= min_valid] = netcdf.FILLVALUE + array[array 
>= max_valid] = netcdf.FILLVALUE
+                if modelgrid.idomain is not None and \
+                        "ibound" not in u2d.name.lower() and \
+                        "idomain" not in u2d.name.lower() and \
+                        "icbund" not in u2d.name.lower():
+                    array[modelgrid.idomain[0, :, :] == 0] = \
+                        netcdf.FILLVALUE
+        var_name = u2d.name
+        if isinstance(f, dict):
+            f[var_name] = array
+            return f
+
+        array[np.isnan(array)] = f.fillvalue
+        units = "unitless"
+
+        if var_name in NC_UNITS_FORMAT:
+            units = NC_UNITS_FORMAT[var_name].format(f.grid_units,
+                                                     f.time_units)
+        precision_str = NC_PRECISION_TYPE[u2d.dtype]
+        if var_name in NC_LONG_NAMES:
+            attribs = {"long_name": NC_LONG_NAMES[var_name]}
+        else:
+            attribs = {"long_name": var_name}
+        attribs["coordinates"] = "latitude longitude"
+        attribs["units"] = units
+        attribs["min"] = mn
+        attribs["max"] = mx
+        if np.isnan(attribs["min"]) or np.isnan(attribs["max"]):
+            raise Exception("error processing {0}: all NaNs".format(var_name))
+        try:
+            var = f.create_variable(var_name, attribs,
+                                    precision_str=precision_str,
+                                    dimensions=f.dimension_names[1:])
+        except Exception as e:
+            estr = "error creating variable {0}:\n{1}".format(var_name, str(e))
+            f.logger.warn(estr)
+            raise Exception(estr)
+        try:
+            var[:] = array
+        except Exception as e:
+            estr = "error setting array to variable {0}:\n{1}".format(var_name,
+                                                                      str(e))
+            f.logger.warn(estr)
+            raise Exception(estr)
+        return f
+
+    elif fmt == 'vtk':
+
+        # call vtk array export to folder
+        name = kwargs.get('name', u2d.name)
+        nanval = kwargs.get('nanval', -1e20)
+        smooth = kwargs.get('smooth', False)
+        point_scalars = kwargs.get('point_scalars', False)
+        vtk_grid_type = kwargs.get('vtk_grid_type', 'auto')
+        true2d = kwargs.get('true2d', False)
+        binary = kwargs.get('binary', False)
+        vtk.export_array(u2d.model, u2d.array, f, name, nanval=nanval,
+                         smooth=smooth, point_scalars=point_scalars,
+                         array2d=True, vtk_grid_type=vtk_grid_type,
+                         true2d=true2d, binary=binary)
+
+    else:
+        raise NotImplementedError("unrecognized export argument:{0}".format(f))
+
+
+def export_array(modelgrid, filename, a, nodata=-9999,
+                 fieldname='value', **kwargs):
+    """
+    Write a numpy array to an Arc Ascii grid, GeoTIFF, or shapefile with
+    the model reference.
+
+    Parameters
+    ----------
+    modelgrid : flopy.discretization.StructuredGrid object
+        model grid
+    filename : str
+        Path of output file. Export format is determined by
+        file extension.
+        '.asc'  Arc Ascii grid
+        '.tif'  GeoTIFF (requires rasterio package)
+        '.shp'  Shapefile
+    a : 2D numpy.ndarray
+        Array to export
+    nodata : scalar
+        Value to assign to np.nan entries (default -9999)
+    fieldname : str
+        Attribute field name for array values (shapefile export only).
+        (default 'value')
+    kwargs:
+        keyword arguments to np.savetxt (ascii),
+        rasterio.open (GeoTIFF),
+        or flopy.export.shapefile_utils.write_grid_shapefile
+
+    Notes
+    -----
+    Rotated grids will either be unrotated prior to export,
+    using scipy.ndimage.rotate (Arc Ascii format) or rotation will be
+    included in their transform property (GeoTIFF format). In either case
+    the pixels will be displayed in the (unrotated) projected geographic
+    coordinate system, so the pixels will no longer align exactly with the
+    model grid (as displayed from a shapefile, for example). A key difference
+    between Arc Ascii and GeoTIFF (besides disk usage) is that the
+    unrotated Arc Ascii will have a different grid size, whereas the GeoTIFF
+    will have the same number of rows and pixels as the original.
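+
+    Examples
+    --------
+    A minimal sketch (assuming ``m`` is a flopy model with a structured
+    grid; the output filename is arbitrary):
+
+    >>> from flopy.export.utils import export_array
+    >>> export_array(m.modelgrid, 'model_top.asc', m.dis.top.array)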
+ + """ + + if filename.lower().endswith(".asc"): + if len(np.unique(modelgrid.delr)) != \ + len(np.unique(modelgrid.delc)) != 1 \ + or modelgrid.delr[0] != modelgrid.delc[0]: + raise ValueError('Arc ascii arrays require a uniform grid.') + + xoffset, yoffset = modelgrid.xoffset, modelgrid.yoffset + cellsize = modelgrid.delr[0] + fmt = kwargs.get('fmt', '%.18e') + a = a.copy() + a[np.isnan(a)] = nodata + if modelgrid.angrot != 0: + try: + from scipy.ndimage import rotate + except ImportError: + rotate = None + print('scipy package required to export rotated grid.') + + if rotate is not None: + a = rotate(a, modelgrid.angrot, cval=nodata) + height_rot, width_rot = a.shape + xmin, ymin, xmax, ymax = modelgrid.extent + dx = (xmax - xmin) / width_rot + dy = (ymax - ymin) / height_rot + cellsize = np.max((dx, dy)) + xoffset, yoffset = xmin, ymin + + filename = '.'.join( + filename.split('.')[:-1]) + '.asc' # enforce .asc ending + nrow, ncol = a.shape + a[np.isnan(a)] = nodata + txt = 'ncols {:d}\n'.format(ncol) + txt += 'nrows {:d}\n'.format(nrow) + txt += 'xllcorner {:f}\n'.format(xoffset) + txt += 'yllcorner {:f}\n'.format(yoffset) + txt += 'cellsize {}\n'.format(cellsize) + # ensure that nodata fmt consistent w values + txt += 'NODATA_value {}\n'.format(fmt) % (nodata) + with open(filename, 'w') as output: + output.write(txt) + with open(filename, 'ab') as output: + np.savetxt(output, a, **kwargs) + print('wrote {}'.format(filename)) + + elif filename.lower().endswith(".tif"): + if len(np.unique(modelgrid.delr)) != \ + len(np.unique(modelgrid.delc)) != 1 \ + or modelgrid.delr[0] != modelgrid.delc[0]: + raise ValueError('GeoTIFF export require a uniform grid.') + try: + import rasterio + from rasterio import Affine + except ImportError: + print('GeoTIFF export requires the rasterio package.') + return + dxdy = modelgrid.delc[0] + # because this is only implemented for a structured grid, + # we can get the xul and yul coordinate from modelgrid.xvertices(0, 0) + verts = modelgrid.get_cell_vertices(0, 0) + xul, yul = verts[0] + trans = Affine.translation(xul, yul) * \ + Affine.rotation(modelgrid.angrot) * \ + Affine.scale(dxdy, -dxdy) + + # third dimension is the number of bands + a = a.copy() + if len(a.shape) == 2: + a = np.reshape(a, (1, a.shape[0], a.shape[1])) + if a.dtype.name == 'int64': + a = a.astype('int32') + dtype = rasterio.int32 + elif a.dtype.name == 'int32': + dtype = rasterio.int32 + elif a.dtype.name == 'float64': + dtype = rasterio.float64 + elif a.dtype.name == 'float32': + dtype = rasterio.float32 + else: + msg = 'ERROR: invalid dtype "{}"'.format(a.dtype.name) + raise TypeError(msg) + + meta = {'count': a.shape[0], + 'width': a.shape[2], + 'height': a.shape[1], + 'nodata': nodata, + 'dtype': dtype, + 'driver': 'GTiff', + 'crs': modelgrid.proj4, + 'transform': trans + } + meta.update(kwargs) + with rasterio.open(filename, 'w', **meta) as dst: + dst.write(a) + print('wrote {}'.format(filename)) + + elif filename.lower().endswith(".shp"): + from ..export.shapefile_utils import write_grid_shapefile + epsg = kwargs.get('epsg', None) + prj = kwargs.get('prj', None) + if epsg is None and prj is None: + epsg = modelgrid.epsg + write_grid_shapefile(filename, modelgrid, array_dict={fieldname: a}, + nan_val=nodata, + epsg=epsg, prj=prj) + + +def export_contours(modelgrid, filename, contours, + fieldname='level', epsg=None, prj=None, + **kwargs): + """ + Convert matplotlib contour plot object to shapefile. 
+
+    Parameters
+    ----------
+    modelgrid : flopy.discretization.Grid
+        flopy modelgrid instance
+    filename : str
+        path of output shapefile
+    contours : matplotlib.contour.QuadContourSet or list of them
+        (object returned by matplotlib.pyplot.contour)
+    fieldname : str
+        gis attribute table field name
+    epsg : int
+        EPSG code. See https://www.epsg-registry.org/ or spatialreference.org
+    prj : str
+        Existing projection file to be used with new shapefile.
+    **kwargs : keyword arguments to flopy.export.shapefile_utils.recarray2shp
+
+    Returns
+    -------
+    None
+
+    """
+    from ..utils.geometry import LineString
+    from .shapefile_utils import recarray2shp
+
+    if not isinstance(contours, list):
+        contours = [contours]
+
+    if epsg is None:
+        epsg = modelgrid.epsg
+    if prj is None:
+        prj = modelgrid.proj4
+
+    geoms = []
+    level = []
+    for ctr in contours:
+        levels = ctr.levels
+        for i, c in enumerate(ctr.collections):
+            paths = c.get_paths()
+            geoms += [LineString(p.vertices) for p in paths]
+            level += list(np.ones(len(paths)) * levels[i])
+
+    # convert the level list to a recarray
+    ra = np.array(level,
+                  dtype=[(fieldname, float)]).view(np.recarray)
+
+    recarray2shp(ra, geoms, filename, epsg=epsg, prj=prj, **kwargs)
+    return
+
+
+def export_contourf(filename, contours, fieldname='level', epsg=None,
+                    prj=None, **kwargs):
+    """
+    Write matplotlib filled contours to shapefile. This utility requires
+    that shapely is installed.
+
+    Parameters
+    ----------
+    filename : str
+        name of output shapefile (e.g. myshp.shp)
+    contours : matplotlib.contour.QuadContourSet or list of them
+        (object returned by matplotlib.pyplot.contourf)
+    fieldname : str
+        Name of shapefile attribute field to contain the contour level. The
+        fieldname column in the attribute table will contain the lower end of
+        the range represented by the polygon. Default is 'level'.
+    epsg : int
+        EPSG code. See https://www.epsg-registry.org/ or spatialreference.org
+    prj : str
+        Existing projection file to be used with new shapefile.
+ + **kwargs : keyword arguments to flopy.export.shapefile_utils.recarray2shp + + Returns + ------- + None + + Examples + -------- + >>> import flopy + >>> import matplotlib.pyplot as plt + >>> from flopy.export.utils import export_contourf + >>> a = np.random.random((10, 10)) + >>> cs = plt.contourf(a) + >>> export_contourf('myfilledcontours.shp', cs) + + """ + + try: + from shapely import geometry + except (ImportError, ModuleNotFoundError): + raise ImportError('export_contourf requires python shapely package') + + from ..utils.geometry import Polygon + from .shapefile_utils import recarray2shp + + shapelygeoms = [] + level = [] + + if not isinstance(contours, list): + contours = [contours] + + for c in contours: + levels = c.levels + for idx, col in enumerate(c.collections): + # Loop through all polygons that have the same intensity level + for contour_path in col.get_paths(): + # Create the polygon for this intensity level + # The first polygon in the path is the main one, the following + # ones are "holes" + for ncp, cp in enumerate(contour_path.to_polygons()): + x = cp[:, 0] + y = cp[:, 1] + new_shape = geometry.Polygon([(i[0], i[1]) + for i in zip(x, y)]) + if ncp == 0: + poly = new_shape + else: + # Remove the holes if there are any + poly = poly.difference(new_shape) + + # store shapely geometry object + shapelygeoms.append(poly) + level.append(levels[idx]) + + geoms = [] + for shpgeom in shapelygeoms: + xa, ya = shpgeom.exterior.coords.xy + interiors = [s.coords for s in shpgeom.interiors] + pg = Polygon([(x, y) for x, y in zip(xa, ya)], interiors=interiors) + geoms += [pg] + + print('Writing {} polygons'.format(len(level))) + + # Create recarray + ra = np.array(level, dtype=[(fieldname, float)]).view(np.recarray) + + recarray2shp(ra, geoms, filename, epsg=epsg, prj=prj, **kwargs) + return + + +def export_array_contours(modelgrid, filename, a, + fieldname='level', + interval=None, + levels=None, + maxlevels=1000, + epsg=None, + prj=None, + **kwargs): + """ + Contour an array using matplotlib; write shapefile of contours. + + Parameters + ---------- + modelgrid : flopy.discretization.Grid object + model grid object + filename : str + Path of output file with '.shp' extention. + a : 2D numpy array + Array to contour + fieldname : str + gis field name + interval : float + interval to calculate levels from + levels : list + list of contour levels + maxlevels : int + maximum number of contour levels + epsg : int + EPSG code. See https://www.epsg-registry.org/ or spatialreference.org + prj : str + Existing projection file to be used with new shapefile. 
+ **kwargs : keyword arguments to flopy.export.shapefile_utils.recarray2shp + + """ + import matplotlib.pyplot as plt + + if epsg is None: + epsg = modelgrid.epsg + if prj is None: + prj = modelgrid.proj4 + + if interval is not None: + imin = np.nanmin(a) + imax = np.nanmax(a) + nlevels = np.round(np.abs(imax - imin) / interval, 2) + msg = '{:.0f} levels at interval of {} > maxlevels={}'.format( + nlevels, + interval, + maxlevels) + assert nlevels < maxlevels, msg + levels = np.arange(imin, imax, interval) + ax = plt.subplots()[-1] + ctr = contour_array(modelgrid, ax, a, levels=levels) + export_contours(modelgrid, filename, ctr, fieldname, epsg, prj, **kwargs) + plt.close() + + +def contour_array(modelgrid, ax, a, **kwargs): + """ + Create a QuadMesh plot of the specified array using pcolormesh + + Parameters + ---------- + modelgrid : flopy.discretization.Grid object + modelgrid object + ax : matplotlib.axes.Axes + ax to add the contours + + a : np.ndarray + array to contour + + Returns + ------- + contour_set : ContourSet + + """ + from ..plot import PlotMapView + + kwargs['ax'] = ax + pmv = PlotMapView(modelgrid=modelgrid) + contour_set = pmv.contour_array(a=a, **kwargs) + + return contour_set diff --git a/flopy/mbase.py b/flopy/mbase.py index 974a5de476..cf1027e294 100644 --- a/flopy/mbase.py +++ b/flopy/mbase.py @@ -1,1722 +1,1722 @@ -""" -mbase module - This module contains the base model class from which - all of the other models inherit from. - -""" - -from __future__ import print_function -import abc -import sys -import os -import shutil -import threading -import warnings -import queue as Queue - -from datetime import datetime -from shutil import which -from subprocess import Popen, PIPE, STDOUT -import copy -import numpy as np -from flopy import utils, discretization -from .version import __version__ -from .discretization.modeltime import ModelTime -from .discretization.grid import Grid - -# Global variables -iconst = 1 # Multiplier for individual array elements in integer and real arrays read by MODFLOW's U2DREL, U1DREL and U2DINT. -iprn = -1 # Printout flag. If >= 0 then array values read are printed in listing file. 
- - -class FileDataEntry(object): - def __init__(self, fname, unit, binflag=False, output=False, package=None): - self.fname = fname - self.unit = unit - self.binflag = binflag - self.output = output - self.package = package - - -class FileData(object): - def __init__(self): - self.file_data = [] - return - - def add_file(self, fname, unit, binflag=False, output=False, package=None): - ipop = [] - for idx, file_data in enumerate(self.file_data): - if file_data.fname == fname or file_data.unit == unit: - ipop.append(idx) - - self.file_data.append(FileDataEntry(fname, unit, binflag=binflag, - output=output, package=package)) - return - - -class ModelInterface(object): - def __init__(self): - self._mg_resync = True - self._modelgrid = None - - def update_modelgrid(self): - if self._modelgrid is not None: - self._modelgrid = Grid(proj4=self._modelgrid.proj4, - xoff=self._modelgrid.xoffset, - yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot) - self._mg_resync = True - - @property - @abc.abstractmethod - def modelgrid(self): - raise NotImplementedError( - 'must define modelgrid in child ' - 'class to use this base class') - - @property - @abc.abstractmethod - def packagelist(self): - raise NotImplementedError( - 'must define packagelist in child ' - 'class to use this base class') - - @property - @abc.abstractmethod - def namefile(self): - raise NotImplementedError( - 'must define namefile in child ' - 'class to use this base class') - - @property - @abc.abstractmethod - def model_ws(self): - raise NotImplementedError( - 'must define model_ws in child ' - 'class to use this base class') - - @property - @abc.abstractmethod - def exename(self): - raise NotImplementedError( - 'must define exename in child ' - 'class to use this base class') - - @property - @abc.abstractmethod - def version(self): - raise NotImplementedError( - 'must define version in child ' - 'class to use this base class') - - @property - @abc.abstractmethod - def solver_tols(self): - raise NotImplementedError( - 'must define version in child ' - 'class to use this base class') - - @abc.abstractmethod - def export(self, f, **kwargs): - raise NotImplementedError( - 'must define export in child ' - 'class to use this base class') - - @property - @abc.abstractmethod - def laytyp(self): - raise NotImplementedError( - 'must define laytyp in child ' - 'class to use this base class') - - @property - @abc.abstractmethod - def hdry(self): - raise NotImplementedError( - 'must define hdry in child ' - 'class to use this base class') - - @property - @abc.abstractmethod - def hnoflo(self): - raise NotImplementedError( - 'must define hnoflo in child ' - 'class to use this base class') - - @property - @abc.abstractmethod - def laycbd(self): - raise NotImplementedError( - 'must define laycbd in child ' - 'class to use this base class') - - @property - @abc.abstractmethod - def verbose(self): - raise NotImplementedError( - 'must define verbose in child ' - 'class to use this base class') - - @abc.abstractmethod - def check(self, f=None, verbose=True, level=1): - raise NotImplementedError( - 'must define check in child ' - 'class to use this base class') - - def get_package_list(self, ftype=None): - """ - Get a list of all the package names. - - Parameters - ---------- - ftype : str - Type of package, 'RIV', 'LPF', etc. - - Returns - ------- - val : list of strings - Can be used to see what packages are in the model, and can then - be used with get_package to pull out individual packages. 
- - """ - val = [] - for pp in (self.packagelist): - if ftype is None: - val.append(pp.name[0].upper()) - elif pp.package_type.lower() == ftype: - val.append(pp.name[0].upper()) - return val - - def _check(self, chk, level=1): - """ - Check model data for common errors. - - Parameters - ---------- - f : str or file handle - String defining file name or file handle for summary file - of check method output. If a string is passed a file handle - is created. If f is None, check method does not write - results to a summary file. (default is None) - verbose : bool - Boolean flag used to determine if check method results are - written to the screen - level : int - Check method analysis level. If level=0, summary checks are - performed. If level=1, full checks are performed. - summarize : bool - Boolean flag used to determine if summary of results is written - to the screen - - Returns - ------- - None - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow.load('model.nam') - >>> m.check() - """ - - # check instance for model-level check - results = {} - - for p in self.packagelist: - if chk.package_check_levels.get(p.name[0].lower(), 0) <= level: - results[p.name[0]] = p.check(f=None, verbose=False, - level=level - 1, - checktype=chk.__class__) - - # model level checks - # solver check - if self.version in chk.solver_packages.keys(): - solvers = set(chk.solver_packages[self.version]).intersection( - set(self.get_package_list())) - if not solvers: - chk._add_to_summary('Error', desc='\r No solver package', - package='model') - elif len(list(solvers)) > 1: - for s in solvers: - chk._add_to_summary('Error', - desc='\r Multiple solver packages', - package=s) - else: - chk.passed.append('Compatible solver package') - - # add package check results to model level check summary - for r in results.values(): - if r is not None and r.summary_array is not None: # currently SFR doesn't have one - chk.summary_array = np.append(chk.summary_array, - r.summary_array).view( - np.recarray) - chk.passed += ['{} package: {}'.format(r.package.name[0], psd) - for psd in r.passed] - chk.summarize() - return chk - - -class BaseModel(ModelInterface): - """ - MODFLOW based models base class - - Parameters - ---------- - - modelname : string - Name of the model. Model files will be given this name. (default is - 'modflowtest' - - namefile_ext : string - name file extension (default is 'nam') - - exe_name : string - name of the modflow executable - - model_ws : string - Path to the model workspace. Model files will be created in this - directory. Default is None, in which case model_ws is assigned - to the current working directory. - - """ - - def __init__(self, modelname='modflowtest', namefile_ext='nam', - exe_name='mf2k.exe', model_ws=None, - structured=True, verbose=False, **kwargs): - """ - BaseModel init - """ - ModelInterface.__init__(self) - self.__name = modelname - self.namefile_ext = namefile_ext or '' - self._namefile = self.__name + '.' 
+ self.namefile_ext - self._packagelist = [] - self.heading = '' - self.exe_name = exe_name - self._verbose = verbose - self.external_path = None - self.external_extension = 'ref' - if model_ws is None: model_ws = os.getcwd() - if not os.path.exists(model_ws): - try: - os.makedirs(model_ws) - except: - print( - '\n{0:s} not valid, workspace-folder was changed to {1:s}\n'.format( - model_ws, os.getcwd())) - model_ws = os.getcwd() - self._model_ws = model_ws - self.structured = structured - self.pop_key_list = [] - self.cl_params = '' - - # check for reference info in kwargs - # we are just carrying these until a dis package is added - xll = kwargs.pop("xll", None) - yll = kwargs.pop("yll", None) - self._xul = kwargs.pop("xul", None) - self._yul = kwargs.pop("yul", None) - if self._xul is not None or self._yul is not None: - warnings.warn('xul/yul have been deprecated. Use xll/yll instead.', - DeprecationWarning) - - self._rotation = kwargs.pop("rotation", 0.0) - self._proj4_str = kwargs.pop("proj4_str", None) - self._start_datetime = kwargs.pop("start_datetime", "1-1-1970") - - # build model discretization objects - self._modelgrid = Grid(proj4=self._proj4_str, xoff=xll, yoff=yll, - angrot=self._rotation) - self._modeltime = None - - # Model file information - self.__onunit__ = 10 - # external option stuff - self.array_free_format = True - self.free_format_input = True - self.parameter_load = False - self.array_format = None - self.external_fnames = [] - self.external_units = [] - self.external_binflag = [] - self.external_output = [] - self.package_units = [] - self._next_ext_unit = None - - # output files - self.output_fnames = [] - self.output_units = [] - self.output_binflag = [] - self.output_packages = [] - - return - - @property - def modeltime(self): - raise NotImplementedError( - 'must define modeltime in child ' - 'class to use this base class') - - @property - def modelgrid(self): - raise NotImplementedError( - 'must define modelgrid in child ' - 'class to use this base class') - - @property - def packagelist(self): - return self._packagelist - - @packagelist.setter - def packagelist(self, packagelist): - self._packagelist = packagelist - - @property - def namefile(self): - return self._namefile - - @namefile.setter - def namefile(self, namefile): - self._namefile = namefile - - @property - def model_ws(self): - return self._model_ws - - @model_ws.setter - def model_ws(self, model_ws): - self._model_ws = model_ws - - @property - def exename(self): - return self._exename - - @exename.setter - def exename(self, exename): - self._exename = exename - - @property - def version(self): - return self._version - - @version.setter - def version(self, version): - self._version = version - - @property - def verbose(self): - return self._verbose - - @verbose.setter - def verbose(self, verbose): - self._verbose = verbose - - @property - def laytyp(self): - if self.get_package("LPF") is not None: - return self.get_package("LPF").laytyp.array - if self.get_package("BCF6") is not None: - return self.get_package("BCF6").laycon.array - if self.get_package("UPW") is not None: - return self.get_package("UPW").laytyp.array - - return None - - @property - def hdry(self): - if self.get_package("LPF") is not None: - return self.get_package("LPF").hdry - if self.get_package("BCF6") is not None: - return self.get_package("BCF6").hdry - if self.get_package("UPW") is not None: - return self.get_package("UPW").hdry - return None - - @property - def hnoflo(self): - try: - bas6 = self.get_package("BAS6") - 
return bas6.hnoflo - except AttributeError: - return None - - @property - def laycbd(self): - try: - dis = self.get_package("DIS") - return dis.laycbd.array - except AttributeError: - return None - - # we don't need these - no need for controlled access to array_free_format - # def set_free_format(self, value=True): - # """ - # Set the free format flag for the model instance - # - # Parameters - # ---------- - # value : bool - # Boolean value to set free format flag for model. (default is True) - # - # Returns - # ------- - # - # """ - # if not isinstance(value, bool): - # print('Error: set_free_format passed value must be a boolean') - # return False - # self.array_free_format = value - # - # def get_free_format(self): - # """ - # Return the free format flag for the model - # - # Returns - # ------- - # out : bool - # Free format flag for the model - # - # """ - # return self.array_free_format - - def next_unit(self, i=None): - if i is not None: - self.__onunit__ = i - 1 - else: - self.__onunit__ += 1 - return self.__onunit__ - - def next_ext_unit(self): - """ - Function to encapsulate next_ext_unit attribute - - """ - next_unit = self._next_ext_unit + 1 - self._next_ext_unit += 1 - return next_unit - - def export(self, f, **kwargs): - """ - Method to export a model to netcdf or shapefile based on the - extension of the file name (.shp for shapefile, .nc for netcdf) - - Parameters - ---------- - f : str - filename - kwargs : keyword arguments - modelgrid : flopy.discretization.Grid instance - user supplied modelgrid which can be used for exporting - in lieu of the modelgrid associated with the model object - - Returns - ------- - None or Netcdf object - - """ - from .export import utils - return utils.model_export(f, self, **kwargs) - - def add_package(self, p): - """ - Add a package. - - Parameters - ---------- - p : Package object - - """ - for idx, u in enumerate(p.unit_number): - if u != 0: - if u in self.package_units or u in self.external_units: - try: - pn = p.name[idx] - except: - pn = p.name - if self.verbose: - msg = "\nWARNING:\n unit {} ".format(u) + \ - "of package {} ".format(pn) + \ - "already in use." - print(msg) - self.package_units.append(u) - for i, pp in enumerate(self.packagelist): - if pp.allowDuplicates: - continue - elif isinstance(p, type(pp)): - if self.verbose: - print("\nWARNING:\n Two packages of the same type, " + - "Replacing existing " + - "'{}' package.".format(p.name[0])) - self.packagelist[i] = p - return - if self.verbose: - print('adding Package: ', p.name[0]) - self.packagelist.append(p) - - def remove_package(self, pname): - """ - Remove a package from this model - - Parameters - ---------- - pname : string - Name of the package, such as 'RIV', 'BAS6', etc. 
- - """ - for i, pp in enumerate(self.packagelist): - if pname.upper() in pp.name: - if self.verbose: - print('removing Package: ', pp.name) - - # Remove the package object from the model's packagelist - p = self.packagelist.pop(i) - - # Remove the package unit number from the list of package - # units stored with the model - for iu in p.unit_number: - if iu in self.package_units: - self.package_units.remove(iu) - return - raise StopIteration( - 'Package name ' + pname + ' not found in Package list') - - def __getattr__(self, item): - """ - __getattr__ - syntactic sugar - - Parameters - ---------- - item : str - 3 character package name (case insensitive) or "sr" to access - the SpatialReference instance of the ModflowDis object - - - Returns - ------- - sr : SpatialReference instance - pp : Package object - Package object of type :class:`flopy.pakbase.Package` - - Note - ---- - if self.dis is not None, then the spatial reference instance is updated - using self.dis.delr, self.dis.delc, and self.dis.lenuni before being - returned - """ - if item == 'output_packages' or not hasattr(self, 'output_packages'): - raise AttributeError(item) - - if item == 'sr': - if self.dis is not None: - return self.dis.sr - else: - return None - if item == 'tr': - if self.dis is not None: - return self.dis.tr - else: - return None - if item == "start_datetime": - if self.dis is not None: - return self.dis.start_datetime - else: - return None - - # return self.get_package(item) - # to avoid infinite recursion - if item == "_packagelist" or item == "packagelist": - raise AttributeError(item) - pckg = self.get_package(item) - if pckg is not None or item in self.mfnam_packages: - return pckg - if item == 'modelgrid': - return - raise AttributeError(item) - - def get_ext_dict_attr(self, ext_unit_dict=None, unit=None, filetype=None, - pop_key=True): - iu = None - fname = None - if ext_unit_dict is not None: - for key, value in ext_unit_dict.items(): - if key == unit: - iu = key - fname = os.path.basename(value.filename) - break - elif value.filetype == filetype: - iu = key - fname = os.path.basename(value.filename) - if pop_key: - self.add_pop_key_list(iu) - break - return iu, fname - - def _output_msg(self, i, add=True): - if add: - txt1 = 'Adding' - txt2 = 'to' - else: - txt1 = 'Removing' - txt2 = 'from' - msg = '{} {} '.format(txt1, self.output_fnames[i]) + \ - '(unit={}) '.format(self.output_units[i]) + \ - '{} the output list.'.format(txt2) - print(msg) - - def add_output_file(self, unit, fname=None, extension='cbc', - binflag=True, package=None): - """ - Add an ascii or binary output file for a package - - Parameters - ---------- - unit : int - unit number of external array - fname : str - filename of external array. (default is None) - extension : str - extension to use for the cell-by-cell file. Only used if fname - is None. (default is cbc) - binflag : bool - boolean flag indicating if the output file is a binary file. - Default is True - package : str - string that defines the package the output file is attached to. 
- Default is None - - """ - add_cbc = False - if unit > 0: - add_cbc = True - # determine if the file is in external_units - if abs(unit) in self.external_units: - idx = self.external_units.index(abs(unit)) - if fname is None: - fname = os.path.basename(self.external_fnames[idx]) - binflag = self.external_binflag[idx] - self.remove_external(unit=abs(unit)) - # determine if the unit exists in the output data - if abs(unit) in self.output_units: - add_cbc = False - idx = self.output_units.index(abs(unit)) - # determine if binflag has changed - if binflag is not self.output_binflag[idx]: - add_cbc = True - if add_cbc: - self.remove_output(unit=abs(unit)) - else: - if package is not None: - self.output_packages[idx].append(package) - - if add_cbc: - if fname is None: - fname = self.name + '.' + extension - # check if this file name exists for a different unit number - if fname in self.output_fnames: - idx = self.output_fnames.index(fname) - iut = self.output_units[idx] - if iut != unit: - # include unit number in fname if package has - # not been passed - if package is None: - fname = self.name + '.{}.'.format(unit) \ - + extension - # include package name in fname - else: - fname = self.name + '.{}.'.format(package) \ - + extension - else: - fname = os.path.basename(fname) - self.add_output(fname, unit, binflag=binflag, package=package) - return - - def add_output(self, fname, unit, binflag=False, package=None): - """ - Assign an external array so that it will be listed as a DATA or - DATA(BINARY) entry in the name file. This will allow an outside - file package to refer to it. - - Parameters - ---------- - fname : str - filename of external array - unit : int - unit number of external array - binflag : boolean - binary or not. (default is False) - - """ - if fname in self.output_fnames: - if self.verbose: - msg = "BaseModel.add_output() warning: " + \ - "replacing existing filename {}".format(fname) - print(msg) - idx = self.output_fnames.index(fname) - if self.verbose: - self._output_msg(idx, add=False) - self.output_fnames.pop(idx) - self.output_units.pop(idx) - self.output_binflag.pop(idx) - self.output_packages.pop(idx) - - self.output_fnames.append(fname) - self.output_units.append(unit) - self.output_binflag.append(binflag) - if package is not None: - self.output_packages.append([package]) - else: - self.output_packages.append([]) - - if self.verbose: - self._output_msg(-1, add=True) - - return - - def remove_output(self, fname=None, unit=None): - """ - Remove an output file from the model by specifying either the - file name or the unit number. - - Parameters - ---------- - fname : str - filename of output array - unit : int - unit number of output array - - """ - if fname is not None: - for i, e in enumerate(self.output_fnames): - if fname in e: - if self.verbose: - self._output_msg(i, add=False) - self.output_fnames.pop(i) - self.output_units.pop(i) - self.output_binflag.pop(i) - self.output_packages.pop(i) - elif unit is not None: - for i, u in enumerate(self.output_units): - if u == unit: - if self.verbose: - self._output_msg(i, add=False) - self.output_fnames.pop(i) - self.output_units.pop(i) - self.output_binflag.pop(i) - self.output_packages.pop(i) - else: - msg = ' either fname or unit must be passed to remove_output()' - raise Exception(msg) - return - - def get_output(self, fname=None, unit=None): - """ - Get an output file from the model by specifying either the - file name or the unit number. 
- - Parameters - ---------- - fname : str - filename of output array - unit : int - unit number of output array - - """ - if fname is not None: - for i, e in enumerate(self.output_fnames): - if fname in e: - return self.output_units[i] - return None - elif unit is not None: - for i, u in enumerate(self.output_units): - if u == unit: - return self.output_fnames[i] - return None - else: - msg = ' either fname or unit must be passed to get_output()' - raise Exception(msg) - return - - def set_output_attribute(self, fname=None, unit=None, attr=None): - """ - Set a variable in an output file from the model by specifying either - the file name or the unit number and a dictionary with attributes - to change. - - Parameters - ---------- - fname : str - filename of output array - unit : int - unit number of output array - - """ - idx = None - if fname is not None: - for i, e in enumerate(self.output_fnames): - if fname in e: - idx = i - break - return None - elif unit is not None: - for i, u in enumerate(self.output_units): - if u == unit: - idx = i - break - else: - msg = ' either fname or unit must be passed ' + \ - ' to set_output_attribute()' - raise Exception(msg) - if attr is not None: - if idx is not None: - for key, value in attr.items: - if key == 'binflag': - self.output_binflag[idx] = value - elif key == 'fname': - self.output_fnames[idx] = value - elif key == 'unit': - self.output_units[idx] = value - return - - def get_output_attribute(self, fname=None, unit=None, attr=None): - """ - Get a attribute for an output file from the model by specifying either - the file name or the unit number. - - Parameters - ---------- - fname : str - filename of output array - unit : int - unit number of output array - - """ - idx = None - if fname is not None: - for i, e in enumerate(self.output_fnames): - if fname in e: - idx = i - break - return None - elif unit is not None: - for i, u in enumerate(self.output_units): - if u == unit: - idx = i - break - else: - raise Exception( - ' either fname or unit must be passed ' + - ' to set_output_attribute()') - v = None - if attr is not None: - if idx is not None: - if attr == 'binflag': - v = self.output_binflag[idx] - elif attr == 'fname': - v = self.output_fnames[idx] - elif attr == 'unit': - v = self.output_units[idx] - return v - - def add_external(self, fname, unit, binflag=False, output=False): - """ - Assign an external array so that it will be listed as a DATA or - DATA(BINARY) entry in the name file. This will allow an outside - file package to refer to it. - - Parameters - ---------- - fname : str - filename of external array - unit : int - unit number of external array - binflag : boolean - binary or not. 
(default is False) - - """ - if fname in self.external_fnames: - if self.verbose: - msg = "BaseModel.add_external() warning: " + \ - "replacing existing filename {}".format(fname) - print(msg) - idx = self.external_fnames.index(fname) - self.external_fnames.pop(idx) - self.external_units.pop(idx) - self.external_binflag.pop(idx) - self.external_output.pop(idx) - if unit in self.external_units: - if self.verbose: - msg = "BaseModel.add_external() warning: " + \ - "replacing existing unit {}".format(unit) - print(msg) - idx = self.external_units.index(unit) - self.external_fnames.pop(idx) - self.external_units.pop(idx) - self.external_binflag.pop(idx) - self.external_output.pop(idx) - - self.external_fnames.append(fname) - self.external_units.append(unit) - self.external_binflag.append(binflag) - self.external_output.append(output) - return - - def remove_external(self, fname=None, unit=None): - """ - Remove an external file from the model by specifying either the - file name or the unit number. - - Parameters - ---------- - fname : str - filename of external array - unit : int - unit number of external array - - """ - plist = [] - if fname is not None: - for i, e in enumerate(self.external_fnames): - if fname in e: - plist.append(i) - elif unit is not None: - for i, u in enumerate(self.external_units): - if u == unit: - plist.append(i) - else: - msg = ' either fname or unit must be passed to remove_external()' - raise Exception(msg) - # remove external file - j = 0 - for i in plist: - ipos = i - j - self.external_fnames.pop(ipos) - self.external_units.pop(ipos) - self.external_binflag.pop(ipos) - self.external_output.pop(ipos) - j += 1 - return - - def add_existing_package(self, filename, ptype=None, - copy_to_model_ws=True): - """ - Add an existing package to a model instance. - - Parameters - ---------- - - filename : str - the name of the file to add as a package - ptype : optional - the model package type (e.g. "lpf", "wel", etc). If None, - then the file extension of the filename arg is used - copy_to_model_ws : bool - flag to copy the package file into the model_ws directory. - - Returns - ------- - None - - """ - if ptype is None: - ptype = filename.split('.')[-1] - ptype = str(ptype).upper() - - # for pak in self.packagelist: - # if ptype in pak.name: - # print("BaseModel.add_existing_package() warning: " +\ - # "replacing existing package {0}".format(ptype)) - class Obj(object): - pass - - fake_package = Obj() - fake_package.write_file = lambda: None - fake_package.extra = [''] - fake_package.name = [ptype] - fake_package.extension = [filename.split('.')[-1]] - fake_package.unit_number = [self.next_ext_unit()] - if copy_to_model_ws: - base_filename = os.path.split(filename)[-1] - fake_package.file_name = [base_filename] - shutil.copy2(filename, os.path.join(self.model_ws, base_filename)) - else: - fake_package.file_name = [filename] - fake_package.allowDuplicates = True - self.add_package(fake_package) - - def get_name_file_entries(self): - """ - Get a string representation of the name file. - - Parameters - ---------- - - """ - lines = [] - for p in self.packagelist: - for i in range(len(p.name)): - if p.unit_number[i] == 0: - continue - s = '{:14s} '.format(p.name[i]) + \ - '{:5d} '.format(p.unit_number[i]) + \ - '{}'.format(p.file_name[i]) - if p.extra[i]: - s += ' ' + p.extra[i] - lines.append(s) - return '\n'.join(lines) + '\n' - - def has_package(self, name): - """ - Check if package name is in package list. 
- - Parameters - ---------- - name : str - Name of the package, 'DIS', 'BAS6', etc. (case-insensitive). - - Returns - ------- - bool - True if package name exists, otherwise False if not found. - - """ - if not name: - raise ValueError('invalid package name') - name = name.upper() - for p in self.packagelist: - for pn in p.name: - if pn.upper() == name: - return True - return False - - def get_package(self, name): - """ - Get a package. - - Parameters - ---------- - name : str - Name of the package, 'RIV', 'LPF', etc. (case-insensitive). - - Returns - ------- - pp : Package object - Package object of type :class:`flopy.pakbase.Package` - - """ - if not name: - raise ValueError('invalid package name') - name = name.upper() - for pp in (self.packagelist): - if pp.name[0].upper() == name: - return pp - return None - - def set_version(self, version): - self.version = version.lower() - - # check that this is a valid model version - if self.version not in list(self.version_types.keys()): - err = 'Error: Unsupported model ' + \ - 'version ({}).'.format(self.version) + \ - ' Valid model versions are:' - for v in list(self.version_types.keys()): - err += ' {}'.format(v) - raise Exception(err) - - # set namefile heading - heading = '# Name file for ' + \ - '{}, '.format(self.version_types[self.version]) + \ - 'generated by Flopy version {}.'.format(__version__) - self.heading = heading - - # set heading for each package - for p in self.get_package_list(): - pak = self.get_package(p) - heading = '# {} package for '.format(pak.name[0]) + \ - '{}, '.format(self.version_types[self.version]) + \ - 'generated by Flopy version {}.'.format(__version__) - - pak.heading = heading - - return None - - def change_model_ws(self, new_pth=None, reset_external=False): - """ - Change the model work space. - - Parameters - ---------- - new_pth : str - Location of new model workspace. If this path does not exist, - it will be created. (default is None, which will be assigned to - the present working directory). - - Returns - ------- - val : list of strings - Can be used to see what packages are in the model, and can then - be used with get_package to pull out individual packages. 
- - """ - if new_pth is None: - new_pth = os.getcwd() - if not os.path.exists(new_pth): - try: - line = '\ncreating model workspace...\n' + \ - ' {}'.format(new_pth) - print(line) - os.makedirs(new_pth) - except: - line = '\n{} not valid, workspace-folder '.format(new_pth) - raise OSError(line) - # line = '\n{} not valid, workspace-folder '.format(new_pth) + \ - # 'was changed to {}\n'.format(os.getcwd()) - # print(line) - # new_pth = os.getcwd() - - # --reset the model workspace - old_pth = self._model_ws - self._model_ws = new_pth - line = '\nchanging model workspace...\n {}\n'.format(new_pth) - sys.stdout.write(line) - # reset the paths for each package - for pp in (self.packagelist): - pp.fn_path = os.path.join(self.model_ws, pp.file_name[0]) - - # create the external path (if needed) - if hasattr(self, "external_path") and self.external_path is not None \ - and not os.path.exists(os.path.join(self._model_ws, - self.external_path)): - pth = os.path.join(self._model_ws, self.external_path) - os.makedirs(pth) - if reset_external: - self._reset_external(pth, old_pth) - elif reset_external: - self._reset_external(self._model_ws, old_pth) - return None - - def _reset_external(self, pth, old_pth): - new_ext_fnames = [] - for ext_file, output in zip(self.external_fnames, - self.external_output): - # new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1]) - # this is a wicked mess - if output: - # new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1]) - new_ext_file = ext_file - else: - # fpth = os.path.abspath(os.path.join(old_pth, ext_file)) - # new_ext_file = os.path.relpath(fpth, os.path.abspath(pth)) - fdir = os.path.dirname(ext_file) - if fdir == '': - fpth = os.path.abspath(os.path.join(old_pth, ext_file)) - else: - fpth = ext_file - ao = os.path.abspath(os.path.dirname(fpth)) - ep = os.path.abspath(pth) - relp = os.path.relpath(ao, ep) - new_ext_file = os.path.join(relp, os.path.basename(ext_file)) - new_ext_fnames.append(new_ext_file) - self.external_fnames = new_ext_fnames - - @property - def model_ws(self): - return copy.deepcopy(self._model_ws) - - def _set_name(self, value): - """ - Set model name - - Parameters - ---------- - value : str - Name to assign to model. - - """ - self.__name = str(value) - self.namefile = self.__name + '.' + self.namefile_ext - for p in self.packagelist: - for i in range(len(p.extension)): - p.file_name[i] = self.__name + '.' 
+ p.extension[i] - p.fn_path = os.path.join(self.model_ws, p.file_name[0]) - - def __setattr__(self, key, value): - if key == "free_format_input": - # if self.bas6 is not None: - # self.bas6.ifrefm = value - super(BaseModel, self).__setattr__(key, value) - elif key == "name": - self._set_name(value) - elif key == "model_ws": - self.change_model_ws(value) - elif key == "sr": - assert isinstance(value, utils.reference.SpatialReference) - warnings.warn( - "SpatialReference has been deprecated.", - category=DeprecationWarning) - if self.dis is not None: - self.dis.sr = value - else: - raise Exception("cannot set SpatialReference -" - "ModflowDis not found") - elif key == "tr": - assert isinstance(value, - discretization.reference.TemporalReference) - if self.dis is not None: - self.dis.tr = value - else: - raise Exception("cannot set TemporalReference -" - "ModflowDis not found") - elif key == "start_datetime": - if self.dis is not None: - self.dis.start_datetime = value - self.tr.start_datetime = value - else: - raise Exception("cannot set start_datetime -" - "ModflowDis not found") - else: - super(BaseModel, self).__setattr__(key, value) - - def run_model(self, silent=False, pause=False, report=False, - normal_msg='normal termination'): - """ - This method will run the model using subprocess.Popen. - - Parameters - ---------- - silent : boolean - Echo run information to screen (default is True). - pause : boolean, optional - Pause upon completion (default is False). - report : boolean, optional - Save stdout lines to a list (buff) which is returned - by the method . (default is False). - normal_msg : str - Normal termination message used to determine if the - run terminated normally. (default is 'normal termination') - - Returns - ------- - (success, buff) - success : boolean - buff : list of lines of stdout - - """ - - return run_model(self.exe_name, self.namefile, model_ws=self.model_ws, - silent=silent, pause=pause, report=report, - normal_msg=normal_msg) - - def load_results(self): - - print('load_results not implemented') - - return None - - def write_input(self, SelPackList=False, check=False): - """ - Write the input. 
- - Parameters - ---------- - SelPackList : False or list of packages - - """ - if check: - # run check prior to writing input - self.check(f='{}.chk'.format(self.name), verbose=self.verbose, - level=1) - - # reset the model to free_format if parameter substitution was - # performed on a model load - if self.parameter_load and not self.free_format_input: - if self.verbose: - print('\nResetting free_format_input to True to ' + - 'preserve the precision of the parameter data.') - self.free_format_input = True - - if self.verbose: - print('\nWriting packages:') - - if SelPackList == False: - for p in self.packagelist: - if self.verbose: - print(' Package: ', p.name[0]) - # prevent individual package checks from running after - # model-level package check above - # otherwise checks are run twice - # or the model level check procedure would have to be split up - # or each package would need a check argument, - # or default for package level check would have to be False - try: - p.write_file(check=False) - except TypeError: - p.write_file() - else: - for pon in SelPackList: - for i, p in enumerate(self.packagelist): - if pon in p.name: - if self.verbose: - print(' Package: ', p.name[0]) - try: - p.write_file(check=False) - except TypeError: - p.write_file() - break - if self.verbose: - print(' ') - # write name file - self.write_name_file() - # os.chdir(org_dir) - return - - def write_name_file(self): - """ - Every Package needs its own writenamefile function - - """ - raise Exception( - 'IMPLEMENTATION ERROR: writenamefile must be overloaded') - - def set_model_units(self): - """ - Every model needs its own set_model_units method - - """ - raise Exception( - 'IMPLEMENTATION ERROR: set_model_units must be overloaded') - - @property - def name(self): - """ - Get model name - - Returns - ------- - name : str - name of model - - """ - return copy.deepcopy(self.__name) - - def add_pop_key_list(self, key): - """ - Add a external file unit number to a list that will be used to remove - model output (typically binary) files from ext_unit_dict. - - Parameters - ---------- - key : int - file unit number - - Returns - ------- - - Examples - -------- - - """ - if key not in self.pop_key_list: - self.pop_key_list.append(key) - - def check(self, f=None, verbose=True, level=1): - """ - Check model data for common errors. - - Parameters - ---------- - f : str or file handle - String defining file name or file handle for summary file - of check method output. If a string is passed a file handle - is created. If f is None, check method does not write - results to a summary file. (default is None) - verbose : bool - Boolean flag used to determine if check method results are - written to the screen - level : int - Check method analysis level. If level=0, summary checks are - performed. If level=1, full checks are performed. 
- - Returns - ------- - None - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow.load('model.nam') - >>> m.check() - """ - - # check instance for model-level check - chk = utils.check(self, f=f, verbose=verbose, level=level) - # check for unit number conflicts - package_units = {} - duplicate_units = {} - for p in self.packagelist: - for i in range(len(p.name)): - if p.unit_number[i] != 0: - if p.unit_number[i] in package_units.values(): - duplicate_units[p.name[i]] = p.unit_number[i] - otherpackage = [k for k, v in package_units.items() - if v == p.unit_number[i]][0] - duplicate_units[otherpackage] = p.unit_number[i] - if len(duplicate_units) > 0: - for k, v in duplicate_units.items(): - chk._add_to_summary('Error', package=k, value=v, - desc='unit number conflict') - else: - chk.passed.append('Unit number conflicts') - - return self._check(chk, level) - - def plot(self, SelPackList=None, **kwargs): - """ - Plot 2-D, 3-D, transient 2-D, and stress period list (MfList) - model input data - - Parameters - ---------- - SelPackList : bool or list - List of of packages to plot. If SelPackList=None all packages - are plotted. (default is None) - **kwargs : dict - filename_base : str - Base file name that will be used to automatically generate file - names for output image files. Plots will be exported as image - files if file_name_base is not None. (default is None) - file_extension : str - Valid matplotlib.pyplot file extension for savefig(). Only used - if filename_base is not None. (default is 'png') - mflay : int - MODFLOW zero-based layer number to return. If None, then all - all layers will be included. (default is None) - kper : int - MODFLOW zero-based stress period number to return. - (default is zero) - key : str - MfList dictionary key. (default is None) - - Returns - ---------- - axes : list - Empty list is returned if filename_base is not None. Otherwise - a list of matplotlib.pyplot.axis are returned. - - See Also - -------- - - Notes - ----- - - Examples - -------- - >>> import flopy - >>> ml = flopy.modflow.Modflow.load('test.nam') - >>> ml.plot() - - """ - from flopy.plot import PlotUtilities - - axes = PlotUtilities._plot_model_helper(self, SelPackList=SelPackList, - **kwargs) - return axes - - def to_shapefile(self, filename, package_names=None, **kwargs): - """ - Wrapper function for writing a shapefile for the model grid. If - package_names is not None, then search through the requested packages - looking for arrays that can be added to the shapefile as attributes - - Parameters - ---------- - filename : string - name of the shapefile to write - package_names : list of package names (e.g. ["dis","lpf"]) - Packages to export data arrays to shapefile. (default is None) - - Returns - ------- - None - - Examples - -------- - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> m.to_shapefile('model.shp', SelPackList) - - """ - warnings.warn("to_shapefile() is deprecated. use .export()") - self.export(filename, package_names=package_names) - return - - -def run_model(exe_name, namefile, model_ws='./', - silent=False, pause=False, report=False, - normal_msg='normal termination', use_async=False, - cargs=None): - """ - This function will run the model using subprocess.Popen. It - communicates with the model's stdout asynchronously and reports - progress to the screen with timestamps - - Parameters - ---------- - exe_name : str - Executable name (with path, if necessary) to run. - namefile : str - Namefile of model to run. 
The namefile must be the - filename of the namefile without the path. Namefile can be None - to allow programs that do not require a control file (name file) - to be passed as a command line argument. - model_ws : str - Path to the location of the namefile. (default is the - current working directory - './') - silent : boolean - Echo run information to screen (default is True). - pause : boolean, optional - Pause upon completion (default is False). - report : boolean, optional - Save stdout lines to a list (buff) which is returned - by the method . (default is False). - normal_msg : str or list - Normal termination message used to determine if the - run terminated normally. More than one message can be provided using - a list. (Default is 'normal termination') - use_async : boolean - asynchronously read model stdout and report with timestamps. good for - models that take long time to run. not good for models that run - really fast - cargs : str or list of strings - additional command line arguments to pass to the executable. - Default is None - Returns - ------- - (success, buff) - success : boolean - buff : list of lines of stdout - - """ - success = False - buff = [] - - # convert normal_msg to a list of lower case str for comparison - if isinstance(normal_msg, str): - normal_msg = [normal_msg] - for idx, s in enumerate(normal_msg): - normal_msg[idx] = s.lower() - - # Check to make sure that program and namefile exist - exe = which(exe_name) - if exe is None: - import platform - if platform.system() in 'Windows': - if not exe_name.lower().endswith('.exe'): - exe = which(exe_name + '.exe') - if exe is None: - s = 'The program {} does not exist or is not executable.'.format( - exe_name) - raise Exception(s) - else: - if not silent: - s = 'FloPy is using the following ' + \ - ' executable to run the model: {}'.format(exe) - print(s) - - if namefile is not None: - if not os.path.isfile(os.path.join(model_ws, namefile)): - s = 'The namefile for this model ' + \ - 'does not exists: {}'.format(namefile) - raise Exception(s) - - # simple little function for the thread to target - def q_output(output, q): - for line in iter(output.readline, b''): - q.put(line) - # time.sleep(1) - # output.close() - - # create a list of arguments to pass to Popen - argv = [exe_name] - if namefile is not None: - argv.append(namefile) - - # add additional arguments to Popen arguments - if cargs is not None: - if isinstance(cargs, str): - cargs = [cargs] - for t in cargs: - argv.append(t) - - # run the model with Popen - proc = Popen(argv, stdout=PIPE, stderr=STDOUT, cwd=model_ws) - - if not use_async: - while True: - line = proc.stdout.readline().decode('utf-8') - if line == '' and proc.poll() is not None: - break - if line: - for msg in normal_msg: - if msg in line.lower(): - success = True - break - line = line.rstrip('\r\n') - if not silent: - print(line) - if report: - buff.append(line) - else: - break - return success, buff - - # some tricks for the async stdout reading - q = Queue.Queue() - thread = threading.Thread(target=q_output, args=(proc.stdout, q)) - thread.daemon = True - thread.start() - - failed_words = ["fail", "error"] - last = datetime.now() - lastsec = 0. 
- while True: - try: - line = q.get_nowait() - except Queue.Empty: - pass - else: - if line == '': - break - line = line.decode().lower().strip() - if line != '': - now = datetime.now() - dt = now - last - tsecs = dt.total_seconds() - lastsec - line = "(elapsed:{0})-->{1}".format(tsecs, line) - lastsec = tsecs + lastsec - buff.append(line) - if not silent: - print(line) - for fword in failed_words: - if fword in line: - success = False - break - if proc.poll() is not None: - break - proc.wait() - thread.join(timeout=1) - buff.extend(proc.stdout.readlines()) - proc.stdout.close() - - for line in buff: - for msg in normal_msg: - if msg in line.lower(): - print("success") - success = True - break - - if pause: - input('Press Enter to continue...') - return success, buff +""" +mbase module + This module contains the base model class from which + all of the other models inherit from. + +""" + +from __future__ import print_function +import abc +import sys +import os +import shutil +import threading +import warnings +import queue as Queue + +from datetime import datetime +from shutil import which +from subprocess import Popen, PIPE, STDOUT +import copy +import numpy as np +from flopy import utils, discretization +from .version import __version__ +from .discretization.modeltime import ModelTime +from .discretization.grid import Grid + +# Global variables +iconst = 1 # Multiplier for individual array elements in integer and real arrays read by MODFLOW's U2DREL, U1DREL and U2DINT. +iprn = -1 # Printout flag. If >= 0 then array values read are printed in listing file. + + +class FileDataEntry(object): + def __init__(self, fname, unit, binflag=False, output=False, package=None): + self.fname = fname + self.unit = unit + self.binflag = binflag + self.output = output + self.package = package + + +class FileData(object): + def __init__(self): + self.file_data = [] + return + + def add_file(self, fname, unit, binflag=False, output=False, package=None): + ipop = [] + for idx, file_data in enumerate(self.file_data): + if file_data.fname == fname or file_data.unit == unit: + ipop.append(idx) + + self.file_data.append(FileDataEntry(fname, unit, binflag=binflag, + output=output, package=package)) + return + + +class ModelInterface(object): + def __init__(self): + self._mg_resync = True + self._modelgrid = None + + def update_modelgrid(self): + if self._modelgrid is not None: + self._modelgrid = Grid(proj4=self._modelgrid.proj4, + xoff=self._modelgrid.xoffset, + yoff=self._modelgrid.yoffset, + angrot=self._modelgrid.angrot) + self._mg_resync = True + + @property + @abc.abstractmethod + def modelgrid(self): + raise NotImplementedError( + 'must define modelgrid in child ' + 'class to use this base class') + + @property + @abc.abstractmethod + def packagelist(self): + raise NotImplementedError( + 'must define packagelist in child ' + 'class to use this base class') + + @property + @abc.abstractmethod + def namefile(self): + raise NotImplementedError( + 'must define namefile in child ' + 'class to use this base class') + + @property + @abc.abstractmethod + def model_ws(self): + raise NotImplementedError( + 'must define model_ws in child ' + 'class to use this base class') + + @property + @abc.abstractmethod + def exename(self): + raise NotImplementedError( + 'must define exename in child ' + 'class to use this base class') + + @property + @abc.abstractmethod + def version(self): + raise NotImplementedError( + 'must define version in child ' + 'class to use this base class') + + @property + @abc.abstractmethod + def 
+    def solver_tols(self):
+        raise NotImplementedError(
+            'must define solver_tols in child '
+            'class to use this base class')
+
+    @abc.abstractmethod
+    def export(self, f, **kwargs):
+        raise NotImplementedError(
+            'must define export in child '
+            'class to use this base class')
+
+    @property
+    @abc.abstractmethod
+    def laytyp(self):
+        raise NotImplementedError(
+            'must define laytyp in child '
+            'class to use this base class')
+
+    @property
+    @abc.abstractmethod
+    def hdry(self):
+        raise NotImplementedError(
+            'must define hdry in child '
+            'class to use this base class')
+
+    @property
+    @abc.abstractmethod
+    def hnoflo(self):
+        raise NotImplementedError(
+            'must define hnoflo in child '
+            'class to use this base class')
+
+    @property
+    @abc.abstractmethod
+    def laycbd(self):
+        raise NotImplementedError(
+            'must define laycbd in child '
+            'class to use this base class')
+
+    @property
+    @abc.abstractmethod
+    def verbose(self):
+        raise NotImplementedError(
+            'must define verbose in child '
+            'class to use this base class')
+
+    @abc.abstractmethod
+    def check(self, f=None, verbose=True, level=1):
+        raise NotImplementedError(
+            'must define check in child '
+            'class to use this base class')
+
+    def get_package_list(self, ftype=None):
+        """
+        Get a list of all the package names.
+
+        Parameters
+        ----------
+        ftype : str
+            Type of package, 'RIV', 'LPF', etc.
+
+        Returns
+        -------
+        val : list of strings
+            Can be used to see what packages are in the model, and can then
+            be used with get_package to pull out individual packages.
+
+        """
+        val = []
+        for pp in self.packagelist:
+            if ftype is None:
+                val.append(pp.name[0].upper())
+            elif pp.package_type.lower() == ftype:
+                val.append(pp.name[0].upper())
+        return val
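+
+    # Usage sketch for the two package accessors above, assuming a concrete
+    # subclass (e.g. flopy.modflow.Modflow) and a hypothetical 'model.nam':
+    # >>> import flopy
+    # >>> m = flopy.modflow.Modflow.load('model.nam')
+    # >>> m.get_package_list()    # e.g. ['DIS', 'BAS6', 'LPF', 'WEL']
+    # >>> lpf = m.get_package('LPF')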
+
+    def _check(self, chk, level=1):
+        """
+        Check model data for common errors.
+
+        Parameters
+        ----------
+        chk : check object
+            Model-level check instance to which package-level check
+            results are appended.
+        level : int
+            Check method analysis level. If level=0, summary checks are
+            performed. If level=1, full checks are performed.
+
+        Returns
+        -------
+        chk : check object
+
+        Examples
+        --------
+
+        >>> import flopy
+        >>> m = flopy.modflow.Modflow.load('model.nam')
+        >>> m.check()
+        """
+
+        # check instance for model-level check
+        results = {}
+
+        for p in self.packagelist:
+            if chk.package_check_levels.get(p.name[0].lower(), 0) <= level:
+                results[p.name[0]] = p.check(f=None, verbose=False,
+                                             level=level - 1,
+                                             checktype=chk.__class__)
+
+        # model level checks
+        # solver check
+        if self.version in chk.solver_packages.keys():
+            solvers = set(chk.solver_packages[self.version]).intersection(
+                set(self.get_package_list()))
+            if not solvers:
+                chk._add_to_summary('Error', desc='\r No solver package',
+                                    package='model')
+            elif len(list(solvers)) > 1:
+                for s in solvers:
+                    chk._add_to_summary('Error',
+                                        desc='\r Multiple solver packages',
+                                        package=s)
+            else:
+                chk.passed.append('Compatible solver package')
+
+        # add package check results to model level check summary
+        for r in results.values():
+            if r is not None and r.summary_array is not None:  # currently SFR doesn't have one
+                chk.summary_array = np.append(chk.summary_array,
+                                              r.summary_array).view(
+                    np.recarray)
+                chk.passed += ['{} package: {}'.format(r.package.name[0], psd)
+                               for psd in r.passed]
+        chk.summarize()
+        return chk
+
+
+class BaseModel(ModelInterface):
+    """
+    MODFLOW based models base class
+
+    Parameters
+    ----------
+
+    modelname : string
+        Name of the model. Model files will be given this name.
+        (default is 'modflowtest')
+
+    namefile_ext : string
+        name file extension (default is 'nam')
+
+    exe_name : string
+        name of the modflow executable
+
+    model_ws : string
+        Path to the model workspace. Model files will be created in this
+        directory. Default is None, in which case model_ws is assigned
+        to the current working directory.
+
+    """
+
+    def __init__(self, modelname='modflowtest', namefile_ext='nam',
+                 exe_name='mf2k.exe', model_ws=None,
+                 structured=True, verbose=False, **kwargs):
+        """
+        BaseModel init
+        """
+        ModelInterface.__init__(self)
+        self.__name = modelname
+        self.namefile_ext = namefile_ext or ''
+        self._namefile = self.__name + '.' + self.namefile_ext
+        self._packagelist = []
+        self.heading = ''
+        self.exe_name = exe_name
+        self._verbose = verbose
+        self.external_path = None
+        self.external_extension = 'ref'
+        if model_ws is None:
+            model_ws = os.getcwd()
+        if not os.path.exists(model_ws):
+            try:
+                os.makedirs(model_ws)
+            except:
+                print(
+                    '\n{0:s} not valid, workspace-folder was changed to {1:s}\n'.format(
+                        model_ws, os.getcwd()))
+                model_ws = os.getcwd()
+        self._model_ws = model_ws
+        self.structured = structured
+        self.pop_key_list = []
+        self.cl_params = ''
+
+        # check for reference info in kwargs
+        # we are just carrying these until a dis package is added
+        xll = kwargs.pop("xll", None)
+        yll = kwargs.pop("yll", None)
+        self._xul = kwargs.pop("xul", None)
+        self._yul = kwargs.pop("yul", None)
+        if self._xul is not None or self._yul is not None:
+            warnings.warn('xul/yul have been deprecated. Use xll/yll instead.',
+                          DeprecationWarning)
+
+        self._rotation = kwargs.pop("rotation", 0.0)
+        self._proj4_str = kwargs.pop("proj4_str", None)
+        self._start_datetime = kwargs.pop("start_datetime", "1-1-1970")
+
+        # build model discretization objects
+        self._modelgrid = Grid(proj4=self._proj4_str, xoff=xll, yoff=yll,
+                               angrot=self._rotation)
+        self._modeltime = None
+
+        # Model file information
+        self.__onunit__ = 10
+        # external option stuff
+        self.array_free_format = True
+        self.free_format_input = True
+        self.parameter_load = False
+        self.array_format = None
+        self.external_fnames = []
+        self.external_units = []
+        self.external_binflag = []
+        self.external_output = []
+        self.package_units = []
+        self._next_ext_unit = None
+
+        # output files
+        self.output_fnames = []
+        self.output_units = []
+        self.output_binflag = []
+        self.output_packages = []
+
+        return
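+
+    # Construction sketch: BaseModel is not used directly; concrete
+    # subclasses such as flopy.modflow.Modflow forward their arguments here.
+    # The 'demo' name and './ws' workspace below are hypothetical; xll/yll
+    # and rotation are carried until a DIS package is added.
+    # >>> import flopy
+    # >>> m = flopy.modflow.Modflow(modelname='demo', model_ws='./ws',
+    # ...                           xll=1000., yll=2000., rotation=15.)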
+
+    @property
+    def modeltime(self):
+        raise NotImplementedError(
+            'must define modeltime in child '
+            'class to use this base class')
+
+    @property
+    def modelgrid(self):
+        raise NotImplementedError(
+            'must define modelgrid in child '
+            'class to use this base class')
+
+    @property
+    def packagelist(self):
+        return self._packagelist
+
+    @packagelist.setter
+    def packagelist(self, packagelist):
+        self._packagelist = packagelist
+
+    @property
+    def namefile(self):
+        return self._namefile
+
+    @namefile.setter
+    def namefile(self, namefile):
+        self._namefile = namefile
+
+    @property
+    def model_ws(self):
+        return self._model_ws
+
+    @model_ws.setter
+    def model_ws(self, model_ws):
+        self._model_ws = model_ws
+
+    @property
+    def exename(self):
+        return self._exename
+
+    @exename.setter
+    def exename(self, exename):
+        self._exename = exename
+
+    @property
+    def version(self):
+        return self._version
+
+    @version.setter
+    def version(self, version):
+        self._version = version
+
+    @property
+    def verbose(self):
+        return self._verbose
+
+    @verbose.setter
+    def verbose(self, verbose):
+        self._verbose = verbose
+
+    @property
+    def laytyp(self):
+        if self.get_package("LPF") is not None:
+            return self.get_package("LPF").laytyp.array
+        if self.get_package("BCF6") is not None:
+            return self.get_package("BCF6").laycon.array
+        if self.get_package("UPW") is not None:
+            return self.get_package("UPW").laytyp.array
+
+        return None
+
+    @property
+    def hdry(self):
+        if self.get_package("LPF") is not None:
+            return self.get_package("LPF").hdry
+        if self.get_package("BCF6") is not None:
+            return self.get_package("BCF6").hdry
+        if self.get_package("UPW") is not None:
+            return self.get_package("UPW").hdry
+        return None
+
+    @property
+    def hnoflo(self):
+        try:
+            bas6 = self.get_package("BAS6")
+            return bas6.hnoflo
+        except AttributeError:
+            return None
+
+    @property
+    def laycbd(self):
+        try:
+            dis = self.get_package("DIS")
+            return dis.laycbd.array
+        except AttributeError:
+            return None
+
+    # we don't need these - no need for controlled access to array_free_format
+    # def set_free_format(self, value=True):
+    #     """
+    #     Set the free format flag for the model instance
+    #
+    #     Parameters
+    #     ----------
+    #     value : bool
+    #         Boolean value to set free format flag for model.
+    #         (default is True)
+    #
+    #     Returns
+    #     -------
+    #
+    #     """
+    #     if not isinstance(value, bool):
+    #         print('Error: set_free_format passed value must be a boolean')
+    #         return False
+    #     self.array_free_format = value
+    #
+    # def get_free_format(self):
+    #     """
+    #     Return the free format flag for the model
+    #
+    #     Returns
+    #     -------
+    #     out : bool
+    #         Free format flag for the model
+    #
+    #     """
+    #     return self.array_free_format
+
+    def next_unit(self, i=None):
+        if i is not None:
+            self.__onunit__ = i - 1
+        else:
+            self.__onunit__ += 1
+        return self.__onunit__
+
+    def next_ext_unit(self):
+        """
+        Function to encapsulate next_ext_unit attribute
+
+        """
+        next_unit = self._next_ext_unit + 1
+        self._next_ext_unit += 1
+        return next_unit
+
+    def export(self, f, **kwargs):
+        """
+        Method to export a model to netcdf or shapefile based on the
+        extension of the file name (.shp for shapefile, .nc for netcdf)
+
+        Parameters
+        ----------
+        f : str
+            filename
+        kwargs : keyword arguments
+            modelgrid : flopy.discretization.Grid instance
+                user supplied modelgrid which can be used for exporting
+                in lieu of the modelgrid associated with the model object
+
+        Returns
+        -------
+        None or Netcdf object
+
+        """
+        from .export import utils
+        return utils.model_export(f, self, **kwargs)
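+
+    # Export sketch: the target format is inferred from the file extension.
+    # Writing NetCDF assumes a NetCDF library (e.g. netCDF4) is installed;
+    # 'demo.shp' and 'demo.nc' are hypothetical file names.
+    # >>> m.export('demo.shp')       # shapefile of model grid/arrays
+    # >>> nc = m.export('demo.nc')   # a Netcdf object is returned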
+ + """ + for i, pp in enumerate(self.packagelist): + if pname.upper() in pp.name: + if self.verbose: + print('removing Package: ', pp.name) + + # Remove the package object from the model's packagelist + p = self.packagelist.pop(i) + + # Remove the package unit number from the list of package + # units stored with the model + for iu in p.unit_number: + if iu in self.package_units: + self.package_units.remove(iu) + return + raise StopIteration( + 'Package name ' + pname + ' not found in Package list') + + def __getattr__(self, item): + """ + __getattr__ - syntactic sugar + + Parameters + ---------- + item : str + 3 character package name (case insensitive) or "sr" to access + the SpatialReference instance of the ModflowDis object + + + Returns + ------- + sr : SpatialReference instance + pp : Package object + Package object of type :class:`flopy.pakbase.Package` + + Note + ---- + if self.dis is not None, then the spatial reference instance is updated + using self.dis.delr, self.dis.delc, and self.dis.lenuni before being + returned + """ + if item == 'output_packages' or not hasattr(self, 'output_packages'): + raise AttributeError(item) + + if item == 'sr': + if self.dis is not None: + return self.dis.sr + else: + return None + if item == 'tr': + if self.dis is not None: + return self.dis.tr + else: + return None + if item == "start_datetime": + if self.dis is not None: + return self.dis.start_datetime + else: + return None + + # return self.get_package(item) + # to avoid infinite recursion + if item == "_packagelist" or item == "packagelist": + raise AttributeError(item) + pckg = self.get_package(item) + if pckg is not None or item in self.mfnam_packages: + return pckg + if item == 'modelgrid': + return + raise AttributeError(item) + + def get_ext_dict_attr(self, ext_unit_dict=None, unit=None, filetype=None, + pop_key=True): + iu = None + fname = None + if ext_unit_dict is not None: + for key, value in ext_unit_dict.items(): + if key == unit: + iu = key + fname = os.path.basename(value.filename) + break + elif value.filetype == filetype: + iu = key + fname = os.path.basename(value.filename) + if pop_key: + self.add_pop_key_list(iu) + break + return iu, fname + + def _output_msg(self, i, add=True): + if add: + txt1 = 'Adding' + txt2 = 'to' + else: + txt1 = 'Removing' + txt2 = 'from' + msg = '{} {} '.format(txt1, self.output_fnames[i]) + \ + '(unit={}) '.format(self.output_units[i]) + \ + '{} the output list.'.format(txt2) + print(msg) + + def add_output_file(self, unit, fname=None, extension='cbc', + binflag=True, package=None): + """ + Add an ascii or binary output file for a package + + Parameters + ---------- + unit : int + unit number of external array + fname : str + filename of external array. (default is None) + extension : str + extension to use for the cell-by-cell file. Only used if fname + is None. (default is cbc) + binflag : bool + boolean flag indicating if the output file is a binary file. + Default is True + package : str + string that defines the package the output file is attached to. 
+
+    def __getattr__(self, item):
+        """
+        __getattr__ - syntactic sugar
+
+        Parameters
+        ----------
+        item : str
+            3 character package name (case insensitive) or "sr" to access
+            the SpatialReference instance of the ModflowDis object
+
+        Returns
+        -------
+        sr : SpatialReference instance
+        pp : Package object
+            Package object of type :class:`flopy.pakbase.Package`
+
+        Note
+        ----
+        if self.dis is not None, then the spatial reference instance is
+        updated using self.dis.delr, self.dis.delc, and self.dis.lenuni
+        before being returned
+        """
+        if item == 'output_packages' or not hasattr(self, 'output_packages'):
+            raise AttributeError(item)
+
+        if item == 'sr':
+            if self.dis is not None:
+                return self.dis.sr
+            else:
+                return None
+        if item == 'tr':
+            if self.dis is not None:
+                return self.dis.tr
+            else:
+                return None
+        if item == "start_datetime":
+            if self.dis is not None:
+                return self.dis.start_datetime
+            else:
+                return None
+
+        # return self.get_package(item)
+        # to avoid infinite recursion
+        if item == "_packagelist" or item == "packagelist":
+            raise AttributeError(item)
+        pckg = self.get_package(item)
+        if pckg is not None or item in self.mfnam_packages:
+            return pckg
+        if item == 'modelgrid':
+            return
+        raise AttributeError(item)
+
+    def get_ext_dict_attr(self, ext_unit_dict=None, unit=None, filetype=None,
+                          pop_key=True):
+        iu = None
+        fname = None
+        if ext_unit_dict is not None:
+            for key, value in ext_unit_dict.items():
+                if key == unit:
+                    iu = key
+                    fname = os.path.basename(value.filename)
+                    break
+                elif value.filetype == filetype:
+                    iu = key
+                    fname = os.path.basename(value.filename)
+                    if pop_key:
+                        self.add_pop_key_list(iu)
+                    break
+        return iu, fname
+
+    def _output_msg(self, i, add=True):
+        if add:
+            txt1 = 'Adding'
+            txt2 = 'to'
+        else:
+            txt1 = 'Removing'
+            txt2 = 'from'
+        msg = '{} {} '.format(txt1, self.output_fnames[i]) + \
+              '(unit={}) '.format(self.output_units[i]) + \
+              '{} the output list.'.format(txt2)
+        print(msg)
+
+    def add_output_file(self, unit, fname=None, extension='cbc',
+                        binflag=True, package=None):
+        """
+        Add an ascii or binary output file for a package
+
+        Parameters
+        ----------
+        unit : int
+            unit number of external array
+        fname : str
+            filename of external array. (default is None)
+        extension : str
+            extension to use for the cell-by-cell file. Only used if fname
+            is None. (default is cbc)
+        binflag : bool
+            boolean flag indicating if the output file is a binary file.
+            Default is True
+        package : str
+            string that defines the package the output file is attached to.
+            Default is None
+
+        """
+        add_cbc = False
+        if unit > 0:
+            add_cbc = True
+            # determine if the file is in external_units
+            if abs(unit) in self.external_units:
+                idx = self.external_units.index(abs(unit))
+                if fname is None:
+                    fname = os.path.basename(self.external_fnames[idx])
+                binflag = self.external_binflag[idx]
+                self.remove_external(unit=abs(unit))
+            # determine if the unit exists in the output data
+            if abs(unit) in self.output_units:
+                add_cbc = False
+                idx = self.output_units.index(abs(unit))
+                # determine if binflag has changed
+                if binflag is not self.output_binflag[idx]:
+                    add_cbc = True
+                if add_cbc:
+                    self.remove_output(unit=abs(unit))
+                else:
+                    if package is not None:
+                        self.output_packages[idx].append(package)
+
+        if add_cbc:
+            if fname is None:
+                fname = self.name + '.' + extension
+                # check if this file name exists for a different unit number
+                if fname in self.output_fnames:
+                    idx = self.output_fnames.index(fname)
+                    iut = self.output_units[idx]
+                    if iut != unit:
+                        # include unit number in fname if package has
+                        # not been passed
+                        if package is None:
+                            fname = self.name + '.{}.'.format(unit) \
+                                    + extension
+                        # include package name in fname
+                        else:
+                            fname = self.name + '.{}.'.format(package) \
+                                    + extension
+            else:
+                fname = os.path.basename(fname)
+            self.add_output(fname, unit, binflag=binflag, package=package)
+        return
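+
+    # Output-file sketch: attach a cell-by-cell budget file on an arbitrary
+    # unit number (53 here) to a named package.
+    # >>> m.add_output_file(53, extension='cbc', binflag=True, package='LPF')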
+
+    def add_output(self, fname, unit, binflag=False, package=None):
+        """
+        Assign an external array so that it will be listed as a DATA or
+        DATA(BINARY) entry in the name file. This will allow an outside
+        file package to refer to it.
+
+        Parameters
+        ----------
+        fname : str
+            filename of external array
+        unit : int
+            unit number of external array
+        binflag : boolean
+            binary or not. (default is False)
+        package : str
+            name of the package the output file is attached to.
+            (default is None)
+
+        """
+        if fname in self.output_fnames:
+            if self.verbose:
+                msg = "BaseModel.add_output() warning: " + \
+                      "replacing existing filename {}".format(fname)
+                print(msg)
+            idx = self.output_fnames.index(fname)
+            if self.verbose:
+                self._output_msg(idx, add=False)
+            self.output_fnames.pop(idx)
+            self.output_units.pop(idx)
+            self.output_binflag.pop(idx)
+            self.output_packages.pop(idx)
+
+        self.output_fnames.append(fname)
+        self.output_units.append(unit)
+        self.output_binflag.append(binflag)
+        if package is not None:
+            self.output_packages.append([package])
+        else:
+            self.output_packages.append([])
+
+        if self.verbose:
+            self._output_msg(-1, add=True)
+
+        return
+
+    def remove_output(self, fname=None, unit=None):
+        """
+        Remove an output file from the model by specifying either the
+        file name or the unit number.
+
+        Parameters
+        ----------
+        fname : str
+            filename of output array
+        unit : int
+            unit number of output array
+
+        """
+        if fname is not None:
+            for i, e in enumerate(self.output_fnames):
+                if fname in e:
+                    if self.verbose:
+                        self._output_msg(i, add=False)
+                    self.output_fnames.pop(i)
+                    self.output_units.pop(i)
+                    self.output_binflag.pop(i)
+                    self.output_packages.pop(i)
+        elif unit is not None:
+            for i, u in enumerate(self.output_units):
+                if u == unit:
+                    if self.verbose:
+                        self._output_msg(i, add=False)
+                    self.output_fnames.pop(i)
+                    self.output_units.pop(i)
+                    self.output_binflag.pop(i)
+                    self.output_packages.pop(i)
+        else:
+            msg = ' either fname or unit must be passed to remove_output()'
+            raise Exception(msg)
+        return
+
+    def get_output(self, fname=None, unit=None):
+        """
+        Get an output file from the model by specifying either the
+        file name or the unit number.
+
+        Parameters
+        ----------
+        fname : str
+            filename of output array
+        unit : int
+            unit number of output array
+
+        """
+        if fname is not None:
+            for i, e in enumerate(self.output_fnames):
+                if fname in e:
+                    return self.output_units[i]
+            return None
+        elif unit is not None:
+            for i, u in enumerate(self.output_units):
+                if u == unit:
+                    return self.output_fnames[i]
+            return None
+        else:
+            msg = ' either fname or unit must be passed to get_output()'
+            raise Exception(msg)
+
+    def set_output_attribute(self, fname=None, unit=None, attr=None):
+        """
+        Set a variable in an output file from the model by specifying either
+        the file name or the unit number and a dictionary with attributes
+        to change.
+
+        Parameters
+        ----------
+        fname : str
+            filename of output array
+        unit : int
+            unit number of output array
+        attr : dict
+            dictionary of attributes ('binflag', 'fname', or 'unit')
+            to change
+
+        """
+        idx = None
+        if fname is not None:
+            for i, e in enumerate(self.output_fnames):
+                if fname in e:
+                    idx = i
+                    break
+        elif unit is not None:
+            for i, u in enumerate(self.output_units):
+                if u == unit:
+                    idx = i
+                    break
+        else:
+            msg = ' either fname or unit must be passed ' + \
+                  ' to set_output_attribute()'
+            raise Exception(msg)
+        if attr is not None:
+            if idx is not None:
+                for key, value in attr.items():
+                    if key == 'binflag':
+                        self.output_binflag[idx] = value
+                    elif key == 'fname':
+                        self.output_fnames[idx] = value
+                    elif key == 'unit':
+                        self.output_units[idx] = value
+        return
+
+    def get_output_attribute(self, fname=None, unit=None, attr=None):
+        """
+        Get an attribute for an output file from the model by specifying
+        either the file name or the unit number.
+
+        Parameters
+        ----------
+        fname : str
+            filename of output array
+        unit : int
+            unit number of output array
+        attr : str
+            attribute name ('binflag', 'fname', or 'unit') to return
+
+        """
+        idx = None
+        if fname is not None:
+            for i, e in enumerate(self.output_fnames):
+                if fname in e:
+                    idx = i
+                    break
+        elif unit is not None:
+            for i, u in enumerate(self.output_units):
+                if u == unit:
+                    idx = i
+                    break
+        else:
+            raise Exception(
+                ' either fname or unit must be passed '
+                ' to get_output_attribute()')
+        v = None
+        if attr is not None:
+            if idx is not None:
+                if attr == 'binflag':
+                    v = self.output_binflag[idx]
+                elif attr == 'fname':
+                    v = self.output_fnames[idx]
+                elif attr == 'unit':
+                    v = self.output_units[idx]
+        return v
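+
+    # Lookup sketch: map between output file names and unit numbers, and
+    # read back a stored attribute. Names and units here are hypothetical.
+    # >>> m.get_output(unit=53)                  # -> 'demo.cbc'
+    # >>> m.get_output_attribute(fname='demo.cbc', attr='binflag')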
+
+    def add_external(self, fname, unit, binflag=False, output=False):
+        """
+        Assign an external array so that it will be listed as a DATA or
+        DATA(BINARY) entry in the name file. This will allow an outside
+        file package to refer to it.
+
+        Parameters
+        ----------
+        fname : str
+            filename of external array
+        unit : int
+            unit number of external array
+        binflag : boolean
+            binary or not. (default is False)
+
+        """
+        if fname in self.external_fnames:
+            if self.verbose:
+                msg = "BaseModel.add_external() warning: " + \
+                      "replacing existing filename {}".format(fname)
+                print(msg)
+            idx = self.external_fnames.index(fname)
+            self.external_fnames.pop(idx)
+            self.external_units.pop(idx)
+            self.external_binflag.pop(idx)
+            self.external_output.pop(idx)
+        if unit in self.external_units:
+            if self.verbose:
+                msg = "BaseModel.add_external() warning: " + \
+                      "replacing existing unit {}".format(unit)
+                print(msg)
+            idx = self.external_units.index(unit)
+            self.external_fnames.pop(idx)
+            self.external_units.pop(idx)
+            self.external_binflag.pop(idx)
+            self.external_output.pop(idx)
+
+        self.external_fnames.append(fname)
+        self.external_units.append(unit)
+        self.external_binflag.append(binflag)
+        self.external_output.append(output)
+        return
+
+    def remove_external(self, fname=None, unit=None):
+        """
+        Remove an external file from the model by specifying either the
+        file name or the unit number.
+
+        Parameters
+        ----------
+        fname : str
+            filename of external array
+        unit : int
+            unit number of external array
+
+        """
+        plist = []
+        if fname is not None:
+            for i, e in enumerate(self.external_fnames):
+                if fname in e:
+                    plist.append(i)
+        elif unit is not None:
+            for i, u in enumerate(self.external_units):
+                if u == unit:
+                    plist.append(i)
+        else:
+            msg = ' either fname or unit must be passed to remove_external()'
+            raise Exception(msg)
+        # remove external file
+        j = 0
+        for i in plist:
+            ipos = i - j
+            self.external_fnames.pop(ipos)
+            self.external_units.pop(ipos)
+            self.external_binflag.pop(ipos)
+            self.external_output.pop(ipos)
+            j += 1
+        return
+
+    def add_existing_package(self, filename, ptype=None,
+                             copy_to_model_ws=True):
+        """
+        Add an existing package to a model instance.
+
+        Parameters
+        ----------
+
+        filename : str
+            the name of the file to add as a package
+        ptype : optional
+            the model package type (e.g. "lpf", "wel", etc). If None,
+            then the file extension of the filename arg is used
+        copy_to_model_ws : bool
+            flag to copy the package file into the model_ws directory.
+
+        Returns
+        -------
+        None
+
+        """
+        if ptype is None:
+            ptype = filename.split('.')[-1]
+        ptype = str(ptype).upper()
+
+        # for pak in self.packagelist:
+        #     if ptype in pak.name:
+        #         print("BaseModel.add_existing_package() warning: " +\
+        #               "replacing existing package {0}".format(ptype))
+        class Obj(object):
+            pass
+
+        fake_package = Obj()
+        fake_package.write_file = lambda: None
+        fake_package.extra = ['']
+        fake_package.name = [ptype]
+        fake_package.extension = [filename.split('.')[-1]]
+        fake_package.unit_number = [self.next_ext_unit()]
+        if copy_to_model_ws:
+            base_filename = os.path.split(filename)[-1]
+            fake_package.file_name = [base_filename]
+            shutil.copy2(filename, os.path.join(self.model_ws, base_filename))
+        else:
+            fake_package.file_name = [filename]
+        fake_package.allowDuplicates = True
+        self.add_package(fake_package)
+
+    def get_name_file_entries(self):
+        """
+        Get a string representation of the name file.
+
+        """
+        lines = []
+        for p in self.packagelist:
+            for i in range(len(p.name)):
+                if p.unit_number[i] == 0:
+                    continue
+                s = '{:14s} '.format(p.name[i]) + \
+                    '{:5d} '.format(p.unit_number[i]) + \
+                    '{}'.format(p.file_name[i])
+                if p.extra[i]:
+                    s += ' ' + p.extra[i]
+                lines.append(s)
+        return '\n'.join(lines) + '\n'
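+
+    # Name-file sketch: each registered package contributes a
+    # '<name> <unit> <file>' line; output below is illustrative for a
+    # hypothetical model named 'demo'.
+    # >>> print(m.get_name_file_entries())
+    # DIS               11 demo.dis
+    # BAS6              13 demo.bas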
+
+        Parameters
+        ----------
+        name : str
+            Name of the package, 'DIS', 'BAS6', etc. (case-insensitive).
+
+        Returns
+        -------
+        bool
+            True if package name exists, otherwise False.
+
+        """
+        if not name:
+            raise ValueError('invalid package name')
+        name = name.upper()
+        for p in self.packagelist:
+            for pn in p.name:
+                if pn.upper() == name:
+                    return True
+        return False
+
+    def get_package(self, name):
+        """
+        Get a package.
+
+        Parameters
+        ----------
+        name : str
+            Name of the package, 'RIV', 'LPF', etc. (case-insensitive).
+
+        Returns
+        -------
+        pp : Package object
+            Package object of type :class:`flopy.pakbase.Package`
+
+        """
+        if not name:
+            raise ValueError('invalid package name')
+        name = name.upper()
+        for pp in self.packagelist:
+            if pp.name[0].upper() == name:
+                return pp
+        return None
+
+    def set_version(self, version):
+        self.version = version.lower()
+
+        # check that this is a valid model version
+        if self.version not in list(self.version_types.keys()):
+            err = 'Error: Unsupported model ' + \
+                  'version ({}).'.format(self.version) + \
+                  ' Valid model versions are:'
+            for v in list(self.version_types.keys()):
+                err += ' {}'.format(v)
+            raise Exception(err)
+
+        # set namefile heading
+        heading = '# Name file for ' + \
+                  '{}, '.format(self.version_types[self.version]) + \
+                  'generated by Flopy version {}.'.format(__version__)
+        self.heading = heading
+
+        # set heading for each package
+        for p in self.get_package_list():
+            pak = self.get_package(p)
+            heading = '# {} package for '.format(pak.name[0]) + \
+                      '{}, '.format(self.version_types[self.version]) + \
+                      'generated by Flopy version {}.'.format(__version__)
+            pak.heading = heading
+
+        return None
+
+    def change_model_ws(self, new_pth=None, reset_external=False):
+        """
+        Change the model work space.
+
+        Parameters
+        ----------
+        new_pth : str
+            Location of new model workspace. If this path does not exist,
+            it will be created. (default is None, which will be assigned
+            the present working directory)
+        reset_external : bool
+            flag to recompute external file paths relative to the new
+            workspace. (default is False)
+
+        Returns
+        -------
+        None
+
+        """
+        if new_pth is None:
+            new_pth = os.getcwd()
+        if not os.path.exists(new_pth):
+            try:
+                line = '\ncreating model workspace...\n' + \
+                       '   {}'.format(new_pth)
+                print(line)
+                os.makedirs(new_pth)
+            except OSError:
+                line = '\n{} not valid, workspace-folder '.format(new_pth) + \
+                       'could not be created\n'
+                raise OSError(line)
+
+        # reset the model workspace
+        old_pth = self._model_ws
+        self._model_ws = new_pth
+        line = '\nchanging model workspace...\n   {}\n'.format(new_pth)
+        sys.stdout.write(line)
+        # reset the paths for each package
+        for pp in self.packagelist:
+            pp.fn_path = os.path.join(self.model_ws, pp.file_name[0])
+
+        # create the external path (if needed)
+        if hasattr(self, "external_path") and self.external_path is not None \
+                and not os.path.exists(os.path.join(self._model_ws,
+                                                    self.external_path)):
+            pth = os.path.join(self._model_ws, self.external_path)
+            os.makedirs(pth)
+            if reset_external:
+                self._reset_external(pth, old_pth)
+        elif reset_external:
+            self._reset_external(self._model_ws, old_pth)
+        return None
+
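+    # _reset_external rebuilds self.external_fnames for a new workspace:
+    # output files keep their original names, while input files are
+    # re-expressed relative to the new path so existing data can be found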
+    def _reset_external(self, pth, old_pth):
+        new_ext_fnames = []
+        for ext_file, output in zip(self.external_fnames,
+                                    self.external_output):
+            if output:
+                # output files are simply written to the new workspace
+                new_ext_file = ext_file
+            else:
+                # make input file paths relative to the new workspace
+                fdir = os.path.dirname(ext_file)
+                if fdir == '':
+                    fpth = os.path.abspath(os.path.join(old_pth, ext_file))
+                else:
+                    fpth = ext_file
+                ao = os.path.abspath(os.path.dirname(fpth))
+                ep = os.path.abspath(pth)
+                relp = os.path.relpath(ao, ep)
+                new_ext_file = os.path.join(relp, os.path.basename(ext_file))
+            new_ext_fnames.append(new_ext_file)
+        self.external_fnames = new_ext_fnames
+
+    @property
+    def model_ws(self):
+        return copy.deepcopy(self._model_ws)
+
+    def _set_name(self, value):
+        """
+        Set model name
+
+        Parameters
+        ----------
+        value : str
+            Name to assign to model.
+
+        """
+        self.__name = str(value)
+        self.namefile = self.__name + '.' + self.namefile_ext
+        for p in self.packagelist:
+            for i in range(len(p.extension)):
+                p.file_name[i] = self.__name + '.' + p.extension[i]
+            p.fn_path = os.path.join(self.model_ws, p.file_name[0])
+
+    def __setattr__(self, key, value):
+        if key == "free_format_input":
+            super(BaseModel, self).__setattr__(key, value)
+        elif key == "name":
+            self._set_name(value)
+        elif key == "model_ws":
+            self.change_model_ws(value)
+        elif key == "sr":
+            assert isinstance(value, utils.reference.SpatialReference)
+            warnings.warn(
+                "SpatialReference has been deprecated.",
+                category=DeprecationWarning)
+            if self.dis is not None:
+                self.dis.sr = value
+            else:
+                raise Exception("cannot set SpatialReference - "
+                                "ModflowDis not found")
+        elif key == "tr":
+            assert isinstance(value,
+                              discretization.reference.TemporalReference)
+            if self.dis is not None:
+                self.dis.tr = value
+            else:
+                raise Exception("cannot set TemporalReference - "
+                                "ModflowDis not found")
+        elif key == "start_datetime":
+            if self.dis is not None:
+                self.dis.start_datetime = value
+                self.tr.start_datetime = value
+            else:
+                raise Exception("cannot set start_datetime - "
+                                "ModflowDis not found")
+        else:
+            super(BaseModel, self).__setattr__(key, value)
+
+    def run_model(self, silent=False, pause=False, report=False,
+                  normal_msg='normal termination'):
+        """
+        This method will run the model using subprocess.Popen.
+
+        Parameters
+        ----------
+        silent : boolean
+            Suppress model output to the screen. (default is False)
+        pause : boolean, optional
+            Pause upon completion (default is False).
+        report : boolean, optional
+            Save stdout lines to a list (buff) which is returned
+            by the method. (default is False)
+        normal_msg : str
+            Normal termination message used to determine if the
+            run terminated normally. (default is 'normal termination')
+
+        Returns
+        -------
+        (success, buff)
+        success : boolean
+        buff : list of lines of stdout
+
+        """
+        return run_model(self.exe_name, self.namefile, model_ws=self.model_ws,
+                         silent=silent, pause=pause, report=report,
+                         normal_msg=normal_msg)
+
+    def load_results(self):
+        print('load_results not implemented')
+        return None
+
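+    # NOTE: write_input runs the model-level check (when requested) exactly
+    # once, then calls each package's write_file with check=False so the
+    # package data are not validated a second time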
+    def write_input(self, SelPackList=False, check=False):
+        """
+        Write the input.
+
+        Parameters
+        ----------
+        SelPackList : False or list of packages
+            List of package names to write. If False (default), all
+            packages are written.
+
+        """
+        if check:
+            # run check prior to writing input
+            self.check(f='{}.chk'.format(self.name), verbose=self.verbose,
+                       level=1)
+
+        # reset the model to free_format if parameter substitution was
+        # performed on a model load
+        if self.parameter_load and not self.free_format_input:
+            if self.verbose:
+                print('\nResetting free_format_input to True to '
+                      'preserve the precision of the parameter data.')
+            self.free_format_input = True
+
+        if self.verbose:
+            print('\nWriting packages:')
+
+        if SelPackList is False:
+            for p in self.packagelist:
+                if self.verbose:
+                    print('   Package: ', p.name[0])
+                # prevent individual package checks from running after the
+                # model-level check above, so checks are not run twice
+                try:
+                    p.write_file(check=False)
+                except TypeError:
+                    p.write_file()
+        else:
+            for pon in SelPackList:
+                for i, p in enumerate(self.packagelist):
+                    if pon in p.name:
+                        if self.verbose:
+                            print('   Package: ', p.name[0])
+                        try:
+                            p.write_file(check=False)
+                        except TypeError:
+                            p.write_file()
+                        break
+        if self.verbose:
+            print(' ')
+        # write name file
+        self.write_name_file()
+        return
+
+    def write_name_file(self):
+        """
+        Every Package needs its own write_name_file function
+
+        """
+        raise Exception(
+            'IMPLEMENTATION ERROR: write_name_file must be overloaded')
+
+    def set_model_units(self):
+        """
+        Every model needs its own set_model_units method
+
+        """
+        raise Exception(
+            'IMPLEMENTATION ERROR: set_model_units must be overloaded')
+
+    @property
+    def name(self):
+        """
+        Get model name
+
+        Returns
+        -------
+        name : str
+            name of model
+
+        """
+        return copy.deepcopy(self.__name)
+
+    def add_pop_key_list(self, key):
+        """
+        Add an external file unit number to a list that will be used to
+        remove model output (typically binary) files from ext_unit_dict.
+
+        Parameters
+        ----------
+        key : int
+            file unit number
+
+        Examples
+        --------
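+        A minimal sketch; the unit number is illustrative:
+
+        >>> ml = flopy.modflow.Modflow()
+        >>> ml.add_pop_key_list(53)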
+
+        """
+        if key not in self.pop_key_list:
+            self.pop_key_list.append(key)
+
+    def check(self, f=None, verbose=True, level=1):
+        """
+        Check model data for common errors.
+
+        Parameters
+        ----------
+        f : str or file handle
+            String defining file name or file handle for summary file
+            of check method output. If a string is passed a file handle
+            is created. If f is None, check method does not write
+            results to a summary file. (default is None)
+        verbose : bool
+            Boolean flag used to determine if check method results are
+            written to the screen.
+        level : int
+            Check method analysis level. If level=0, summary checks are
+            performed. If level=1, full checks are performed.
+
+        Returns
+        -------
+        None
+
+        Examples
+        --------
+
+        >>> import flopy
+        >>> m = flopy.modflow.Modflow.load('model.nam')
+        >>> m.check()
+        """
+        # check instance for model-level check
+        chk = utils.check(self, f=f, verbose=verbose, level=level)
+        # check for unit number conflicts
+        package_units = {}
+        duplicate_units = {}
+        for p in self.packagelist:
+            for i in range(len(p.name)):
+                if p.unit_number[i] != 0:
+                    if p.unit_number[i] in package_units.values():
+                        duplicate_units[p.name[i]] = p.unit_number[i]
+                        otherpackage = [k for k, v in package_units.items()
+                                        if v == p.unit_number[i]][0]
+                        duplicate_units[otherpackage] = p.unit_number[i]
+                    package_units[p.name[i]] = p.unit_number[i]
+        if len(duplicate_units) > 0:
+            for k, v in duplicate_units.items():
+                chk._add_to_summary('Error', package=k, value=v,
+                                    desc='unit number conflict')
+        else:
+            chk.passed.append('Unit number conflicts')
+
+        return self._check(chk, level)
+
+    def plot(self, SelPackList=None, **kwargs):
+        """
+        Plot 2-D, 3-D, transient 2-D, and stress period list (MfList)
+        model input data
+
+        Parameters
+        ----------
+        SelPackList : bool or list
+            List of packages to plot. If SelPackList=None all packages
+            are plotted. (default is None)
+        **kwargs : dict
+            filename_base : str
+                Base file name that will be used to automatically generate
+                file names for output image files. Plots will be exported
+                as image files if file_name_base is not None.
+                (default is None)
+            file_extension : str
+                Valid matplotlib.pyplot file extension for savefig(). Only
+                used if filename_base is not None. (default is 'png')
+            mflay : int
+                MODFLOW zero-based layer number to return. If None, then
+                all layers will be included. (default is None)
+            kper : int
+                MODFLOW zero-based stress period number to return.
+                (default is zero)
+            key : str
+                MfList dictionary key. (default is None)
+
+        Returns
+        -------
+        axes : list
+            Empty list is returned if filename_base is not None. Otherwise
+            a list of matplotlib.pyplot.axis are returned.
+
+        Examples
+        --------
+        >>> import flopy
+        >>> ml = flopy.modflow.Modflow.load('test.nam')
+        >>> ml.plot()
+
+        """
+        from flopy.plot import PlotUtilities
+
+        axes = PlotUtilities._plot_model_helper(self, SelPackList=SelPackList,
+                                                **kwargs)
+        return axes
+
+    def to_shapefile(self, filename, package_names=None, **kwargs):
+        """
+        Wrapper function for writing a shapefile for the model grid. If
+        package_names is not None, then search through the requested packages
+        looking for arrays that can be added to the shapefile as attributes
+
+        Parameters
+        ----------
+        filename : string
+            name of the shapefile to write
+        package_names : list of package names (e.g. ["dis","lpf"])
+            Packages to export data arrays to shapefile. (default is None)
+
+        Returns
+        -------
+        None
+
+        Examples
+        --------
+        >>> import flopy
+        >>> m = flopy.modflow.Modflow()
+        >>> m.to_shapefile('model.shp', package_names=['dis', 'lpf'])
+
+        """
+        warnings.warn("to_shapefile() is deprecated. use .export()")
+        self.export(filename, package_names=package_names)
+        return
+
+
+def run_model(exe_name, namefile, model_ws='./',
+              silent=False, pause=False, report=False,
+              normal_msg='normal termination', use_async=False,
+              cargs=None):
+    """
+    This function will run the model using subprocess.Popen. When use_async
+    is True it communicates with the model's stdout asynchronously and
+    reports progress to the screen with timestamps.
+
+    Parameters
+    ----------
+    exe_name : str
+        Executable name (with path, if necessary) to run.
+    namefile : str
+        Namefile of model to run. The namefile must be the
+        filename of the namefile without the path. Namefile can be None
+        for programs that do not require a control file (name file)
+        to be passed on the command line.
+    model_ws : str
+        Path to the location of the namefile. (default is the
+        current working directory - './')
+    silent : boolean
+        Suppress model output to the screen. (default is False)
+    pause : boolean, optional
+        Pause upon completion (default is False).
+    report : boolean, optional
+        Save stdout lines to a list (buff) which is returned
+        by the method. (default is False)
+    normal_msg : str or list
+        Normal termination message used to determine if the
+        run terminated normally. More than one message can be provided
+        using a list. (default is 'normal termination')
+    use_async : boolean
+        Asynchronously read model stdout and report with timestamps.
+        Useful for models that take a long time to run; unnecessary for
+        models that run quickly.
+    cargs : str or list of strings
+        Additional command line arguments to pass to the executable.
+        (default is None)
+
+    Returns
+    -------
+    (success, buff)
+    success : boolean
+    buff : list of lines of stdout
+
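+    Examples
+    --------
+    A minimal sketch; the executable and namefile names are illustrative:
+
+    >>> success, buff = run_model('mf2005', 'model.nam', model_ws='.',
+    ...                           silent=True, report=True)
+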
+    """
+    success = False
+    buff = []
+
+    # convert normal_msg to a list of lower case str for comparison
+    if isinstance(normal_msg, str):
+        normal_msg = [normal_msg]
+    for idx, s in enumerate(normal_msg):
+        normal_msg[idx] = s.lower()
+
+    # Check to make sure that program and namefile exist
+    exe = which(exe_name)
+    if exe is None:
+        import platform
+        if platform.system() == 'Windows':
+            if not exe_name.lower().endswith('.exe'):
+                exe = which(exe_name + '.exe')
+    if exe is None:
+        s = 'The program {} does not exist or is not executable.'.format(
+            exe_name)
+        raise Exception(s)
+    else:
+        if not silent:
+            s = 'FloPy is using the following executable to run ' + \
+                'the model: {}'.format(exe)
+            print(s)
+
+    if namefile is not None:
+        if not os.path.isfile(os.path.join(model_ws, namefile)):
+            s = 'The namefile for this model ' + \
+                'does not exist: {}'.format(namefile)
+            raise Exception(s)
+
+    # simple little function for the thread to target
+    def q_output(output, q):
+        for line in iter(output.readline, b''):
+            q.put(line)
+
+    # create a list of arguments to pass to Popen
+    argv = [exe_name]
+    if namefile is not None:
+        argv.append(namefile)
+
+    # add additional arguments to Popen arguments
+    if cargs is not None:
+        if isinstance(cargs, str):
+            cargs = [cargs]
+        for t in cargs:
+            argv.append(t)
+
+    # run the model with Popen
+    proc = Popen(argv, stdout=PIPE, stderr=STDOUT, cwd=model_ws)
+
+    if not use_async:
+        while True:
+            line = proc.stdout.readline().decode('utf-8')
+            if line == '' and proc.poll() is not None:
+                break
+            if line:
+                for msg in normal_msg:
+                    if msg in line.lower():
+                        success = True
+                        break
+                line = line.rstrip('\r\n')
+                if not silent:
+                    print(line)
+                if report:
+                    buff.append(line)
+            else:
+                break
+        return success, buff
+
+    # some tricks for the async stdout reading
+    q = Queue.Queue()
+    thread = threading.Thread(target=q_output, args=(proc.stdout, q))
+    thread.daemon = True
+    thread.start()
+
+    failed_words = ["fail", "error"]
+    last = datetime.now()
+    lastsec = 0.
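+    # poll the queue for new stdout lines; stamp each line with the elapsed
+    # time since the previous line and watch for failure keywords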
+    while True:
+        try:
+            line = q.get_nowait()
+        except Queue.Empty:
+            pass
+        else:
+            if line == b'':
+                break
+            line = line.decode().lower().strip()
+            if line != '':
+                now = datetime.now()
+                dt = now - last
+                tsecs = dt.total_seconds() - lastsec
+                line = "(elapsed:{0})-->{1}".format(tsecs, line)
+                lastsec = tsecs + lastsec
+                buff.append(line)
+                if not silent:
+                    print(line)
+                for fword in failed_words:
+                    if fword in line:
+                        success = False
+                        break
+        if proc.poll() is not None:
+            break
+    proc.wait()
+    thread.join(timeout=1)
+    # decode any lines still waiting in the pipe so buff stays a list of str
+    buff.extend([line.decode() for line in proc.stdout.readlines()])
+    proc.stdout.close()
+
+    for line in buff:
+        for msg in normal_msg:
+            if msg in line.lower():
+                print("success")
+                success = True
+                break
+
+    if pause:
+        input('Press Enter to continue...')
+    return success, buff
diff --git a/flopy/mf6/data/dfn/utl-tas.dfn b/flopy/mf6/data/dfn/utl-tas.dfn
index 2a90f1c468..b3c734c638 100644
--- a/flopy/mf6/data/dfn/utl-tas.dfn
+++ b/flopy/mf6/data/dfn/utl-tas.dfn
@@ -1,116 +1,116 @@
-# --------------------- gwf ts attributes ---------------------
-
-block attributes
-name time_series_namerecord
-type record name time_series_name
-shape
-reader urword
-tagged false
-optional false
-longname
-description xxx
-
-block attributes
-name name
-type keyword
-shape
-reader urword
-optional false
-longname
-description xxx
-
-block attributes
-name time_series_name
-type string
-shape any1d
-tagged false
-reader urword
-optional false
-longname
-description Name by which a package references a particular time-array series. The name must be unique among all time-array series used in a package.
-
-block attributes
-name interpolation_methodrecord
-type record method interpolation_method
-shape
-reader urword
-tagged false
-optional true
-longname
-description xxx
-
-block attributes
-name method
-type keyword
-shape
-reader urword
-optional false
-longname
-description xxx
-
-block attributes
-name interpolation_method
-type string
-valid stepwise linear linearend
-shape
-tagged false
-reader urword
-optional false
-longname
-description Interpolation method, which is either STEPWISE or LINEAR.
-
-block attributes
-name sfacrecord
-type record sfac sfacval
-shape
-reader urword
-tagged true
-optional true
-longname
-description xxx
-
-block attributes
-name sfac
-type keyword
-shape
-reader urword
-optional false
-longname
-description xxx
-
-block attributes
-name sfacval
-type double precision
-shape time_series_name
-tagged false
-reader urword
-optional false
-longname
-description Scale factor, which will multiply all array values in time series. SFAC is an optional attribute; if omitted, SFAC = 1.0.
-
-# --------------------- gwf ts time ---------------------
-
-block time
-name time_from_model_start
-type double precision
-block_variable True
-in_record true
-shape
-tagged false
-valid
-reader urword
-optional false
-longname
-description A numeric time relative to the start of the simulation, in the time unit used in the simulation. Times must be strictly increasing.
- -block time -name tas_array -type double precision -tagged false -just_data true -shape (unknown) -reader readarray -optional false -repeating true -longname +# --------------------- gwf ts attributes --------------------- + +block attributes +name time_series_namerecord +type record name time_series_name +shape +reader urword +tagged false +optional false +longname +description xxx + +block attributes +name name +type keyword +shape +reader urword +optional false +longname +description xxx + +block attributes +name time_series_name +type string +shape any1d +tagged false +reader urword +optional false +longname +description Name by which a package references a particular time-array series. The name must be unique among all time-array series used in a package. + +block attributes +name interpolation_methodrecord +type record method interpolation_method +shape +reader urword +tagged false +optional true +longname +description xxx + +block attributes +name method +type keyword +shape +reader urword +optional false +longname +description xxx + +block attributes +name interpolation_method +type string +valid stepwise linear linearend +shape +tagged false +reader urword +optional false +longname +description Interpolation method, which is either STEPWISE or LINEAR. + +block attributes +name sfacrecord +type record sfac sfacval +shape +reader urword +tagged true +optional true +longname +description xxx + +block attributes +name sfac +type keyword +shape +reader urword +optional false +longname +description xxx + +block attributes +name sfacval +type double precision +shape time_series_name +tagged false +reader urword +optional false +longname +description Scale factor, which will multiply all array values in time series. SFAC is an optional attribute; if omitted, SFAC = 1.0. + +# --------------------- gwf ts time --------------------- + +block time +name time_from_model_start +type double precision +block_variable True +in_record true +shape +tagged false +valid +reader urword +optional false +longname +description A numeric time relative to the start of the simulation, in the time unit used in the simulation. Times must be strictly increasing. + +block time +name tas_array +type double precision +tagged false +just_data true +shape (unknown) +reader readarray +optional false +repeating true +longname description An array of numeric, floating-point values, or a constant value, readable by the U2DREL array-reading utility. \ No newline at end of file diff --git a/flopy/mf6/data/dfn/utl-ts.dfn b/flopy/mf6/data/dfn/utl-ts.dfn index 69316d7a17..5f584c59b3 100644 --- a/flopy/mf6/data/dfn/utl-ts.dfn +++ b/flopy/mf6/data/dfn/utl-ts.dfn @@ -1,173 +1,173 @@ -# --------------------- gwf ts attributes --------------------- - -block attributes -name time_series_namerecord -type record names time_series_names -shape -reader urword -tagged false -optional false -longname -description xxx - -block attributes -name names -other_names name -type keyword -shape -reader urword -optional false -longname -description xxx - -block attributes -name time_series_names -type string -shape any1d -tagged false -reader urword -optional false -longname -description Name by which a package references a particular time-array series. The name must be unique among all time-array series used in a package. 
- -block attributes -name interpolation_methodrecord -type record methods interpolation_method -shape -reader urword -tagged false -optional true -longname -description xxx - -block attributes -name methods -type keyword -shape -reader urword -optional false -longname -description xxx - -block attributes -name interpolation_method -type string -valid stepwise linear linearend -shape time_series_names -tagged false -reader urword -optional false -longname -description Interpolation method, which is either STEPWISE or LINEAR. - -block attributes -name interpolation_methodrecord_single -type record method interpolation_method_single -shape -reader urword -tagged false -optional true -longname -description xxx - -block attributes -name method -type keyword -shape -reader urword -optional false -longname -description xxx - -block attributes -name interpolation_method_single -type string -valid stepwise linear linearend -shape -tagged false -reader urword -optional false -longname -description Interpolation method, which is either STEPWISE or LINEAR. - -block attributes -name sfacrecord -type record sfacs sfacval -shape -reader urword -tagged true -optional true -longname -description xxx - -block attributes -name sfacs -type keyword -shape -reader urword -optional false -longname -description xxx - -block attributes -name sfacval -type double precision -shape = \ - VerbosityLevel.normal.value: - print('WARNING: Stress period value {} in package {} is ' - 'greater than the number of stress periods defined ' - 'in nper.'.format(sp_num + 1, - self.structure.get_package())) - return True - - -class MFData(DataInterface): - """ - Base class for all data. This class contains internal objects and methods - that most end users will not need to access directly. - - Parameters - ---------- - sim_data : MFSimulationData - container class for all data for a MF6 simulation - structure : MFDataStructure - defines the structure of the data - enable : bool - whether this data is currently being used - path : tuple - tuple describing path to the data generally in the format (, - , , ) - dimensions : DataDimensions - object used to retrieve dimension information about data - *args, **kwargs : exists to support different child class parameter sets - with extra init parameters - - Attributes - ---------- - _current_key : str - current key defining specific transient dataset to be accessed - - Methods - ------- - new_simulation(sim_data) - points data object to a new simulation - layer_shape() : tuple - returns the shape of the layered dimensions - - See Also - -------- - - Notes - ----- - - Examples - -------- - - - """ - def __init__(self, sim_data, model_or_sim, structure, enable=True, path=None, - dimensions=None, *args, **kwargs): - # initialize - self._current_key = None - self._valid = True - self._simulation_data = sim_data - self._model_or_sim = model_or_sim - self.structure = structure - self.enabled = enable - self.repeating = False - if path is None: - self._path = structure.path - else: - self._path = path - self._data_name = structure.name - self._data_storage = None - self._data_type = structure.type - self._keyword = '' - if self._simulation_data is not None: - self._data_dimensions = DataDimensions(dimensions, structure) - # build a unique path in the simulation dictionary - self._org_path = self._path - index = 0 - while self._path in self._simulation_data.mfdata: - self._path = self._org_path[:-1] + \ - ('{}_{}'.format(self._org_path[-1], index),) - index += 1 - self._structure_init() - # tie 
this to the simulation dictionary - sim_data.mfdata[self._path] = self - - def __repr__(self): - return repr(self._get_storage_obj()) - - def __str__(self): - return str(self._get_storage_obj()) - - @property - def array(self): - kwargs = {'array': True} - return self.get_data(apply_mult=True, **kwargs) - - @property - def name(self): - return self.structure.name - - @property - def model(self): - if self._model_or_sim is not None and \ - self._model_or_sim.type == 'Model': - return self._model_or_sim - else: - return None - - @property - def data_type(self): - raise NotImplementedError( - 'must define dat_type in child ' - 'class to use this base class') - - @property - def dtype(self): - raise NotImplementedError( - 'must define dtype in child ' - 'class to use this base class') - - @property - def plotable(self): - raise NotImplementedError( - 'must define plotable in child ' - 'class to use this base class') - - def _resync(self): - model = self.model - if model is not None: - model._mg_resync = True - - @staticmethod - def _tas_info(tas_str): - if isinstance(tas_str, str): - lst_str = tas_str.split(' ') - if len(lst_str) >= 2 and lst_str[0].lower() == 'timearrayseries': - return lst_str[1], lst_str[0] - return None, None - - def export(self, f, **kwargs): - from flopy.export import utils - - if self.data_type == DataType.array2d and len(self.array.shape) == 2 \ - and self.array.shape[1] > 0: - return utils.array2d_export(f, self, **kwargs) - elif self.data_type == DataType.array3d: - return utils.array3d_export(f, self, **kwargs) - elif self.data_type == DataType.transient2d: - return utils.transient2d_export(f, self, **kwargs) - elif self.data_type == DataType.transientlist: - return utils.mflist_export(f, self, **kwargs) - return utils.transient2d_export(f, self, **kwargs) - - def new_simulation(self, sim_data): - self._simulation_data = sim_data - self._data_storage = None - - def find_dimension_size(self, dimension_name): - parent_path = self._path[:-1] - result = self._simulation_data.mfdata.find_in_path(parent_path, - dimension_name) - if result[0] is not None: - return [result[0].get_data()] - else: - return [] - - def aux_var_names(self): - return self.find_dimension_size('auxnames') - - def layer_shape(self): - layers = [] - layer_dims = self.structure.data_item_structures[0] \ - .layer_dims - if len(layer_dims) == 1: - layers.append(self._data_dimensions.get_model_grid(). 
\ - num_layers()) - else: - for layer in layer_dims: - if layer == 'nlay': - # get the layer size from the model grid - try: - model_grid = self._data_dimensions.get_model_grid() - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self.path, - 'getting model grid', - self.structure.name, - inspect.stack()[0][3], - type_, value_, traceback_, None, - self.sim_data.debug, ex) - - if model_grid.grid_type() == DiscretizationType.DISU: - layers.append(1) - else: - num_layers = model_grid.num_layers() - if num_layers is not None: - layers.append(num_layers) - else: - layers.append(1) - else: - # search data dictionary for layer size - layer_size = self.find_dimension_size(layer) - if len(layer_size) == 1: - layers.append(layer_size[0]) - else: - message = 'Unable to find the size of expected layer ' \ - 'dimension {} '.format(layer) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.structure.get_model(), - self.structure.get_package(), - self.structure.path, 'resolving layer dimensions', - self.structure.name, inspect.stack()[0][3], - type_, value_, traceback_, message, - self._simulation_data.debug) - return tuple(layers) - - def get_description(self, description=None, data_set=None): - if data_set is None: - data_set = self.structure - for data_item in data_set.data_items.values(): - if data_item.type == DatumType.record: - # record within a record, recurse - description = self.get_description(description, data_item) - else: - if data_item.description: - if description: - description = '{}\n{}'.format(description, - data_item.description) - else: - description = data_item.description - return description - - def load(self, first_line, file_handle, block_header, - pre_data_comments=None, external_file_info=None): - self.enabled = True - - def is_valid(self): - # TODO: Implement for each data type - return self._valid - - def _structure_init(self, data_set=None): - if data_set is None: - # Initialize variables - data_set = self.structure - for data_item_struct in data_set.data_item_structures: - if data_item_struct.type == DatumType.record: - # this is a record within a record, recurse - self._structure_init(data_item_struct) - else: - if len(self.structure.data_item_structures) == 1: - # data item name is a keyword to look for - self._keyword = data_item_struct.name - - def _get_constant_formatting_string(self, const_val, layer, data_type, - suffix='\n'): - sim_data = self._simulation_data - const_format = list(sim_data.constant_formatting) - const_format[1] = to_string(const_val, data_type, self._simulation_data, - self._data_dimensions) - return '{}{}'.format(sim_data.indent_string.join(const_format), suffix) - - def _get_aux_var_name(self, aux_var_index): - aux_var_names = self._data_dimensions.package_dim.get_aux_variables() - # TODO: Verify that this works for multi-dimensional layering - return aux_var_names[0][aux_var_index[0]+1] - - def _get_storage_obj(self): - return self._data_storage - - -class MFMultiDimVar(MFData): - def __init__(self, sim_data, model_or_sim, structure, enable=True, - path=None, dimensions=None): - super(MFMultiDimVar, self).__init__(sim_data, model_or_sim, structure, - enable, path, dimensions) - - @property - def data_type(self): - raise NotImplementedError( - 'must define dat_type in child ' - 'class to use this base class') - - @property - def plotable(self): - raise NotImplementedError( - 'must define plotable in child ' - 'class 
to use this base class') - - def _get_internal_formatting_string(self, layer): - storage = self._get_storage_obj() - if layer is None: - layer_storage = storage.layer_storage.first_item() - else: - layer_storage = storage.layer_storage[layer] - int_format = ['INTERNAL'] - data_type = self.structure.get_datum_type(return_enum_type=True) - if storage.data_structure_type != DataStructureType.recarray: - int_format.append('FACTOR') - if layer_storage.factor is not None: - if data_type == DatumType.integer: - int_format.append(str(int(layer_storage.factor))) - else: - int_format.append(str(layer_storage.factor)) - else: - if data_type == DatumType.double_precision: - int_format.append('1.0') - else: - int_format.append('1') - if layer_storage.iprn is not None: - int_format.append('IPRN') - int_format.append(str(layer_storage.iprn)) - return self._simulation_data.indent_string.join(int_format) - - def _get_external_formatting_string(self, layer, ext_file_action): - storage = self._get_storage_obj() - if layer is None: - layer_storage = storage.layer_storage.first_item() - else: - layer_storage = storage.layer_storage[layer] - # resolve external file path - file_mgmt = self._simulation_data.mfpath - model_name = self._data_dimensions.package_dim.model_dim[0].model_name - ext_file_path = file_mgmt.get_updated_path(layer_storage.fname, - model_name, - ext_file_action) - layer_storage.fname = ext_file_path - ext_format = ['OPEN/CLOSE', "'{}'".format(ext_file_path)] - if storage.data_structure_type != DataStructureType.recarray: - if layer_storage.factor is not None: - data_type = self.structure.get_datum_type(return_enum_type=True) - ext_format.append('FACTOR') - if data_type == DatumType.integer: - ext_format.append(str(int(layer_storage.factor))) - else: - ext_format.append(str(layer_storage.factor)) - if layer_storage.binary: - ext_format.append('(BINARY)') - if layer_storage.iprn is not None: - ext_format.append('IPRN') - ext_format.append(str(layer_storage.iprn)) - return '{}\n'.format( - self._simulation_data.indent_string.join(ext_format)) +from operator import itemgetter +import sys +import inspect +from ..mfbase import MFDataException, MFInvalidTransientBlockHeaderException, \ + FlopyException, VerbosityLevel +from ..data.mfstructure import DatumType +from ..coordinates.modeldimensions import DataDimensions, DiscretizationType +from ...datbase import DataInterface, DataType +from .mfdatastorage import DataStructureType +from .mfdatautil import to_string + + +class MFTransient(object): + """ + Parent class for transient data. This class contains internal objects and + methods that most end users will not need to access directly. + + Parameters + ---------- + *args, **kwargs + Parameters present to support multiple child class interfaces + + Attributes + ---------- + _current_key : str + current key defining specific transient dataset to be accessed + _data_storage : dict + dictionary of DataStorage objects + + Methods + ------- + add_transient_key(transient_key) + verifies the validity of the transient key about to be added + get_data_prep(transient_key) + called prior to the child class getting data. ensures that the data + retrieved will come from the dataset of a specific transient_key + _set_data_prep(transient_key) + called prior to the child class setting data. ensures that the data + set will go to the dataset of a specific transient_key + _get_file_entry_prep(transient_key) + called prior to the child class getting the file entry. 
ensures that + the file entry only reflects the data from a specific transient_key + _load_prep(first_line, file_handle, block_header, pre_data_comments) + called prior to the child class loading data from a file. figures out + what transient_key to store the data under + _append_list_as_record_prep(record, transient_key) + called prior to the child class appending a list to a record. ensures + that the list gets appended to the record associated with the key + transient_key + _update_record_prep(transient_key) + called prior to the child class updating a record. ensures that the + record being updated is the one associated with the key transient_key + get_active_key_list() : list + returns a list of the active transient keys + _verify_sp(sp_num) : bool + returns true of the stress period sp_num is within the expected range + of stress periods for this model + + See Also + -------- + + Notes + ----- + + Examples + -------- + + + """ + def __init__(self, *args, **kwargs): + self._current_key = None + self._data_storage = None + + def add_transient_key(self, transient_key): + if isinstance(transient_key, int): + self._verify_sp(transient_key) + + def update_transient_key(self, old_transient_key, new_transient_key): + if old_transient_key in self._data_storage: + # replace dictionary key + self._data_storage[new_transient_key] = \ + self._data_storage[old_transient_key] + del self._data_storage[old_transient_key] + if self._current_key == old_transient_key: + # update current key + self._current_key = new_transient_key + + def _transient_setup(self, data_storage): + self._data_storage = data_storage + + def get_data_prep(self, transient_key=0): + if isinstance(transient_key, int): + self._verify_sp(transient_key) + self._current_key = transient_key + if transient_key not in self._data_storage: + self.add_transient_key(transient_key) + + def _set_data_prep(self, data, transient_key=0): + if isinstance(transient_key, int): + self._verify_sp(transient_key) + if isinstance(transient_key, tuple): + self._current_key = transient_key[0] + else: + self._current_key = transient_key + if self._current_key not in self._data_storage: + self.add_transient_key(self._current_key) + + def _get_file_entry_prep(self, transient_key=0): + if isinstance(transient_key, int): + self._verify_sp(transient_key) + self._current_key = transient_key + + def _load_prep(self, block_header): + # transient key is first non-keyword block variable + transient_key = block_header.get_transient_key() + if isinstance(transient_key, int): + if not self._verify_sp(transient_key): + message = 'Invalid transient key "{}" in block' \ + ' "{}"'.format(transient_key, block_header.name) + raise MFInvalidTransientBlockHeaderException(message) + if transient_key not in self._data_storage: + self.add_transient_key(transient_key) + self._current_key = transient_key + + def _append_list_as_record_prep(self, record, transient_key=0): + if isinstance(transient_key, int): + self._verify_sp(transient_key) + self._current_key = transient_key + if transient_key not in self._data_storage: + self.add_transient_key(transient_key) + + def _update_record_prep(self, transient_key=0): + if isinstance(transient_key, int): + self._verify_sp(transient_key) + self._current_key = transient_key + + def get_active_key_list(self): + return sorted(self._data_storage.items(), key=itemgetter(0)) + + def get_active_key_dict(self): + key_dict = {} + for key in self._data_storage.keys(): + key_dict[key] = True + return key_dict + + def _verify_sp(self, sp_num): + if 
self._path[0].lower() == 'nam':
+            return True
+        if ('tdis', 'dimensions', 'nper') not in self._simulation_data.mfdata:
+            raise FlopyException('Could not find number of stress periods ('
+                                 'nper).')
+        nper = self._simulation_data.mfdata[('tdis', 'dimensions', 'nper')]
+        if sp_num > nper.get_data():
+            if self._simulation_data.verbosity_level.value >= \
+                    VerbosityLevel.normal.value:
+                print('WARNING: Stress period value {} in package {} is '
+                      'greater than the number of stress periods defined '
+                      'in nper.'.format(sp_num + 1,
+                                        self.structure.get_package()))
+        return True
+
+
+class MFData(DataInterface):
+    """
+    Base class for all data. This class contains internal objects and methods
+    that most end users will not need to access directly.
+
+    Parameters
+    ----------
+    sim_data : MFSimulationData
+        container class for all data for a MF6 simulation
+    structure : MFDataStructure
+        defines the structure of the data
+    enable : bool
+        whether this data is currently being used
+    path : tuple
+        tuple describing path to the data, generally in the format
+        (<model>, <package>, <block>, <data>)
+    dimensions : DataDimensions
+        object used to retrieve dimension information about data
+    *args, **kwargs : exists to support different child class parameter sets
+        with extra init parameters
+
+    Attributes
+    ----------
+    _current_key : str
+        current key defining specific transient dataset to be accessed
+
+    Methods
+    -------
+    new_simulation(sim_data)
+        points data object to a new simulation
+    layer_shape() : tuple
+        returns the shape of the layered dimensions
+
+    """
+    def __init__(self, sim_data, model_or_sim, structure, enable=True,
+                 path=None, dimensions=None, *args, **kwargs):
+        # initialize
+        self._current_key = None
+        self._valid = True
+        self._simulation_data = sim_data
+        self._model_or_sim = model_or_sim
+        self.structure = structure
+        self.enabled = enable
+        self.repeating = False
+        if path is None:
+            self._path = structure.path
+        else:
+            self._path = path
+        self._data_name = structure.name
+        self._data_storage = None
+        self._data_type = structure.type
+        self._keyword = ''
+        if self._simulation_data is not None:
+            self._data_dimensions = DataDimensions(dimensions, structure)
+            # build a unique path in the simulation dictionary
+            self._org_path = self._path
+            index = 0
+            while self._path in self._simulation_data.mfdata:
+                self._path = self._org_path[:-1] + \
+                             ('{}_{}'.format(self._org_path[-1], index),)
+                index += 1
+        self._structure_init()
+        # tie this to the simulation dictionary
+        sim_data.mfdata[self._path] = self
+
+    def __repr__(self):
+        return repr(self._get_storage_obj())
+
+    def __str__(self):
+        return str(self._get_storage_obj())
+
+    @property
+    def array(self):
+        kwargs = {'array': True}
+        return self.get_data(apply_mult=True, **kwargs)
+
+    @property
+    def name(self):
+        return self.structure.name
+
+    @property
+    def model(self):
+        if self._model_or_sim is not None and \
+                self._model_or_sim.type == 'Model':
+            return self._model_or_sim
+        else:
+            return None
+
+    @property
+    def data_type(self):
+        raise NotImplementedError(
+            'must define data_type in child '
+            'class to use this base class')
+
+    @property
+    def dtype(self):
+        raise NotImplementedError(
+            'must define dtype in child '
+            'class to use this base class')
+
+    @property
+    def plotable(self):
+        raise NotImplementedError(
+            'must define plotable in child '
+            'class to use this base class')
+
+    def _resync(self):
+        model = self.model
+        if model is not None:
+            model._mg_resync = True
+
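+    # _tas_info parses a string of the form "TIMEARRAYSERIES <name>" and
+    # returns a (name, keyword) tuple, or (None, None) if the string does
+    # not hold time-array-series information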
+    @staticmethod
+    def _tas_info(tas_str):
+        if isinstance(tas_str, str):
+            lst_str = tas_str.split(' ')
+            if len(lst_str) >= 2 and lst_str[0].lower() == 'timearrayseries':
+                return lst_str[1], lst_str[0]
+        return None, None
+
+    def export(self, f, **kwargs):
+        from flopy.export import utils
+
+        if self.data_type == DataType.array2d and len(self.array.shape) == 2 \
+                and self.array.shape[1] > 0:
+            return utils.array2d_export(f, self, **kwargs)
+        elif self.data_type == DataType.array3d:
+            return utils.array3d_export(f, self, **kwargs)
+        elif self.data_type == DataType.transient2d:
+            return utils.transient2d_export(f, self, **kwargs)
+        elif self.data_type == DataType.transientlist:
+            return utils.mflist_export(f, self, **kwargs)
+        return utils.transient2d_export(f, self, **kwargs)
+
+    def new_simulation(self, sim_data):
+        self._simulation_data = sim_data
+        self._data_storage = None
+
+    def find_dimension_size(self, dimension_name):
+        parent_path = self._path[:-1]
+        result = self._simulation_data.mfdata.find_in_path(parent_path,
+                                                           dimension_name)
+        if result[0] is not None:
+            return [result[0].get_data()]
+        else:
+            return []
+
+    def aux_var_names(self):
+        return self.find_dimension_size('auxnames')
+
+    def layer_shape(self):
+        layers = []
+        layer_dims = self.structure.data_item_structures[0].layer_dims
+        if len(layer_dims) == 1:
+            layers.append(
+                self._data_dimensions.get_model_grid().num_layers())
+        else:
+            for layer in layer_dims:
+                if layer == 'nlay':
+                    # get the layer size from the model grid
+                    try:
+                        model_grid = self._data_dimensions.get_model_grid()
+                    except Exception as ex:
+                        type_, value_, traceback_ = sys.exc_info()
+                        raise MFDataException(self.structure.get_model(),
+                                              self.structure.get_package(),
+                                              self._path,
+                                              'getting model grid',
+                                              self.structure.name,
+                                              inspect.stack()[0][3],
+                                              type_, value_, traceback_,
+                                              None,
+                                              self._simulation_data.debug,
+                                              ex)
+
+                    if model_grid.grid_type() == DiscretizationType.DISU:
+                        layers.append(1)
+                    else:
+                        num_layers = model_grid.num_layers()
+                        if num_layers is not None:
+                            layers.append(num_layers)
+                        else:
+                            layers.append(1)
+                else:
+                    # search data dictionary for layer size
+                    layer_size = self.find_dimension_size(layer)
+                    if len(layer_size) == 1:
+                        layers.append(layer_size[0])
+                    else:
+                        message = 'Unable to find the size of expected ' \
+                                  'layer dimension {} '.format(layer)
+                        type_, value_, traceback_ = sys.exc_info()
+                        raise MFDataException(
+                            self.structure.get_model(),
+                            self.structure.get_package(),
+                            self.structure.path,
+                            'resolving layer dimensions',
+                            self.structure.name, inspect.stack()[0][3],
+                            type_, value_, traceback_, message,
+                            self._simulation_data.debug)
+        return tuple(layers)
+
+    def get_description(self, description=None, data_set=None):
+        if data_set is None:
+            data_set = self.structure
+        for data_item in data_set.data_items.values():
+            if data_item.type == DatumType.record:
+                # record within a record, recurse
+                description = self.get_description(description, data_item)
+            else:
+                if data_item.description:
+                    if description:
+                        description = '{}\n{}'.format(description,
+                                                      data_item.description)
+                    else:
+                        description = data_item.description
+        return description
+
+    def load(self, first_line, file_handle, block_header,
+             pre_data_comments=None, external_file_info=None):
+        self.enabled = True
+
+    def is_valid(self):
+        # TODO: Implement for each data type
+        return self._valid
+
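+    # _structure_init walks the data item structures (recursing into
+    # records) and, for single-item datasets, records the item name as the
+    # keyword to search for when loading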
+    def _structure_init(self, data_set=None):
+        if data_set is None:
+            # initialize variables
+            data_set = self.structure
+        for data_item_struct in data_set.data_item_structures:
+            if data_item_struct.type == DatumType.record:
+                # this is a record within a record, recurse
+                self._structure_init(data_item_struct)
+            else:
+                if len(self.structure.data_item_structures) == 1:
+                    # data item name is a keyword to look for
+                    self._keyword = data_item_struct.name
+
+    def _get_constant_formatting_string(self, const_val, layer, data_type,
+                                        suffix='\n'):
+        sim_data = self._simulation_data
+        const_format = list(sim_data.constant_formatting)
+        const_format[1] = to_string(const_val, data_type,
+                                    self._simulation_data,
+                                    self._data_dimensions)
+        return '{}{}'.format(sim_data.indent_string.join(const_format),
+                             suffix)
+
+    def _get_aux_var_name(self, aux_var_index):
+        aux_var_names = self._data_dimensions.package_dim.get_aux_variables()
+        # TODO: Verify that this works for multi-dimensional layering
+        return aux_var_names[0][aux_var_index[0] + 1]
+
+    def _get_storage_obj(self):
+        return self._data_storage
+
+
+class MFMultiDimVar(MFData):
+    def __init__(self, sim_data, model_or_sim, structure, enable=True,
+                 path=None, dimensions=None):
+        super(MFMultiDimVar, self).__init__(sim_data, model_or_sim,
+                                            structure, enable, path,
+                                            dimensions)
+
+    @property
+    def data_type(self):
+        raise NotImplementedError(
+            'must define data_type in child '
+            'class to use this base class')
+
+    @property
+    def plotable(self):
+        raise NotImplementedError(
+            'must define plotable in child '
+            'class to use this base class')
+
+    def _get_internal_formatting_string(self, layer):
+        storage = self._get_storage_obj()
+        if layer is None:
+            layer_storage = storage.layer_storage.first_item()
+        else:
+            layer_storage = storage.layer_storage[layer]
+        int_format = ['INTERNAL']
+        data_type = self.structure.get_datum_type(return_enum_type=True)
+        if storage.data_structure_type != DataStructureType.recarray:
+            int_format.append('FACTOR')
+            if layer_storage.factor is not None:
+                if data_type == DatumType.integer:
+                    int_format.append(str(int(layer_storage.factor)))
+                else:
+                    int_format.append(str(layer_storage.factor))
+            else:
+                if data_type == DatumType.double_precision:
+                    int_format.append('1.0')
+                else:
+                    int_format.append('1')
+        if layer_storage.iprn is not None:
+            int_format.append('IPRN')
+            int_format.append(str(layer_storage.iprn))
+        return self._simulation_data.indent_string.join(int_format)
+
+    def _get_external_formatting_string(self, layer, ext_file_action):
+        storage = self._get_storage_obj()
+        if layer is None:
+            layer_storage = storage.layer_storage.first_item()
+        else:
+            layer_storage = storage.layer_storage[layer]
+        # resolve external file path
+        file_mgmt = self._simulation_data.mfpath
+        model_name = self._data_dimensions.package_dim.model_dim[0].model_name
+        ext_file_path = file_mgmt.get_updated_path(layer_storage.fname,
+                                                   model_name,
+                                                   ext_file_action)
+        layer_storage.fname = ext_file_path
+        ext_format = ['OPEN/CLOSE', "'{}'".format(ext_file_path)]
+        if storage.data_structure_type != DataStructureType.recarray:
+            if layer_storage.factor is not None:
+                data_type = self.structure.get_datum_type(
+                    return_enum_type=True)
+                ext_format.append('FACTOR')
+                if data_type == DatumType.integer:
+                    ext_format.append(str(int(layer_storage.factor)))
+                else:
+                    ext_format.append(str(layer_storage.factor))
+        if layer_storage.binary:
+            ext_format.append('(BINARY)')
+        if layer_storage.iprn is not None:
+            ext_format.append('IPRN')
+            ext_format.append(str(layer_storage.iprn))
+        return '{}\n'.format(
+            self._simulation_data.indent_string.join(ext_format))
diff --git a/flopy/mf6/data/mfdataarray.py
b/flopy/mf6/data/mfdataarray.py index 08b3454dab..1406f72c81 100644 --- a/flopy/mf6/data/mfdataarray.py +++ b/flopy/mf6/data/mfdataarray.py @@ -1,1261 +1,1261 @@ -import sys, inspect, copy, os -import numpy as np -from collections import OrderedDict -from ..data.mfstructure import DatumType -from .mfdatastorage import DataStorage, DataStructureType, DataStorageType -from ...utils.datautil import MultiList -from ..mfbase import ExtFileAction, MFDataException -from ..utils.mfenums import DiscretizationType -from ...datbase import DataType -from .mffileaccess import MFFileAccessArray -from .mfdata import MFMultiDimVar, MFTransient - - -class MFArray(MFMultiDimVar): - """ - Provides an interface for the user to access and update MODFLOW array data. - - Parameters - ---------- - sim_data : MFSimulationData - data contained in the simulation - structure : MFDataStructure - describes the structure of the data - data : list or ndarray - actual data - enable : bool - enable/disable the array - path : tuple - path in the data dictionary to this MFArray - dimensions : MFDataDimensions - dimension information related to the model, package, and array - - Attributes - ---------- - data_type : DataType - type of data stored in the scalar - plotable : bool - if the scalar is plotable - dtype : numpy.dtype - the scalar's numpy data type - data : variable - calls get_data with default parameters - - Methods - ------- - new_simulation : (sim_data : MFSimulationData) - initialize MFArray object for a new simulation - supports_layered : bool - Returns whether this MFArray supports layered data - set_layered_data : (layered_data : bool) - Sets whether this MFArray supports layered data - store_as_external_file : (external_file_path : string, layer_num : int, - replace_existing_external : bool) - Stores data from layer "layer_num" to an external file at - "external_file_path". For unlayered data do not pass in "layer". - If layer is not specified all layers will be stored with each layer - as a separate file. If replace_existing_external is set to False, - this method will not do anything if the data is already in an - external file. - store_as_internal_array : (multiplier : float, layer_num : int) - Stores data from layer "layer_num" internally within the MODFLOW file - with a multiplier "multiplier". For unlayered data do not pass in - "layer". - has_data : (layer_num : int) : bool - Returns whether layer "layer_num" has any data associated with it. - For unlayered data do not pass in "layer". - get_data : (layer_num : int) : ndarray - Returns the data associated with layer "layer_num". If "layer_num" is - None, returns all data. - set_data : (data : ndarray/list, multiplier : float, layer_num : int) - Sets the contents of the data at layer "layer_num" to "data" with - multiplier "multiplier". For unlayered - data do not pass in "layer_num". data can have the following formats: - 1) ndarray - numpy ndarray containing all of the data - 2) [data] - python list containing all of the data - 3) val - a single constant value to be used for all of the data - 4) {'filename':filename, 'factor':fct, 'iprn':print, 'data':data} - - dictionary defining external file information - 5) {'data':data, 'factor':fct, 'iprn':print) - dictionary defining - internal information. Data that is layered can also be set by defining - a list with a length equal to the number of layers in the model. 
- Each layer in the list contains the data as defined in the - formats above: - [layer_1_val, [layer_2_array_vals], - {'filename':file_with_layer_3_data, 'factor':fct, 'iprn':print}] - - load : (first_line : string, file_handle : file descriptor, - block_header : MFBlockHeader, pre_data_comments : MFComment) : - tuple (bool, string) - Loads data from first_line (the first line of data) and open file - file_handle which is pointing to the second line of data. Returns a - tuple with the first item indicating whether all data was read and - the second item being the last line of text read from the file. - get_file_entry : (layer : int) : string - Returns a string containing the data in layer "layer". For unlayered - data do not pass in "layer". - - See Also - -------- - - Notes - ----- - - Examples - -------- - - - """ - def __init__(self, sim_data, model_or_sim, structure, data=None, - enable=True, path=None, dimensions=None): - super(MFArray, self).__init__(sim_data, model_or_sim, structure, enable, path, - dimensions) - if self.structure.layered: - try: - self._layer_shape = self.layer_shape() - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'resolving layer dimensions', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, None, - self._simulation_data.debug, ex) - else: - self._layer_shape = (1,) - if self._layer_shape[0] is None: - self._layer_shape = (1,) - self._data_type = structure.data_item_structures[0].type - try: - shp_ml = MultiList(shape=self._layer_shape) - self._data_storage = self._new_storage(shp_ml.get_total_size() - != 1) - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(structure.get_model(), - structure.get_package(), path, - 'creating storage', structure.name, - inspect.stack()[0][3], - type_, value_, traceback_, None, - sim_data.debug, ex) - self._last_line_info = [] - if self.structure.type == DatumType.integer: - multiplier = [1] - else: - multiplier = [1.0] - if data is not None: - try: - self._get_storage_obj().set_data(data, key=self._current_key, - multiplier=multiplier) - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'setting data', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, None, - self._simulation_data.debug, ex) - - def __setattr__(self, name, value): - if name == '__setstate__': - raise AttributeError(name) - elif name == 'fname': - self._get_storage_obj().layer_storage.first_item().fname = value - elif name == 'factor': - self._get_storage_obj().layer_storage.first_item().factor = value - elif name == 'iprn': - self._get_storage_obj().layer_storage.first_item().iprn = value - elif name == 'binary': - self._get_storage_obj().layer_storage.first_item().binary = value - else: - super(MFArray, self).__setattr__(name, value) - - def __getitem__(self, k): - if isinstance(k, int): - k = (k,) - storage = self._get_storage_obj() - if storage.layered and (isinstance(k, tuple) or isinstance(k, list)): - if not storage.layer_storage.in_shape(k): - comment = 'Could not retrieve layer {} of "{}". 
There' \ - 'are only {} layers available' \ - '.'.format(k, self.structure.name, - len(storage.layer_storage)) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'getting data', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, comment, - self._simulation_data.debug) - # for layered data treat k as layer number(s) - return storage.layer_storage[k] - else: - # for non-layered data treat k as an array/list index of the data - if isinstance(k, int): - try: - if len(self._get_data(apply_mult=True).shape) == 1: - return self._get_data(apply_mult=True)[k] - elif self._get_data(apply_mult=True).shape[0] == 1: - return self._get_data(apply_mult=True)[0, k] - elif self._get_data(apply_mult=True).shape[1] == 1: - return self._get_data(apply_mult=True)[k, 0] - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'setting data', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, None, - self._simulation_data.debug, ex) - - comment = 'Unable to resolve index "{}" for ' \ - 'multidimensional data.'.format(k) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'getting data', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, comment, - self._simulation_data.debug) - else: - try: - if isinstance(k, tuple): - if len(k) == 3: - return self._get_data(apply_mult=True)[k[0], k[1], - k[2]] - elif len(k) == 2: - return self._get_data(apply_mult=True)[k[0], k[1]] - if len(k) == 1: - return self._get_data(apply_mult=True)[k] - else: - return self._get_data(apply_mult=True)[(k,)] - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'setting data', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, None, - self._simulation_data.debug, ex) - - def __setitem__(self, k, value): - storage = self._get_storage_obj() - self._resync() - if storage.layered: - if isinstance(k, int): - k = (k,) - # for layered data treat k as a layer number - try: - storage.layer_storage[k]._set_data(value) - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'setting data', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, None, - self._simulation_data.debug, ex) - - else: - try: - # for non-layered data treat k as an array/list index of the data - a = self._get_data() - a[k] = value - a = a.astype(self._get_data().dtype) - layer_storage = storage.layer_storage.first_item() - self._get_storage_obj()._set_data(a, key=self._current_key, - multiplier=layer_storage.factor) - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'setting data', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, None, - self._simulation_data.debug, ex) - - @property - def data_type(self): - if self.structure.layered: - return DataType.array3d - else: - return DataType.array2d - - @property - def dtype(self): - return self._get_data().dtype.type - - @property - def 
plotable(self): - if self.model is None: - return False - else: - return True - - def new_simulation(self, sim_data): - super(MFArray, self).new_simulation(sim_data) - self._data_storage = self._new_storage(False) - self._layer_shape = (1,) - - def supports_layered(self): - try: - model_grid = self._data_dimensions.get_model_grid() - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'getting model grid', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, None, - self._simulation_data.debug, ex) - return self.structure.layered and \ - model_grid.grid_type() != DiscretizationType.DISU - - def set_layered_data(self, layered_data): - if layered_data is True and self.structure.layered is False: - if self._data_dimensions.get_model_grid().grid_type() == \ - DiscretizationType.DISU: - comment = 'Layered option not available for unstructured ' \ - 'grid. {}'.format(self._path) - else: - comment = 'Data "{}" does not support layered option. ' \ - '{}'.format(self._data_name, self._path) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'setting layered data', self.structure.name, - inspect.stack()[0][3], type_, value_, - traceback_, comment, - self._simulation_data.debug) - self._get_storage_obj().layered = layered_data - - def make_layered(self): - if self.supports_layered(): - try: - self._get_storage_obj().make_layered() - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'making data layered', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, None, - self._simulation_data.debug, ex) - else: - if self._data_dimensions.get_model_grid().grid_type() == \ - DiscretizationType.DISU: - comment = 'Layered option not available for unstructured ' \ - 'grid. {}'.format(self._path) - else: - comment = 'Data "{}" does not support layered option. 
' \ - '{}'.format(self._data_name, self._path) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'converting data to layered', - self.structure.name, - inspect.stack()[0][3], type_, value_, - traceback_, comment, - self._simulation_data.debug) - - def store_as_external_file(self, external_file_path, layer=None, - binary=False, - replace_existing_external=True): - storage = self._get_storage_obj() - if storage is None: - self._set_storage_obj(self._new_storage(False, True)) - storage = self._get_storage_obj() - # build list of layers - if layer is None: - layer_list = [] - for index in range(0, storage.layer_storage.get_total_size()): - if replace_existing_external or \ - storage.layer_storage[index].data_storage_type == \ - DataStorageType.internal_array: - layer_list.append(index) - else: - if replace_existing_external or \ - storage.layer_storage[layer].data_storage_type == \ - DataStorageType.internal_array: - layer_list = [layer] - else: - layer_list = [] - - # store data from each layer in a separate file - for current_layer in layer_list: - # determine external file name for layer - if len(layer_list) > 0: - fname, ext = os.path.splitext(external_file_path) - if len(layer_list) == 1: - file_path = '{}{}'.format(fname, ext) - else: - file_path = '{}_layer{}{}'.format(fname, current_layer + 1, - ext) - else: - file_path = external_file_path - if isinstance(current_layer, int): - current_layer = (current_layer,) - # get the layer's data - data = self._get_data(current_layer, True) - if data is None: - # do not write empty data to an external file - continue - if isinstance(data, str) and self._tas_info(data)[0] is not \ - None: - # data must not be time array series information - continue - if storage.get_data_dimensions(current_layer)[0] == -9999: - # data must have well defined dimensions to make external - continue - try: - # store layer's data in external file - factor = storage.layer_storage[current_layer].factor - external_data = {'filename': file_path, - 'data': self._get_data(current_layer, True), - 'factor': factor, - 'binary': binary} - self._set_data(external_data, layer=current_layer) - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'storing data in external file ' - '{}'.format(external_file_path), - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, None, - self._simulation_data.debug, ex) - - def has_data(self, layer=None): - storage = self._get_storage_obj() - if storage is None: - return False - if isinstance(layer, int): - layer = (layer,) - try: - return storage.has_data(layer) - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'checking for data', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, None, - self._simulation_data.debug, ex) - - @property - def data(self): - return self._get_data() - - def get_data(self, layer=None, apply_mult=False, **kwargs): - return self._get_data(layer, apply_mult, **kwargs) - - def _get_data(self, layer=None, apply_mult=False, **kwargs): - if self._get_storage_obj() is None: - self._data_storage = self._new_storage(False) - if isinstance(layer, int): - layer = (layer,) - storage = self._get_storage_obj() - if storage is not None: - try: - data = 
storage.get_data(layer, apply_mult) - if 'array' in kwargs and kwargs['array'] \ - and isinstance(self, MFTransientArray): - data = np.expand_dims(data, 0) - return data - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'getting data', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, None, - self._simulation_data.debug, ex) - return None - - def set_data(self, data, multiplier=None, layer=None): - self._set_data(data, multiplier, layer) - - def _set_data(self, data, multiplier=None, layer=None): - self._resync() - if self._get_storage_obj() is None: - self._data_storage = self._new_storage(False) - if multiplier is None: - multiplier = [self._get_storage_obj().get_default_mult()] - if isinstance(layer, int): - layer = (layer,) - if isinstance(data, str): - # check to see if this is a time series array - tas_name, tas_label = self._tas_info(data) - if tas_name is not None: - # verify and save as time series array - self._get_storage_obj().set_tas(tas_name, tas_label, - self._current_key) - return - - storage = self._get_storage_obj() - if self.structure.name == 'aux' and layer is None: - if isinstance(data, dict): - aux_data = copy.deepcopy(data['data']) - else: - aux_data = data - # make a list out of a single item - if isinstance(aux_data, int) or \ - isinstance(aux_data, float) or \ - isinstance(aux_data, str): - aux_data = [[aux_data]] - # handle special case of aux variables in an array - self.layered = True - aux_var_names = self._data_dimensions.\ - package_dim.get_aux_variables() - if len(aux_data) == len(aux_var_names[0]) - 1: - for layer, aux_var_data in enumerate(aux_data): - if layer > 0 and \ - layer >= storage.layer_storage.get_total_size(): - storage.add_layer() - if isinstance(data, dict): - # put layer data back in dictionary - layer_data = data - layer_data['data'] = aux_var_data - else: - layer_data = aux_var_data - try: - storage.set_data(layer_data, [layer], multiplier, - self._current_key) - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'setting data', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, None, - self._simulation_data.debug, ex) - else: - message = 'Unable to set data for aux variable. 
' \ - 'Expected {} aux variables but got ' \ - '{}.'.format(len(aux_var_names[0]), - len(data)) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self._data_dimensions.structure.get_model(), - self._data_dimensions.structure.get_package(), - self._data_dimensions.structure.path, - 'setting aux variables', - self._data_dimensions.structure.name, - inspect.stack()[0][3], type_, value_, traceback_, - message, self._simulation_data.debug) - else: - try: - storage.set_data(data, layer, multiplier, - key=self._current_key) - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'setting data', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, None, - self._simulation_data.debug, ex) - self._layer_shape = storage.layer_storage.list_shape - - def load(self, first_line, file_handle, block_header, - pre_data_comments=None, external_file_info=None): - super(MFArray, self).load(first_line, file_handle, block_header, - pre_data_comments=None, - external_file_info=None) - self._resync() - if self.structure.layered: - try: - model_grid = self._data_dimensions.get_model_grid() - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'getting model grid', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, None, - self._simulation_data.debug, ex) - if self._layer_shape[-1] != model_grid.num_layers(): - if model_grid.grid_type() == DiscretizationType.DISU: - self._layer_shape = (1,) - else: - self._layer_shape = (model_grid.num_layers(),) - if self._layer_shape[-1] is None: - self._layer_shape = (1,) - shape_ml = MultiList(shape=self._layer_shape) - self._set_storage_obj(self._new_storage( - shape_ml.get_total_size() != 1, True)) - file_access = MFFileAccessArray(self.structure, self._data_dimensions, - self._simulation_data, self._path, - self._current_key) - storage = self._get_storage_obj() - self._layer_shape, return_val = file_access.load_from_package( - first_line, file_handle, self._layer_shape, storage, - self._keyword, pre_data_comments=None) - if external_file_info is not None: - storage.point_to_existing_external_file( - external_file_info, storage.layer_storage.get_total_size() - 1) - - return return_val - - def _is_layered_aux(self): - # determine if this is the special aux variable case - if self.structure.name.lower() == 'aux' and \ - self._get_storage_obj().layered: - return True - else: - return False - - def get_file_entry(self, layer=None, - ext_file_action=ExtFileAction.copy_relative_paths): - return self._get_file_entry(layer, ext_file_action) - - def _get_file_entry(self, layer=None, - ext_file_action=ExtFileAction.copy_relative_paths): - if isinstance(layer, int): - layer = (layer,) - data_storage = self._get_storage_obj() - if data_storage is None or \ - data_storage.layer_storage.get_total_size() == 0 \ - or not data_storage.has_data(): - return '' - - layered_aux = self._is_layered_aux() - - # prepare indent - indent = self._simulation_data.indent_string - shape_ml = MultiList(shape=self._layer_shape) - if shape_ml.get_total_size() == 1: - data_indent = indent - else: - data_indent = '{}{}'.format(indent, - self._simulation_data.indent_string) - - file_entry_array = [] - if data_storage.data_structure_type == DataStructureType.scalar: - # scalar data, like in the case of a time array 
series gets written - # on a single line - try: - data = data_storage.get_data() - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'getting data', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, None, - self._simulation_data.debug, ex) - if self.structure.data_item_structures[0].numeric_index or \ - self.structure.data_item_structures[0].is_cellid: - # for cellid and numeric indices convert from 0 base to 1 based - data = abs(data) + 1 - file_entry_array.append('{}{}{}{}\n'.format(indent, - self.structure.name, - indent, - data)) - elif data_storage.layered: - if not layered_aux: - if not self.structure.data_item_structures[0].just_data: - name = self.structure.name - file_entry_array.append('{}{}{}{}\n'.format(indent, name, - indent, - 'LAYERED')) - else: - file_entry_array.append('{}{}\n'.format(indent, 'LAYERED')) - - if layer is None: - layer_min = shape_ml.first_index() - layer_max = copy.deepcopy(self._layer_shape) - else: - # set layer range - if not shape_ml.in_shape(layer): - comment = 'Layer {} for variable "{}" does not exist' \ - '.'.format(layer, self._data_name) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'getting file entry', - self.structure.name, - inspect.stack()[0][3], type_, value_, - traceback_, comment, - self._simulation_data.debug) - - layer_min = layer - layer_max = shape_ml.inc_shape_idx(layer) - for layer in shape_ml.indexes(layer_min, layer_max): - file_entry_array.append( - self._get_file_entry_layer(layer, data_indent, - data_storage.layer_storage[ - layer].data_storage_type, - ext_file_action, - layered_aux)) - else: - # data is not layered - if not self.structure.data_item_structures[0].just_data: - if self._data_name == 'aux': - file_entry_array.append('{}{}\n'.format( - indent, self._get_aux_var_name([0]))) - else: - file_entry_array.append('{}{}\n'.format(indent, - self.structure.name)) - - data_storage_type = data_storage.layer_storage[0].data_storage_type - file_entry_array.append( - self._get_file_entry_layer(None, data_indent, - data_storage_type, - ext_file_action)) - - return ''.join(file_entry_array) - - def _new_storage(self, set_layers=True, base_storage=False, - stress_period=0): - if set_layers: - return DataStorage(self._simulation_data, self._model_or_sim, - self._data_dimensions, self._get_file_entry, - DataStorageType.internal_array, - DataStructureType.ndarray, self._layer_shape, - stress_period=stress_period, - data_path=self._path) - else: - return DataStorage(self._simulation_data, self._model_or_sim, - self._data_dimensions, self._get_file_entry, - DataStorageType.internal_array, - DataStructureType.ndarray, - stress_period=stress_period, - data_path=self._path) - - def _get_storage_obj(self): - return self._data_storage - - def _set_storage_obj(self, storage): - self._data_storage = storage - - def _get_file_entry_layer(self, layer, data_indent, storage_type, - ext_file_action, layered_aux=False): - if not self.structure.data_item_structures[0].just_data and \ - not layered_aux: - indent_string = '{}{}'.format(self._simulation_data.indent_string, - self._simulation_data.indent_string) - else: - indent_string = self._simulation_data.indent_string - - file_entry = '' - if layered_aux: - try: - # display aux name - file_entry = '{}{}\n'.format(indent_string, - self._get_aux_var_name(layer)) 
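get_file_entry is the serialization path shown here: scalar data is written on a single line, layered arrays are introduced by a LAYERED keyword, and one record is emitted per layer. A minimal sketch of how that surfaces to a user, assuming a hypothetical three-layer model (the simulation and package names below are illustrative, not part of this patch):

    import flopy

    sim = flopy.mf6.MFSimulation(sim_name='demo', sim_ws='demo_ws')
    tdis = flopy.mf6.ModflowTdis(sim)
    gwf = flopy.mf6.ModflowGwf(sim, modelname='demo')
    dis = flopy.mf6.ModflowGwfdis(gwf, nlay=3, nrow=4, ncol=5)
    npf = flopy.mf6.ModflowGwfnpf(gwf, k=1.0)

    npf.k.make_layered()
    # the text block written to the NPF file: a 'k  LAYERED' header line
    # followed by one 'CONSTANT 1.0' record per layer
    print(npf.k.get_file_entry())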
- except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'getting aux variables', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, None, - self._simulation_data.debug, ex) - indent_string = '{}{}'.format(indent_string, - self._simulation_data.indent_string) - - data_storage = self._get_storage_obj() - if storage_type == DataStorageType.internal_array: - # internal data header + data - format_str = self._get_internal_formatting_string(layer).upper() - lay_str = self._get_data_layer_string(layer, data_indent).upper() - file_entry = '{}{}{}\n{}'.format(file_entry, indent_string, - format_str, lay_str) - elif storage_type == DataStorageType.internal_constant: - # constant data - try: - const_val = data_storage.get_const_val(layer) - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'getting constant value', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, None, - self._simulation_data.debug, ex) - const_str = self._get_constant_formatting_string( - const_val, layer, self._data_type).upper() - file_entry = '{}{}{}'.format(file_entry, indent_string, - const_str) - else: - # external data - ext_str = self._get_external_formatting_string(layer, - ext_file_action) - file_entry = '{}{}{}'.format(file_entry, indent_string, - ext_str) - # add to active list of external files - try: - file_path = data_storage.get_external_file_path(layer) - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - comment = 'Could not get external file path for layer ' \ - '"{}"'.format(layer), - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'getting external file path', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, comment, - self._simulation_data.debug, ex) - package_dim = self._data_dimensions.package_dim - model_name = package_dim.model_dim[0].model_name - self._simulation_data.mfpath.add_ext_file(file_path, model_name) - return file_entry - - def _get_data_layer_string(self, layer, data_indent): - # iterate through data layer - try: - data = self._get_storage_obj().get_data(layer, False) - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - comment = 'Could not get data for layer "{}"'.format(layer) - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'getting data', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, comment, - self._simulation_data.debug, ex) - file_access = MFFileAccessArray(self.structure, self._data_dimensions, - self._simulation_data, self._path, - self._current_key) - return file_access.get_data_string(data, self._data_type, data_indent) - - def _resolve_layer_index(self, layer, allow_multiple_layers=False): - # handle layered vs non-layered data - storage = self._get_storage_obj() - if storage.layered: - if layer is None: - if storage.layer_storage.get_total_size() == 1: - layer_index = [0] - elif allow_multiple_layers: - layer_index = storage.get_active_layer_indices() - else: - comment = 'Data "{}" is layered but no ' \ - 'layer_num was specified' \ - '.'.format(self._data_name) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 
'resolving layer index', - self.structure.name, - inspect.stack()[0][3], type_, value_, - traceback_, comment, - self._simulation_data.debug) - - else: - layer_index = [layer] - else: - layer_index = [[0]] - return layer_index - - def _verify_data(self, data_iter, layer_num): - # TODO: Implement - return True - - def plot(self, filename_base=None, file_extension=None, mflay=None, - fignum=None, title=None, **kwargs): - """ - Plot 3-D model input data - - Parameters - ---------- - filename_base : str - Base file name that will be used to automatically generate file - names for output image files. Plots will be exported as image - files if file_name_base is not None. (default is None) - file_extension : str - Valid matplotlib.pyplot file extension for savefig(). Only used - if filename_base is not None. (default is 'png') - mflay : int - MODFLOW zero-based layer number to return. If None, then all - all layers will be included. (default is None) - **kwargs : dict - axes : list of matplotlib.pyplot.axis - List of matplotlib.pyplot.axis that will be used to plot - data for each layer. If axes=None axes will be generated. - (default is None) - pcolor : bool - Boolean used to determine if matplotlib.pyplot.pcolormesh - plot will be plotted. (default is True) - colorbar : bool - Boolean used to determine if a color bar will be added to - the matplotlib.pyplot.pcolormesh. Only used if pcolor=True. - (default is False) - inactive : bool - Boolean used to determine if a black overlay in inactive - cells in a layer will be displayed. (default is True) - contour : bool - Boolean used to determine if matplotlib.pyplot.contour - plot will be plotted. (default is False) - clabel : bool - Boolean used to determine if matplotlib.pyplot.clabel - will be plotted. Only used if contour=True. (default is False) - grid : bool - Boolean used to determine if the model grid will be plotted - on the figure. (default is False) - masked_values : list - List of unique values to be excluded from the plot. - - Returns - ---------- - out : list - Empty list is returned if filename_base is not None. Otherwise - a list of matplotlib.pyplot.axis is returned. - """ - from flopy.plot import PlotUtilities - - if not self.plotable: - raise TypeError("Simulation level packages are not plotable") - - if len(self.array.shape) == 2: - axes = PlotUtilities._plot_util2d_helper(self, - title=title, - filename_base=filename_base, - file_extension=file_extension, - fignum=fignum, - **kwargs) - elif len(self.array.shape) == 3: - axes = PlotUtilities._plot_util3d_helper(self, - filename_base=filename_base, - file_extension=file_extension, - mflay=mflay, - fignum=fignum, - **kwargs) - else: - axes = None - - return axes - - -class MFTransientArray(MFArray, MFTransient): - """ - Provides an interface for the user to access and update MODFLOW transient - array data. 
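From the user side, each stress period key owns its own DataStorage, and get_data with no key collects every period into a dictionary; periods that were never set come back as None. A hedged sketch, with an illustrative recharge package that is not part of this patch:

    import flopy

    sim = flopy.mf6.MFSimulation(sim_name='demo', sim_ws='demo_ws')
    tdis = flopy.mf6.ModflowTdis(sim, nper=3, perioddata=[(1.0, 1, 1.0)] * 3)
    gwf = flopy.mf6.ModflowGwf(sim, modelname='demo')
    dis = flopy.mf6.ModflowGwfdis(gwf, nlay=1, nrow=4, ncol=5)
    rcha = flopy.mf6.ModflowGwfrcha(gwf, recharge={0: 0.001, 2: 0.004})

    per_data = rcha.recharge.get_data()  # dict keyed by stress period
    print(per_data[1])  # None: period 1 reuses the period-0 array at run time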
- - Parameters - ---------- - sim_data : MFSimulationData - data contained in the simulation - structure : MFDataStructure - describes the structure of the data - data : list or ndarray - actual data - enable : bool - enable/disable the array - path : tuple - path in the data dictionary to this MFArray - dimensions : MFDataDimensions - dimension information related to the model, package, and array - - Methods - ------- - add_transient_key : (transient_key : int) - Adds a new transient time allowing data for that time to be stored and - retrieved using the key "transient_key" - get_data : (layer_num : int, key : int) : ndarray - Returns the data associated with layer "layer_num" during time "key". - If "layer_num" is None, returns all data for time "key". - set_data : (data : ndarray/list, multiplier : float, layer_num : int, - key : int) - Sets the contents of the data at layer "layer_num" and time "key" to - "data" with multiplier "multiplier". For unlayered data do not pass - in "layer_num". - load : (first_line : string, file_handle : file descriptor, - block_header : MFBlockHeader, pre_data_comments : MFComment) : - tuple (bool, string) - Loads data from first_line (the first line of data) and open file - handle which is pointing to the second line of data. Returns a - tuple with the first item indicating whether all data was read - and the second item being the last line of text read from the file. - get_file_entry : (layer : int, key : int) : string - Returns a string containing the data in layer "layer" at time "key". - For unlayered data do not pass in "layer". - - See Also - -------- - - Notes - ----- - - Examples - -------- - - - """ - def __init__(self, sim_data, model_or_sim, structure, enable=True, - path=None, dimensions=None): - super(MFTransientArray, self).__init__(sim_data=sim_data, - model_or_sim=model_or_sim, - structure=structure, - data=None, - enable=enable, - path=path, - dimensions=dimensions) - self._transient_setup(self._data_storage) - self.repeating = True - - @property - def data_type(self): - return DataType.transient2d - - def remove_transient_key(self, transient_key): - if transient_key in self._data_storage: - del self._data_storage[transient_key] - - def add_transient_key(self, transient_key): - super(MFTransientArray, self).add_transient_key(transient_key) - self._data_storage[transient_key] = \ - super(MFTransientArray, self)._new_storage(stress_period= - transient_key) - - def store_as_external_file(self, external_file_path, layer=None, - binary=False, - replace_existing_external=True): - sim_time = self._data_dimensions.package_dim.model_dim[ - 0].simulation_time - num_sp = sim_time.get_num_stress_periods() - # store each stress period in separate file(s) - for sp in range(0, num_sp): - if sp in self._data_storage: - self._current_key = sp - layer_storage = self._get_storage_obj().layer_storage - if layer_storage.get_total_size() > 0 and \ - self._get_storage_obj().layer_storage[0].\ - layer_storage_type != \ - DataStorageType.external_file: - fname, ext = os.path.splitext(external_file_path) - full_name = '{}_{}{}'.format(fname, sp+1, ext) - super(MFTransientArray, self).\ - store_as_external_file(full_name, layer, binary, - replace_existing_external) - - def get_data(self, layer=None, apply_mult=True, **kwargs): - if self._data_storage is not None and len(self._data_storage) > 0: - if layer is None: - output = None - sim_time = self._data_dimensions.package_dim.model_dim[ - 0].simulation_time - num_sp = sim_time.get_num_stress_periods() - if 'array' 
in kwargs: - data = None - for sp in range(0, num_sp): - if sp in self._data_storage: - self.get_data_prep(sp) - data = super(MFTransientArray, self).get_data( - apply_mult=apply_mult, **kwargs) - data = np.expand_dims(data, 0) - else: - if data is None: - # get any data - self.get_data_prep(self._data_storage.key()[0]) - data = super(MFTransientArray, self).get_data( - apply_mult=apply_mult, **kwargs) - data = np.expand_dims(data, 0) - if self.structure.type == DatumType.integer: - data = np.full_like(data, 0) - else: - data = np.full_like(data, 0.0) - if output is None: - output = data - else: - output = np.concatenate((output, data)) - return output - else: - for sp in range(0, num_sp): - data = None - if sp in self._data_storage: - self.get_data_prep(sp) - data = super(MFTransientArray, self).get_data( - apply_mult=apply_mult, **kwargs) - if output is None: - if 'array' in kwargs: - output = [data] - else: - output = {sp: data} - else: - if 'array' in kwargs: - output.append(data) - else: - output[sp] = data - return output - else: - self.get_data_prep(layer) - return super(MFTransientArray, self).get_data( - apply_mult=apply_mult) - else: - return None - - def set_data(self, data, multiplier=None, layer=None, key=None): - if isinstance(data, dict) or isinstance(data, OrderedDict): - # each item in the dictionary is a list for one stress period - # the dictionary key is the stress period the list is for - del_keys = [] - for key, list_item in data.items(): - if list_item is None: - self.remove_transient_key(key) - del_keys.append(key) - else: - self._set_data_prep(list_item, key) - super(MFTransientArray, self).set_data(list_item, - multiplier, layer) - for key in del_keys: - del data[key] - else: - if key is None: - # search for a key - new_key_index = self.structure.first_non_keyword_index() - if new_key_index is not None and hasattr(data, '__len__') and \ - len(data) > new_key_index: - key = data[new_key_index] - else: - key = 0 - if data is None: - self.remove_transient_key(key) - else: - self._set_data_prep(data, key) - super(MFTransientArray, self).set_data(data, multiplier, - layer) - - def get_file_entry(self, key=0, - ext_file_action=ExtFileAction.copy_relative_paths): - self._get_file_entry_prep(key) - return super(MFTransientArray, self).get_file_entry(ext_file_action= - ext_file_action) - - def load(self, first_line, file_handle, block_header, - pre_data_comments=None, external_file_info=None): - self._load_prep(block_header) - return super(MFTransientArray, self).load(first_line, file_handle, - pre_data_comments, - external_file_info) - - def _new_storage(self, set_layers=True, base_storage=False, - stress_period=0): - if base_storage: - if not isinstance(stress_period, int): - stress_period = 1 - return super(MFTransientArray, self)._new_storage(set_layers, - base_storage, - stress_period) - else: - return OrderedDict() - - def _set_storage_obj(self, storage): - self._data_storage[self._current_key] = storage - - def _get_storage_obj(self): - if self._current_key is None or \ - self._current_key not in self._data_storage: - return None - return self._data_storage[self._current_key] - - def plot(self, kper=None, filename_base=None, file_extension=None, - mflay=None, fignum=None, **kwargs): - """ - Plot transient array model input data - - Parameters - ---------- - transient2d : flopy.utils.util_array.Transient2D object - filename_base : str - Base file name that will be used to automatically generate file - names for output image files. 
Plots will be exported as image - files if file_name_base is not None. (default is None) - file_extension : str - Valid matplotlib.pyplot file extension for savefig(). Only used - if filename_base is not None. (default is 'png') - **kwargs : dict - axes : list of matplotlib.pyplot.axis - List of matplotlib.pyplot.axis that will be used to plot - data for each layer. If axes=None axes will be generated. - (default is None) - pcolor : bool - Boolean used to determine if matplotlib.pyplot.pcolormesh - plot will be plotted. (default is True) - colorbar : bool - Boolean used to determine if a color bar will be added to - the matplotlib.pyplot.pcolormesh. Only used if pcolor=True. - (default is False) - inactive : bool - Boolean used to determine if a black overlay in inactive - cells in a layer will be displayed. (default is True) - contour : bool - Boolean used to determine if matplotlib.pyplot.contour - plot will be plotted. (default is False) - clabel : bool - Boolean used to determine if matplotlib.pyplot.clabel - will be plotted. Only used if contour=True. (default is False) - grid : bool - Boolean used to determine if the model grid will be plotted - on the figure. (default is False) - masked_values : list - List of unique values to be excluded from the plot. - kper : str - MODFLOW zero-based stress period number to return. If - kper='all' then data for all stress period will be - extracted. (default is zero). - - Returns - ---------- - axes : list - Empty list is returned if filename_base is not None. Otherwise - a list of matplotlib.pyplot.axis is returned. - """ - from flopy.plot.plotutil import PlotUtilities - - if not self.plotable: - raise TypeError("Simulation level packages are not plotable") - - axes = PlotUtilities._plot_transient2d_helper(self, - filename_base=filename_base, - file_extension=file_extension, - kper=kper, - fignum=fignum, - **kwargs) +import sys, inspect, copy, os +import numpy as np +from collections import OrderedDict +from ..data.mfstructure import DatumType +from .mfdatastorage import DataStorage, DataStructureType, DataStorageType +from ...utils.datautil import MultiList +from ..mfbase import ExtFileAction, MFDataException +from ..utils.mfenums import DiscretizationType +from ...datbase import DataType +from .mffileaccess import MFFileAccessArray +from .mfdata import MFMultiDimVar, MFTransient + + +class MFArray(MFMultiDimVar): + """ + Provides an interface for the user to access and update MODFLOW array data. 
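Users normally do not construct MFArray directly; packages hand instances out as attributes, and get_data/set_data round-trip the underlying DataStorage. A short sketch under assumed, illustrative names:

    import flopy

    sim = flopy.mf6.MFSimulation(sim_name='demo', sim_ws='demo_ws')
    gwf = flopy.mf6.ModflowGwf(sim, modelname='demo')
    dis = flopy.mf6.ModflowGwfdis(gwf, nlay=3, nrow=4, ncol=5,
                                  botm=[-10.0, -20.0, -30.0])

    botm = dis.botm            # an MFArray owned by the DIS package
    arr = botm.get_data()      # full (nlay, nrow, ncol) ndarray
    botm.set_data(arr - 1.0)   # write a modified copy back to storage
    print(botm.data.shape)     # .data is get_data() with default arguments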
+
+    Parameters
+    ----------
+    sim_data : MFSimulationData
+        data contained in the simulation
+    structure : MFDataStructure
+        describes the structure of the data
+    data : list or ndarray
+        actual data
+    enable : bool
+        enable/disable the array
+    path : tuple
+        path in the data dictionary to this MFArray
+    dimensions : MFDataDimensions
+        dimension information related to the model, package, and array
+
+    Attributes
+    ----------
+    data_type : DataType
+        type of data stored in the scalar
+    plotable : bool
+        if the scalar is plotable
+    dtype : numpy.dtype
+        the scalar's numpy data type
+    data : variable
+        calls get_data with default parameters
+
+    Methods
+    -------
+    new_simulation : (sim_data : MFSimulationData)
+        initialize MFArray object for a new simulation
+    supports_layered : bool
+        Returns whether this MFArray supports layered data
+    set_layered_data : (layered_data : bool)
+        Sets whether this MFArray stores its data in layered format
+    store_as_external_file : (external_file_path : string, layer_num : int,
+        replace_existing_external : bool)
+        Stores data from layer "layer_num" to an external file at
+        "external_file_path". For unlayered data do not pass in "layer_num".
+        If layer is not specified all layers will be stored with each layer
+        as a separate file. If replace_existing_external is set to False,
+        this method will not do anything if the data is already in an
+        external file.
+    store_as_internal_array : (multiplier : float, layer_num : int)
+        Stores data from layer "layer_num" internally within the MODFLOW file
+        with a multiplier "multiplier". For unlayered data do not pass in
+        "layer_num".
+    has_data : (layer_num : int) : bool
+        Returns whether layer "layer_num" has any data associated with it.
+        For unlayered data do not pass in "layer_num".
+    get_data : (layer_num : int) : ndarray
+        Returns the data associated with layer "layer_num". If "layer_num" is
+        None, returns all data.
+    set_data : (data : ndarray/list, multiplier : float, layer_num : int)
+        Sets the contents of the data at layer "layer_num" to "data" with
+        multiplier "multiplier". For unlayered data do not pass in
+        "layer_num". data can have the following formats:
+        1) ndarray - numpy ndarray containing all of the data
+        2) [data] - python list containing all of the data
+        3) val - a single constant value to be used for all of the data
+        4) {'filename':filename, 'factor':fct, 'iprn':print, 'data':data} -
+        dictionary defining external file information
+        5) {'data':data, 'factor':fct, 'iprn':print} - dictionary defining
+        internal information. Data that is layered can also be set by defining
+        a list with a length equal to the number of layers in the model.
+        Each layer in the list contains the data as defined in the
+        formats above:
+            [layer_1_val, [layer_2_array_vals],
+            {'filename':file_with_layer_3_data, 'factor':fct, 'iprn':print}]
+
+    load : (first_line : string, file_handle : file descriptor,
+        block_header : MFBlockHeader, pre_data_comments : MFComment) :
+        tuple (bool, string)
+        Loads data from first_line (the first line of data) and open file
+        file_handle which is pointing to the second line of data. Returns a
+        tuple with the first item indicating whether all data was read and
+        the second item being the last line of text read from the file.
+    get_file_entry : (layer : int) : string
+        Returns a string containing the data in layer "layer". For unlayered
+        data do not pass in "layer".
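To make the set_data formats enumerated above concrete, here is a hedged sketch against a hypothetical three-layer NPF package; the names and the external file are illustrative assumptions, not part of this patch:

    import numpy as np
    import flopy

    # assumes the sim/gwf/dis objects from the earlier sketch
    npf = flopy.mf6.ModflowGwfnpf(gwf, k=1.0)
    k = npf.k

    k.set_data(0.5)                          # format 3: one constant value
    k.set_data(np.full((3, 4, 5), 0.5))      # format 1: full ndarray
    k.set_data({'data': np.full((3, 4, 5), 0.5),
                'factor': 2.0, 'iprn': 1})   # format 5: internal, multiplier
    # layered form: one entry per layer, mixing formats; 'k_lay3.txt' is
    # assumed to exist next to the model files
    k.set_data([0.5,
                np.full((4, 5), 1.5),
                {'filename': 'k_lay3.txt', 'factor': 1.0, 'iprn': 1}])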
+ + See Also + -------- + + Notes + ----- + + Examples + -------- + + + """ + def __init__(self, sim_data, model_or_sim, structure, data=None, + enable=True, path=None, dimensions=None): + super(MFArray, self).__init__(sim_data, model_or_sim, structure, enable, path, + dimensions) + if self.structure.layered: + try: + self._layer_shape = self.layer_shape() + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'resolving layer dimensions', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, None, + self._simulation_data.debug, ex) + else: + self._layer_shape = (1,) + if self._layer_shape[0] is None: + self._layer_shape = (1,) + self._data_type = structure.data_item_structures[0].type + try: + shp_ml = MultiList(shape=self._layer_shape) + self._data_storage = self._new_storage(shp_ml.get_total_size() + != 1) + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(structure.get_model(), + structure.get_package(), path, + 'creating storage', structure.name, + inspect.stack()[0][3], + type_, value_, traceback_, None, + sim_data.debug, ex) + self._last_line_info = [] + if self.structure.type == DatumType.integer: + multiplier = [1] + else: + multiplier = [1.0] + if data is not None: + try: + self._get_storage_obj().set_data(data, key=self._current_key, + multiplier=multiplier) + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'setting data', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, None, + self._simulation_data.debug, ex) + + def __setattr__(self, name, value): + if name == '__setstate__': + raise AttributeError(name) + elif name == 'fname': + self._get_storage_obj().layer_storage.first_item().fname = value + elif name == 'factor': + self._get_storage_obj().layer_storage.first_item().factor = value + elif name == 'iprn': + self._get_storage_obj().layer_storage.first_item().iprn = value + elif name == 'binary': + self._get_storage_obj().layer_storage.first_item().binary = value + else: + super(MFArray, self).__setattr__(name, value) + + def __getitem__(self, k): + if isinstance(k, int): + k = (k,) + storage = self._get_storage_obj() + if storage.layered and (isinstance(k, tuple) or isinstance(k, list)): + if not storage.layer_storage.in_shape(k): + comment = 'Could not retrieve layer {} of "{}". 
There' \ + 'are only {} layers available' \ + '.'.format(k, self.structure.name, + len(storage.layer_storage)) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'getting data', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, comment, + self._simulation_data.debug) + # for layered data treat k as layer number(s) + return storage.layer_storage[k] + else: + # for non-layered data treat k as an array/list index of the data + if isinstance(k, int): + try: + if len(self._get_data(apply_mult=True).shape) == 1: + return self._get_data(apply_mult=True)[k] + elif self._get_data(apply_mult=True).shape[0] == 1: + return self._get_data(apply_mult=True)[0, k] + elif self._get_data(apply_mult=True).shape[1] == 1: + return self._get_data(apply_mult=True)[k, 0] + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'setting data', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, None, + self._simulation_data.debug, ex) + + comment = 'Unable to resolve index "{}" for ' \ + 'multidimensional data.'.format(k) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'getting data', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, comment, + self._simulation_data.debug) + else: + try: + if isinstance(k, tuple): + if len(k) == 3: + return self._get_data(apply_mult=True)[k[0], k[1], + k[2]] + elif len(k) == 2: + return self._get_data(apply_mult=True)[k[0], k[1]] + if len(k) == 1: + return self._get_data(apply_mult=True)[k] + else: + return self._get_data(apply_mult=True)[(k,)] + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'setting data', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, None, + self._simulation_data.debug, ex) + + def __setitem__(self, k, value): + storage = self._get_storage_obj() + self._resync() + if storage.layered: + if isinstance(k, int): + k = (k,) + # for layered data treat k as a layer number + try: + storage.layer_storage[k]._set_data(value) + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'setting data', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, None, + self._simulation_data.debug, ex) + + else: + try: + # for non-layered data treat k as an array/list index of the data + a = self._get_data() + a[k] = value + a = a.astype(self._get_data().dtype) + layer_storage = storage.layer_storage.first_item() + self._get_storage_obj()._set_data(a, key=self._current_key, + multiplier=layer_storage.factor) + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'setting data', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, None, + self._simulation_data.debug, ex) + + @property + def data_type(self): + if self.structure.layered: + return DataType.array3d + else: + return DataType.array2d + + @property + def dtype(self): + return self._get_data().dtype.type + + @property + def 
plotable(self): + if self.model is None: + return False + else: + return True + + def new_simulation(self, sim_data): + super(MFArray, self).new_simulation(sim_data) + self._data_storage = self._new_storage(False) + self._layer_shape = (1,) + + def supports_layered(self): + try: + model_grid = self._data_dimensions.get_model_grid() + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'getting model grid', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, None, + self._simulation_data.debug, ex) + return self.structure.layered and \ + model_grid.grid_type() != DiscretizationType.DISU + + def set_layered_data(self, layered_data): + if layered_data is True and self.structure.layered is False: + if self._data_dimensions.get_model_grid().grid_type() == \ + DiscretizationType.DISU: + comment = 'Layered option not available for unstructured ' \ + 'grid. {}'.format(self._path) + else: + comment = 'Data "{}" does not support layered option. ' \ + '{}'.format(self._data_name, self._path) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'setting layered data', self.structure.name, + inspect.stack()[0][3], type_, value_, + traceback_, comment, + self._simulation_data.debug) + self._get_storage_obj().layered = layered_data + + def make_layered(self): + if self.supports_layered(): + try: + self._get_storage_obj().make_layered() + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'making data layered', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, None, + self._simulation_data.debug, ex) + else: + if self._data_dimensions.get_model_grid().grid_type() == \ + DiscretizationType.DISU: + comment = 'Layered option not available for unstructured ' \ + 'grid. {}'.format(self._path) + else: + comment = 'Data "{}" does not support layered option. 
' \ + '{}'.format(self._data_name, self._path) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'converting data to layered', + self.structure.name, + inspect.stack()[0][3], type_, value_, + traceback_, comment, + self._simulation_data.debug) + + def store_as_external_file(self, external_file_path, layer=None, + binary=False, + replace_existing_external=True): + storage = self._get_storage_obj() + if storage is None: + self._set_storage_obj(self._new_storage(False, True)) + storage = self._get_storage_obj() + # build list of layers + if layer is None: + layer_list = [] + for index in range(0, storage.layer_storage.get_total_size()): + if replace_existing_external or \ + storage.layer_storage[index].data_storage_type == \ + DataStorageType.internal_array: + layer_list.append(index) + else: + if replace_existing_external or \ + storage.layer_storage[layer].data_storage_type == \ + DataStorageType.internal_array: + layer_list = [layer] + else: + layer_list = [] + + # store data from each layer in a separate file + for current_layer in layer_list: + # determine external file name for layer + if len(layer_list) > 0: + fname, ext = os.path.splitext(external_file_path) + if len(layer_list) == 1: + file_path = '{}{}'.format(fname, ext) + else: + file_path = '{}_layer{}{}'.format(fname, current_layer + 1, + ext) + else: + file_path = external_file_path + if isinstance(current_layer, int): + current_layer = (current_layer,) + # get the layer's data + data = self._get_data(current_layer, True) + if data is None: + # do not write empty data to an external file + continue + if isinstance(data, str) and self._tas_info(data)[0] is not \ + None: + # data must not be time array series information + continue + if storage.get_data_dimensions(current_layer)[0] == -9999: + # data must have well defined dimensions to make external + continue + try: + # store layer's data in external file + factor = storage.layer_storage[current_layer].factor + external_data = {'filename': file_path, + 'data': self._get_data(current_layer, True), + 'factor': factor, + 'binary': binary} + self._set_data(external_data, layer=current_layer) + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'storing data in external file ' + '{}'.format(external_file_path), + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, None, + self._simulation_data.debug, ex) + + def has_data(self, layer=None): + storage = self._get_storage_obj() + if storage is None: + return False + if isinstance(layer, int): + layer = (layer,) + try: + return storage.has_data(layer) + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'checking for data', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, None, + self._simulation_data.debug, ex) + + @property + def data(self): + return self._get_data() + + def get_data(self, layer=None, apply_mult=False, **kwargs): + return self._get_data(layer, apply_mult, **kwargs) + + def _get_data(self, layer=None, apply_mult=False, **kwargs): + if self._get_storage_obj() is None: + self._data_storage = self._new_storage(False) + if isinstance(layer, int): + layer = (layer,) + storage = self._get_storage_obj() + if storage is not None: + try: + data = 
storage.get_data(layer, apply_mult) + if 'array' in kwargs and kwargs['array'] \ + and isinstance(self, MFTransientArray): + data = np.expand_dims(data, 0) + return data + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'getting data', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, None, + self._simulation_data.debug, ex) + return None + + def set_data(self, data, multiplier=None, layer=None): + self._set_data(data, multiplier, layer) + + def _set_data(self, data, multiplier=None, layer=None): + self._resync() + if self._get_storage_obj() is None: + self._data_storage = self._new_storage(False) + if multiplier is None: + multiplier = [self._get_storage_obj().get_default_mult()] + if isinstance(layer, int): + layer = (layer,) + if isinstance(data, str): + # check to see if this is a time series array + tas_name, tas_label = self._tas_info(data) + if tas_name is not None: + # verify and save as time series array + self._get_storage_obj().set_tas(tas_name, tas_label, + self._current_key) + return + + storage = self._get_storage_obj() + if self.structure.name == 'aux' and layer is None: + if isinstance(data, dict): + aux_data = copy.deepcopy(data['data']) + else: + aux_data = data + # make a list out of a single item + if isinstance(aux_data, int) or \ + isinstance(aux_data, float) or \ + isinstance(aux_data, str): + aux_data = [[aux_data]] + # handle special case of aux variables in an array + self.layered = True + aux_var_names = self._data_dimensions.\ + package_dim.get_aux_variables() + if len(aux_data) == len(aux_var_names[0]) - 1: + for layer, aux_var_data in enumerate(aux_data): + if layer > 0 and \ + layer >= storage.layer_storage.get_total_size(): + storage.add_layer() + if isinstance(data, dict): + # put layer data back in dictionary + layer_data = data + layer_data['data'] = aux_var_data + else: + layer_data = aux_var_data + try: + storage.set_data(layer_data, [layer], multiplier, + self._current_key) + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'setting data', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, None, + self._simulation_data.debug, ex) + else: + message = 'Unable to set data for aux variable. 
' \ + 'Expected {} aux variables but got ' \ + '{}.'.format(len(aux_var_names[0]), + len(data)) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self._data_dimensions.structure.get_model(), + self._data_dimensions.structure.get_package(), + self._data_dimensions.structure.path, + 'setting aux variables', + self._data_dimensions.structure.name, + inspect.stack()[0][3], type_, value_, traceback_, + message, self._simulation_data.debug) + else: + try: + storage.set_data(data, layer, multiplier, + key=self._current_key) + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'setting data', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, None, + self._simulation_data.debug, ex) + self._layer_shape = storage.layer_storage.list_shape + + def load(self, first_line, file_handle, block_header, + pre_data_comments=None, external_file_info=None): + super(MFArray, self).load(first_line, file_handle, block_header, + pre_data_comments=None, + external_file_info=None) + self._resync() + if self.structure.layered: + try: + model_grid = self._data_dimensions.get_model_grid() + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'getting model grid', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, None, + self._simulation_data.debug, ex) + if self._layer_shape[-1] != model_grid.num_layers(): + if model_grid.grid_type() == DiscretizationType.DISU: + self._layer_shape = (1,) + else: + self._layer_shape = (model_grid.num_layers(),) + if self._layer_shape[-1] is None: + self._layer_shape = (1,) + shape_ml = MultiList(shape=self._layer_shape) + self._set_storage_obj(self._new_storage( + shape_ml.get_total_size() != 1, True)) + file_access = MFFileAccessArray(self.structure, self._data_dimensions, + self._simulation_data, self._path, + self._current_key) + storage = self._get_storage_obj() + self._layer_shape, return_val = file_access.load_from_package( + first_line, file_handle, self._layer_shape, storage, + self._keyword, pre_data_comments=None) + if external_file_info is not None: + storage.point_to_existing_external_file( + external_file_info, storage.layer_storage.get_total_size() - 1) + + return return_val + + def _is_layered_aux(self): + # determine if this is the special aux variable case + if self.structure.name.lower() == 'aux' and \ + self._get_storage_obj().layered: + return True + else: + return False + + def get_file_entry(self, layer=None, + ext_file_action=ExtFileAction.copy_relative_paths): + return self._get_file_entry(layer, ext_file_action) + + def _get_file_entry(self, layer=None, + ext_file_action=ExtFileAction.copy_relative_paths): + if isinstance(layer, int): + layer = (layer,) + data_storage = self._get_storage_obj() + if data_storage is None or \ + data_storage.layer_storage.get_total_size() == 0 \ + or not data_storage.has_data(): + return '' + + layered_aux = self._is_layered_aux() + + # prepare indent + indent = self._simulation_data.indent_string + shape_ml = MultiList(shape=self._layer_shape) + if shape_ml.get_total_size() == 1: + data_indent = indent + else: + data_indent = '{}{}'.format(indent, + self._simulation_data.indent_string) + + file_entry_array = [] + if data_storage.data_structure_type == DataStructureType.scalar: + # scalar data, like in the case of a time array 
series gets written + # on a single line + try: + data = data_storage.get_data() + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'getting data', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, None, + self._simulation_data.debug, ex) + if self.structure.data_item_structures[0].numeric_index or \ + self.structure.data_item_structures[0].is_cellid: + # for cellid and numeric indices convert from 0 base to 1 based + data = abs(data) + 1 + file_entry_array.append('{}{}{}{}\n'.format(indent, + self.structure.name, + indent, + data)) + elif data_storage.layered: + if not layered_aux: + if not self.structure.data_item_structures[0].just_data: + name = self.structure.name + file_entry_array.append('{}{}{}{}\n'.format(indent, name, + indent, + 'LAYERED')) + else: + file_entry_array.append('{}{}\n'.format(indent, 'LAYERED')) + + if layer is None: + layer_min = shape_ml.first_index() + layer_max = copy.deepcopy(self._layer_shape) + else: + # set layer range + if not shape_ml.in_shape(layer): + comment = 'Layer {} for variable "{}" does not exist' \ + '.'.format(layer, self._data_name) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'getting file entry', + self.structure.name, + inspect.stack()[0][3], type_, value_, + traceback_, comment, + self._simulation_data.debug) + + layer_min = layer + layer_max = shape_ml.inc_shape_idx(layer) + for layer in shape_ml.indexes(layer_min, layer_max): + file_entry_array.append( + self._get_file_entry_layer(layer, data_indent, + data_storage.layer_storage[ + layer].data_storage_type, + ext_file_action, + layered_aux)) + else: + # data is not layered + if not self.structure.data_item_structures[0].just_data: + if self._data_name == 'aux': + file_entry_array.append('{}{}\n'.format( + indent, self._get_aux_var_name([0]))) + else: + file_entry_array.append('{}{}\n'.format(indent, + self.structure.name)) + + data_storage_type = data_storage.layer_storage[0].data_storage_type + file_entry_array.append( + self._get_file_entry_layer(None, data_indent, + data_storage_type, + ext_file_action)) + + return ''.join(file_entry_array) + + def _new_storage(self, set_layers=True, base_storage=False, + stress_period=0): + if set_layers: + return DataStorage(self._simulation_data, self._model_or_sim, + self._data_dimensions, self._get_file_entry, + DataStorageType.internal_array, + DataStructureType.ndarray, self._layer_shape, + stress_period=stress_period, + data_path=self._path) + else: + return DataStorage(self._simulation_data, self._model_or_sim, + self._data_dimensions, self._get_file_entry, + DataStorageType.internal_array, + DataStructureType.ndarray, + stress_period=stress_period, + data_path=self._path) + + def _get_storage_obj(self): + return self._data_storage + + def _set_storage_obj(self, storage): + self._data_storage = storage + + def _get_file_entry_layer(self, layer, data_indent, storage_type, + ext_file_action, layered_aux=False): + if not self.structure.data_item_structures[0].just_data and \ + not layered_aux: + indent_string = '{}{}'.format(self._simulation_data.indent_string, + self._simulation_data.indent_string) + else: + indent_string = self._simulation_data.indent_string + + file_entry = '' + if layered_aux: + try: + # display aux name + file_entry = '{}{}\n'.format(indent_string, + self._get_aux_var_name(layer)) 
+ except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'getting aux variables', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, None, + self._simulation_data.debug, ex) + indent_string = '{}{}'.format(indent_string, + self._simulation_data.indent_string) + + data_storage = self._get_storage_obj() + if storage_type == DataStorageType.internal_array: + # internal data header + data + format_str = self._get_internal_formatting_string(layer).upper() + lay_str = self._get_data_layer_string(layer, data_indent).upper() + file_entry = '{}{}{}\n{}'.format(file_entry, indent_string, + format_str, lay_str) + elif storage_type == DataStorageType.internal_constant: + # constant data + try: + const_val = data_storage.get_const_val(layer) + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'getting constant value', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, None, + self._simulation_data.debug, ex) + const_str = self._get_constant_formatting_string( + const_val, layer, self._data_type).upper() + file_entry = '{}{}{}'.format(file_entry, indent_string, + const_str) + else: + # external data + ext_str = self._get_external_formatting_string(layer, + ext_file_action) + file_entry = '{}{}{}'.format(file_entry, indent_string, + ext_str) + # add to active list of external files + try: + file_path = data_storage.get_external_file_path(layer) + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + comment = 'Could not get external file path for layer ' \ + '"{}"'.format(layer), + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'getting external file path', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, comment, + self._simulation_data.debug, ex) + package_dim = self._data_dimensions.package_dim + model_name = package_dim.model_dim[0].model_name + self._simulation_data.mfpath.add_ext_file(file_path, model_name) + return file_entry + + def _get_data_layer_string(self, layer, data_indent): + # iterate through data layer + try: + data = self._get_storage_obj().get_data(layer, False) + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + comment = 'Could not get data for layer "{}"'.format(layer) + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'getting data', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, comment, + self._simulation_data.debug, ex) + file_access = MFFileAccessArray(self.structure, self._data_dimensions, + self._simulation_data, self._path, + self._current_key) + return file_access.get_data_string(data, self._data_type, data_indent) + + def _resolve_layer_index(self, layer, allow_multiple_layers=False): + # handle layered vs non-layered data + storage = self._get_storage_obj() + if storage.layered: + if layer is None: + if storage.layer_storage.get_total_size() == 1: + layer_index = [0] + elif allow_multiple_layers: + layer_index = storage.get_active_layer_indices() + else: + comment = 'Data "{}" is layered but no ' \ + 'layer_num was specified' \ + '.'.format(self._data_name) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 
'resolving layer index', + self.structure.name, + inspect.stack()[0][3], type_, value_, + traceback_, comment, + self._simulation_data.debug) + + else: + layer_index = [layer] + else: + layer_index = [[0]] + return layer_index + + def _verify_data(self, data_iter, layer_num): + # TODO: Implement + return True + + def plot(self, filename_base=None, file_extension=None, mflay=None, + fignum=None, title=None, **kwargs): + """ + Plot 3-D model input data + + Parameters + ---------- + filename_base : str + Base file name that will be used to automatically generate file + names for output image files. Plots will be exported as image + files if file_name_base is not None. (default is None) + file_extension : str + Valid matplotlib.pyplot file extension for savefig(). Only used + if filename_base is not None. (default is 'png') + mflay : int + MODFLOW zero-based layer number to return. If None, then all + all layers will be included. (default is None) + **kwargs : dict + axes : list of matplotlib.pyplot.axis + List of matplotlib.pyplot.axis that will be used to plot + data for each layer. If axes=None axes will be generated. + (default is None) + pcolor : bool + Boolean used to determine if matplotlib.pyplot.pcolormesh + plot will be plotted. (default is True) + colorbar : bool + Boolean used to determine if a color bar will be added to + the matplotlib.pyplot.pcolormesh. Only used if pcolor=True. + (default is False) + inactive : bool + Boolean used to determine if a black overlay in inactive + cells in a layer will be displayed. (default is True) + contour : bool + Boolean used to determine if matplotlib.pyplot.contour + plot will be plotted. (default is False) + clabel : bool + Boolean used to determine if matplotlib.pyplot.clabel + will be plotted. Only used if contour=True. (default is False) + grid : bool + Boolean used to determine if the model grid will be plotted + on the figure. (default is False) + masked_values : list + List of unique values to be excluded from the plot. + + Returns + ---------- + out : list + Empty list is returned if filename_base is not None. Otherwise + a list of matplotlib.pyplot.axis is returned. + """ + from flopy.plot import PlotUtilities + + if not self.plotable: + raise TypeError("Simulation level packages are not plotable") + + if len(self.array.shape) == 2: + axes = PlotUtilities._plot_util2d_helper(self, + title=title, + filename_base=filename_base, + file_extension=file_extension, + fignum=fignum, + **kwargs) + elif len(self.array.shape) == 3: + axes = PlotUtilities._plot_util3d_helper(self, + filename_base=filename_base, + file_extension=file_extension, + mflay=mflay, + fignum=fignum, + **kwargs) + else: + axes = None + + return axes + + +class MFTransientArray(MFArray, MFTransient): + """ + Provides an interface for the user to access and update MODFLOW transient + array data. 
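As a hedged usage sketch for the plot() method documented above (the workspace path, model name, and package are illustrative placeholders, not part of this patch):

    import flopy

    # load an existing MODFLOW 6 simulation from a hypothetical workspace
    sim = flopy.mf6.MFSimulation.load(sim_ws='path/to/sim')
    gwf = sim.get_model('model')  # illustrative model name
    # npf.k is an MFArray; plot() returns one matplotlib axis per layer
    axes = gwf.npf.k.plot(colorbar=True)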
+ + Parameters + ---------- + sim_data : MFSimulationData + data contained in the simulation + structure : MFDataStructure + describes the structure of the data + data : list or ndarray + actual data + enable : bool + enable/disable the array + path : tuple + path in the data dictionary to this MFArray + dimensions : MFDataDimensions + dimension information related to the model, package, and array + + Methods + ------- + add_transient_key : (transient_key : int) + Adds a new transient time allowing data for that time to be stored and + retrieved using the key "transient_key" + get_data : (layer_num : int, key : int) : ndarray + Returns the data associated with layer "layer_num" during time "key". + If "layer_num" is None, returns all data for time "key". + set_data : (data : ndarray/list, multiplier : float, layer_num : int, + key : int) + Sets the contents of the data at layer "layer_num" and time "key" to + "data" with multiplier "multiplier". For unlayered data do not pass + in "layer_num". + load : (first_line : string, file_handle : file descriptor, + block_header : MFBlockHeader, pre_data_comments : MFComment) : + tuple (bool, string) + Loads data from first_line (the first line of data) and open file + handle which is pointing to the second line of data. Returns a + tuple with the first item indicating whether all data was read + and the second item being the last line of text read from the file. + get_file_entry : (layer : int, key : int) : string + Returns a string containing the data in layer "layer" at time "key". + For unlayered data do not pass in "layer". + + See Also + -------- + + Notes + ----- + + Examples + -------- + + + """ + def __init__(self, sim_data, model_or_sim, structure, enable=True, + path=None, dimensions=None): + super(MFTransientArray, self).__init__(sim_data=sim_data, + model_or_sim=model_or_sim, + structure=structure, + data=None, + enable=enable, + path=path, + dimensions=dimensions) + self._transient_setup(self._data_storage) + self.repeating = True + + @property + def data_type(self): + return DataType.transient2d + + def remove_transient_key(self, transient_key): + if transient_key in self._data_storage: + del self._data_storage[transient_key] + + def add_transient_key(self, transient_key): + super(MFTransientArray, self).add_transient_key(transient_key) + self._data_storage[transient_key] = \ + super(MFTransientArray, self)._new_storage(stress_period= + transient_key) + + def store_as_external_file(self, external_file_path, layer=None, + binary=False, + replace_existing_external=True): + sim_time = self._data_dimensions.package_dim.model_dim[ + 0].simulation_time + num_sp = sim_time.get_num_stress_periods() + # store each stress period in separate file(s) + for sp in range(0, num_sp): + if sp in self._data_storage: + self._current_key = sp + layer_storage = self._get_storage_obj().layer_storage + if layer_storage.get_total_size() > 0 and \ + self._get_storage_obj().layer_storage[0].\ + layer_storage_type != \ + DataStorageType.external_file: + fname, ext = os.path.splitext(external_file_path) + full_name = '{}_{}{}'.format(fname, sp+1, ext) + super(MFTransientArray, self).\ + store_as_external_file(full_name, layer, binary, + replace_existing_external) + + def get_data(self, layer=None, apply_mult=True, **kwargs): + if self._data_storage is not None and len(self._data_storage) > 0: + if layer is None: + output = None + sim_time = self._data_dimensions.package_dim.model_dim[ + 0].simulation_time + num_sp = sim_time.get_num_stress_periods() + if 'array' 
in kwargs: + data = None + for sp in range(0, num_sp): + if sp in self._data_storage: + self.get_data_prep(sp) + data = super(MFTransientArray, self).get_data( + apply_mult=apply_mult, **kwargs) + data = np.expand_dims(data, 0) + else: + if data is None: + # get any data + self.get_data_prep(self._data_storage.key()[0]) + data = super(MFTransientArray, self).get_data( + apply_mult=apply_mult, **kwargs) + data = np.expand_dims(data, 0) + if self.structure.type == DatumType.integer: + data = np.full_like(data, 0) + else: + data = np.full_like(data, 0.0) + if output is None: + output = data + else: + output = np.concatenate((output, data)) + return output + else: + for sp in range(0, num_sp): + data = None + if sp in self._data_storage: + self.get_data_prep(sp) + data = super(MFTransientArray, self).get_data( + apply_mult=apply_mult, **kwargs) + if output is None: + if 'array' in kwargs: + output = [data] + else: + output = {sp: data} + else: + if 'array' in kwargs: + output.append(data) + else: + output[sp] = data + return output + else: + self.get_data_prep(layer) + return super(MFTransientArray, self).get_data( + apply_mult=apply_mult) + else: + return None + + def set_data(self, data, multiplier=None, layer=None, key=None): + if isinstance(data, dict) or isinstance(data, OrderedDict): + # each item in the dictionary is a list for one stress period + # the dictionary key is the stress period the list is for + del_keys = [] + for key, list_item in data.items(): + if list_item is None: + self.remove_transient_key(key) + del_keys.append(key) + else: + self._set_data_prep(list_item, key) + super(MFTransientArray, self).set_data(list_item, + multiplier, layer) + for key in del_keys: + del data[key] + else: + if key is None: + # search for a key + new_key_index = self.structure.first_non_keyword_index() + if new_key_index is not None and hasattr(data, '__len__') and \ + len(data) > new_key_index: + key = data[new_key_index] + else: + key = 0 + if data is None: + self.remove_transient_key(key) + else: + self._set_data_prep(data, key) + super(MFTransientArray, self).set_data(data, multiplier, + layer) + + def get_file_entry(self, key=0, + ext_file_action=ExtFileAction.copy_relative_paths): + self._get_file_entry_prep(key) + return super(MFTransientArray, self).get_file_entry(ext_file_action= + ext_file_action) + + def load(self, first_line, file_handle, block_header, + pre_data_comments=None, external_file_info=None): + self._load_prep(block_header) + return super(MFTransientArray, self).load(first_line, file_handle, + pre_data_comments, + external_file_info) + + def _new_storage(self, set_layers=True, base_storage=False, + stress_period=0): + if base_storage: + if not isinstance(stress_period, int): + stress_period = 1 + return super(MFTransientArray, self)._new_storage(set_layers, + base_storage, + stress_period) + else: + return OrderedDict() + + def _set_storage_obj(self, storage): + self._data_storage[self._current_key] = storage + + def _get_storage_obj(self): + if self._current_key is None or \ + self._current_key not in self._data_storage: + return None + return self._data_storage[self._current_key] + + def plot(self, kper=None, filename_base=None, file_extension=None, + mflay=None, fignum=None, **kwargs): + """ + Plot transient array model input data + + Parameters + ---------- + transient2d : flopy.utils.util_array.Transient2D object + filename_base : str + Base file name that will be used to automatically generate file + names for output image files. 
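A minimal sketch of the stress-period dictionary convention that get_data() and set_data() above implement, assuming the illustrative model `gwf` from the earlier sketch with a 10x10 grid and the RCHA array package:

    import numpy as np
    import flopy

    # keys are zero-based stress periods; values may be arrays or constants
    rch = flopy.mf6.ModflowGwfrcha(gwf, recharge={
        0: np.full((10, 10), 0.002),  # period 0: full array
        2: 0.001,                     # period 2: constant; period 1 reuses 0
    })
    all_periods = rch.recharge.get_data()  # dict keyed by stress period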
Plots will be exported as image + files if file_name_base is not None. (default is None) + file_extension : str + Valid matplotlib.pyplot file extension for savefig(). Only used + if filename_base is not None. (default is 'png') + **kwargs : dict + axes : list of matplotlib.pyplot.axis + List of matplotlib.pyplot.axis that will be used to plot + data for each layer. If axes=None axes will be generated. + (default is None) + pcolor : bool + Boolean used to determine if matplotlib.pyplot.pcolormesh + plot will be plotted. (default is True) + colorbar : bool + Boolean used to determine if a color bar will be added to + the matplotlib.pyplot.pcolormesh. Only used if pcolor=True. + (default is False) + inactive : bool + Boolean used to determine if a black overlay in inactive + cells in a layer will be displayed. (default is True) + contour : bool + Boolean used to determine if matplotlib.pyplot.contour + plot will be plotted. (default is False) + clabel : bool + Boolean used to determine if matplotlib.pyplot.clabel + will be plotted. Only used if contour=True. (default is False) + grid : bool + Boolean used to determine if the model grid will be plotted + on the figure. (default is False) + masked_values : list + List of unique values to be excluded from the plot. + kper : str + MODFLOW zero-based stress period number to return. If + kper='all' then data for all stress period will be + extracted. (default is zero). + + Returns + ---------- + axes : list + Empty list is returned if filename_base is not None. Otherwise + a list of matplotlib.pyplot.axis is returned. + """ + from flopy.plot.plotutil import PlotUtilities + + if not self.plotable: + raise TypeError("Simulation level packages are not plotable") + + axes = PlotUtilities._plot_transient2d_helper(self, + filename_base=filename_base, + file_extension=file_extension, + kper=kper, + fignum=fignum, + **kwargs) return axes \ No newline at end of file diff --git a/flopy/mf6/data/mfdatalist.py b/flopy/mf6/data/mfdatalist.py index 96886f03e4..a3cb716640 100644 --- a/flopy/mf6/data/mfdatalist.py +++ b/flopy/mf6/data/mfdatalist.py @@ -1,1258 +1,1258 @@ -from collections import OrderedDict -import math -import sys -import os -import inspect -import numpy as np -from ..utils.mfenums import DiscretizationType -from ..data import mfstructure, mfdata -from ..mfbase import MFDataException, ExtFileAction -from .mfstructure import DatumType -from ...utils import datautil -from ...datbase import DataListInterface, DataType -from .mffileaccess import MFFileAccessList -from .mfdatastorage import DataStorage, DataStorageType, DataStructureType -from .mfdatautil import to_string, iterable - - -class MFList(mfdata.MFMultiDimVar, DataListInterface): - """ - Provides an interface for the user to access and update MODFLOW - scalar data. 
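Continuing the illustrative recharge sketch, the transient plot() method completed above can be exercised per its docstring (kper='all' plots every stress period):

    # one matplotlib axis per plotted stress period/layer
    axes = rch.recharge.plot(kper='all', colorbar=True)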
- - Parameters - ---------- - sim_data : MFSimulationData - data contained in the simulation - structure : MFDataStructure - describes the structure of the data - data : list or ndarray - actual data - enable : bool - enable/disable the array - path : tuple - path in the data dictionary to this MFArray - dimensions : MFDataDimensions - dimension information related to the model, package, and array - - Attributes - ---------- - data_type : DataType - type of data stored in the scalar - plotable : bool - if the scalar is plotable - dtype : numpy.dtype - the scalar's numpy data type - data : variable - calls get_data with default parameters - - Methods - ------- - new_simulation : (sim_data : MFSimulationData) - initialize MFArray object for a new simulation - has_data : (layer_num : int) : bool - Returns whether layer "layer_num" has any data associated with it. - For unlayered data do not pass in "layer". - get_data : (layer_num : int) : ndarray - Returns the data associated with layer "layer_num". If "layer_num" is - None, returns all data. - set_data : (data : ndarray/list/dict, multiplier : float, layer_num : int) - Sets the contents of the data at layer "layer_num" to "data" with - multiplier "multiplier". For unlayered data do not pass in - "layer_num". data can have the following formats: - 1) ndarray - ndarray containing the datalist - 2) [(line_one), (line_two), ...] - list where each like of the - datalist is a tuple within the list - 3) {'filename':filename, factor=fct, iprn=print_code, data=data} - - dictionary defining the external file containing the datalist. - If the data is transient, a dictionary can be used to specify each - stress period where the dictionary key is - 1 and - the dictionary value is the datalist data defined above: - {0:ndarray, 1:[(line_one), (line_two), ...], 2:{'filename':filename}) - append_data : (data : list(tuple)) - Appends "data" to the end of this list. Assumes data is in a format - that can be appended directly to a numpy recarray. - append_list_as_record : (data : list) - Appends the list "data" as a single record in this list's recarray. - Assumes "data" has the correct dimensions. - update_record : (record : list, key_index : int) - Updates a record at index "key_index" with the contents of "record". - If the index does not exist update_record appends the contents of - "record" to this list's recarray. - search_data : (search_term : string, col : int) - Searches the list data at column "col" for "search_term". If col is - None search_data searches the entire list. - load : (first_line : string, file_handle : file descriptor, - block_header : MFBlockHeader, pre_data_comments : MFComment) : - tuple (bool, string) - Loads data from first_line (the first line of data) and open file - file_handle which is pointing to the second line of data. Returns a - tuple with the first item indicating whether all data was read - and the second item being the last line of text read from the file. - get_file_entry : (layer : int) : string - Returns a string containing the data in layer "layer". For unlayered - data do not pass in "layer". - store_as_external_file : (external_file_path : str, binary : bool) - store all data externally in file external_file_path. the binary - allows storage in a binary file. If replace_existing_external is set - to False, this method will not do anything if the data is already in - an external file. 
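A hedged sketch of store_as_external_file() as documented above, continuing with the illustrative model `gwf`; the WEL package and file name are arbitrary:

    import flopy

    wel = flopy.mf6.ModflowGwfwel(
        gwf, stress_period_data={0: [((0, 4, 4), -50.0)]})
    # move the list data out of the package file; for transient data one
    # external file per stress period is written (suffix _<kper+1>)
    wel.stress_period_data.store_as_external_file('wel_spd.txt', binary=False)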
- - See Also - -------- - - Notes - ----- - - Examples - -------- - - - """ - def __init__(self, sim_data, model_or_sim, structure, data=None, - enable=True, path=None, dimensions=None, package=None): - super(MFList, self).__init__(sim_data, model_or_sim, structure, enable, - path, dimensions) - try: - self._data_storage = self._new_storage() - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(structure.get_model(), - structure.get_package(), path, - 'creating storage', structure.name, - inspect.stack()[0][3], - type_, value_, traceback_, None, - sim_data.debug, ex) - self._package = package - self._last_line_info = [] - self._data_line = None - self._temp_dict = {} - self._crnt_line_num = 1 - if data is not None: - try: - self.set_data(data, True) - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(structure.get_model(), - structure.get_package(), path, - 'setting data', structure.name, - inspect.stack()[0][3], - type_, value_, traceback_, None, - sim_data.debug, ex) - - @property - def data_type(self): - return DataType.list - - @property - def package(self): - return self._package - - @property - def dtype(self): - return self.get_data().dtype - - @property - def plotable(self): - if self.model is None: - return False - else: - return True - - def to_array(self, kper=0, mask=False): - i0 = 1 - sarr = self.get_data(key=kper) - if not isinstance(sarr, list): - sarr = [sarr] - if len(sarr) == 0 or sarr[0] is None: - return None - if 'inode' in sarr[0].dtype.names: - raise NotImplementedError() - arrays = {} - model_grid = self._data_dimensions.get_model_grid() - - if model_grid._grid_type.value == 1: - shape = (model_grid.num_layers(), model_grid.num_rows(), - model_grid.num_columns()) - elif model_grid._grid_type.value == 2: - shape = (model_grid.num_layers(), model_grid.num_cells_per_layer()) - else: - shape = (model_grid.num_cells_per_layer(),) - - for name in sarr[0].dtype.names[i0:]: - if not sarr[0].dtype.fields[name][0] == object: - arr = np.zeros(shape) - arrays[name] = arr.copy() - - if np.isscalar(sarr[0]): - # if there are no entries for this kper - if sarr[0] == 0: - if mask: - for name, arr in arrays.items(): - arrays[name][:] = np.NaN - return arrays - else: - raise Exception("MfList: something bad happened") - - for name, arr in arrays.items(): - cnt = np.zeros(shape, dtype=np.float64) - for sp_rec in sarr: - if sp_rec is not None: - for rec in sp_rec: - arr[rec['cellid']] += rec[name] - cnt[rec['cellid']] += 1. - # average keys that should not be added - if name != 'cond' and name != 'flux': - idx = cnt > 0. - arr[idx] /= cnt[idx] - if mask: - arr = np.ma.masked_where(cnt == 0., arr) - arr[cnt == 0.] 
= np.NaN - - arrays[name] = arr.copy() - # elif mask: - # for name, arr in arrays.items(): - # arrays[name][:] = np.NaN - return arrays - - def new_simulation(self, sim_data): - try: - super(MFList, self).new_simulation(sim_data) - self._data_storage = self._new_storage() - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'reinitializing', self.structure.name, - inspect.stack()[0][3], - type_, value_, traceback_, None, - self._simulation_data.debug, ex) - - self._data_line = None - - def store_as_external_file(self, external_file_path, binary=False, - replace_existing_external=True): - # only store data externally (do not subpackage info) - if self.structure.construct_package is None: - storage = self._get_storage_obj() - # check if data is already stored external - if replace_existing_external or storage is None or \ - storage.layer_storage.first_item().data_storage_type == \ - DataStorageType.internal_array: - data = self._get_data() - # if not empty dataset - if data is not None: - external_data = {'filename': external_file_path, - 'data': self._get_data(), - 'binary': binary} - self._set_data(external_data) - - def has_data(self): - try: - if self._get_storage_obj() is None: - return False - return self._get_storage_obj().has_data() - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), self._path, - 'checking for data', self.structure.name, - inspect.stack()[0][3], type_, value_, - traceback_, None, - self._simulation_data.debug, ex) - - def _get_data(self, apply_mult=False, **kwargs): - try: - if self._get_storage_obj() is None: - return None - return self._get_storage_obj().get_data() - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), self._path, - 'getting data', self.structure.name, - inspect.stack()[0][3], type_, value_, - traceback_, None, - self._simulation_data.debug, ex) - - def get_data(self, apply_mult=False, **kwargs): - return self._get_data(apply_mult, **kwargs) - - def _set_data(self, data, autofill=False): - if isinstance(data, dict): - if 'data' in data: - data_check = data['data'] - else: - data_check = None - else: - data_check = data - if iterable(data_check): - # verify data length - min_line_size = self.structure.get_min_record_entries() - if isinstance(data_check[0], np.record) or \ - (iterable(data_check[0]) and not - isinstance(data_check[0], str)): - # data contains multiple records - for data_line in data_check: - self._check_line_size(data_line, min_line_size) - else: - # data is a single record - self._check_line_size(data_check, min_line_size) - # set data - self._resync() - try: - if self._get_storage_obj() is None: - self._data_storage = self._new_storage() - # store data - self._get_storage_obj().set_data(data, autofill=autofill) - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), self._path, - 'setting data', self.structure.name, - inspect.stack()[0][3], type_, value_, - traceback_, None, - self._simulation_data.debug, ex) - - def _check_line_size(self, data_line, min_line_size): - if 0 < len(data_line) < min_line_size: - min_line_size = self.structure.get_min_record_entries() - message = 'Data line {} only has {} entries, ' \ - 
'minimum number of entries is ' \ - '{}.'.format(data_line, len(data_line), - min_line_size) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.structure.get_model(), - self.structure.get_package(), - self.structure.path, - 'storing data', - self.structure.name, - inspect.stack()[0][3], - type_, value_, traceback_, message, - self._simulation_data.debug) - - def set_data(self, data, autofill=False): - self._set_data(data, autofill) - - def append_data(self, data): - try: - self._resync() - if self._get_storage_obj() is None: - self._data_storage = self._new_storage() - # store data - self._get_storage_obj().append_data(data) - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'appending data', self.structure.name, - inspect.stack()[0][3], type_, value_, - traceback_, None, - self._simulation_data.debug, ex) - - def append_list_as_record(self, record): - self._resync() - try: - # convert to tuple - tuple_record = () - for item in record: - tuple_record += (item,) - # store - self._get_storage_obj().append_data([tuple_record]) - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'appending data', self.structure.name, - inspect.stack()[0][3], type_, value_, - traceback_, None, - self._simulation_data.debug, ex) - - def update_record(self, record, key_index): - self.append_list_as_record(record) - - def search_data(self, search_term, col=None): - try: - data = self._get_storage_obj().get_data() - if data is not None: - search_term = search_term.lower() - for row in data: - col_num = 0 - for val in row: - if val is not None and val.lower() == search_term and \ - (col == None or col == col_num): - return (row, col) - col_num += 1 - return None - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - if col is None: - col = '' - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), self._path, - 'searching for data', self.structure.name, - inspect.stack()[0][3], type_, value_, - traceback_, - 'search_term={}\ncol={}'.format(search_term, - col), - self._simulation_data.debug, ex) - - def get_file_entry(self, values_only=False, - ext_file_action=ExtFileAction.copy_relative_paths): - return self._get_file_entry(values_only, ext_file_action) - - def _get_file_entry(self, values_only=False, - ext_file_action=ExtFileAction.copy_relative_paths): - try: - # freeze model grid to boost performance - self._data_dimensions.lock() - # init - indent = self._simulation_data.indent_string - file_entry = [] - storage = self._get_storage_obj() - if storage is None or not storage.has_data(): - return '' - - # write out initial comments - if storage.pre_data_comments: - file_entry.append(storage.pre_data_comments.get_file_entry()) - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, 'get file entry initialization', - self.structure.name, - inspect.stack()[0][3], type_, value_, - traceback_, None, - self._simulation_data.debug, ex) - - if storage.layer_storage.first_item().data_storage_type == \ - DataStorageType.external_file: - try: - ext_string = self._get_external_formatting_string(0, - ext_file_action) - file_entry.append('{}{}{}'.format(indent, indent, - ext_string)) - # write file - - except 
Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'formatting external file string', - self.structure.name, - inspect.stack()[0][3], type_, value_, - traceback_, None, - self._simulation_data.debug, ex) - else: - try: - data_complete = storage.get_data() - if storage.layer_storage.first_item().data_storage_type == \ - DataStorageType.internal_constant: - data_lines = 1 - else: - data_lines = len(data_complete) - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'getting data from storage', - self.structure.name, - inspect.stack()[0][3], type_, value_, - traceback_, None, - self._simulation_data.debug, ex) - - # loop through list line by line - assumes first data_item size - # is representative - self._crnt_line_num = 1 - for mflist_line in range(0, data_lines): - text_line = [] - index = 0 - self._get_file_entry_record(data_complete, mflist_line, - text_line, index, self.structure, - storage, indent) - - # include comments - if mflist_line in storage.comments and \ - storage.comments[mflist_line].text: - text_line.append(storage.comments[mflist_line].text) - - file_entry.append('{}{}\n'.format(indent, indent. - join(text_line))) - self._crnt_line_num += 1 - - # unfreeze model grid - self._data_dimensions.unlock() - return ''.join(file_entry) - - def _get_file_entry_record(self, data_complete, mflist_line, text_line, - index, data_set, storage, indent): - if storage.layer_storage.first_item().data_storage_type == \ - DataStorageType.internal_constant: - try: - # constant data - data_type = self.structure.data_item_structures[1].type - const_str = self._get_constant_formatting_string( - storage.get_const_val(0), 0, data_type, '') - text_line.append('{}{}{}'.format(indent, indent, - const_str.upper())) - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'getting constant data', - self.structure.name, - inspect.stack()[0][3], type_, value_, - traceback_, None, - self._simulation_data.debug, ex) - else: - data_dim = self._data_dimensions - data_line = data_complete[mflist_line] - for data_item in data_set.data_item_structures: - if data_item.is_aux: - try: - aux_var_names = \ - data_dim.package_dim.get_aux_variables() - if aux_var_names is not None: - for aux_var_name in aux_var_names[0]: - if aux_var_name.lower() != 'auxiliary': - data_val = data_line[index] - text_line.append(to_string( - data_val, data_item.type, - self._simulation_data, - self._data_dimensions, - data_item.is_cellid, - data_item.possible_cellid, - data_item)) - index += 1 - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'processing auxiliary ' - 'variables', - self.structure.name, - inspect.stack()[0][3], type_, - value_, - traceback_, None, - self._simulation_data.debug, ex) - elif data_item.type == DatumType.record: - # record within a record, recurse - self._get_file_entry_record(data_complete, mflist_line, - text_line, index, data_item, - storage, indent) - elif (not data_item.is_boundname or - data_dim.package_dim.boundnames()) and \ - (not data_item.optional or data_item.name_length < 5 - or not data_item.is_mname or not storage.in_model): - 
data_complete_len = len(data_line) - if data_complete_len <= index: - if data_item.optional == False: - message = 'Not enough data provided ' \ - 'for {}. Data for required data ' \ - 'item "{}" not ' \ - 'found (data path: {})' \ - '.'.format(self.structure.name, - data_item.name, - self._path,) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'building file entry record', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, message, - self._simulation_data.debug) - else: - break - try: - # resolve size of data - resolved_shape, shape_rule = data_dim.get_data_shape( - data_item, self.structure, [data_line], - repeating_key=self._current_key) - data_val = data_line[index] - if data_item.is_cellid or (data_item.possible_cellid - and storage._validate_cellid([data_val], 0)): - if data_item.shape is not None and \ - len(data_item.shape) > 0 and \ - data_item.shape[0] == 'ncelldim': - model_grid = data_dim.get_model_grid() - cellid_size = \ - model_grid.\ - get_num_spatial_coordinates() - data_item.remove_cellid(resolved_shape, - cellid_size) - data_size = 1 - if len(resolved_shape) == 1 and \ - datautil.DatumUtil.is_int(resolved_shape[0]): - data_size = int(resolved_shape[0]) - if data_size < 0: - # unable to resolve data size based on shape, use - # the data heading names to resolve data size - data_size = storage.resolve_data_size(index) - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'resolving data shape', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, - 'Verify that your data is the ' - 'correct shape', - self._simulation_data.debug, ex) - for data_index in range(0, data_size): - if data_complete_len > index: - data_val = data_line[index] - if data_item.type == DatumType.keyword: - if data_val is not None: - text_line.append(data_item.display_name) - if self.structure.block_variable: - # block variables behave differently for - # now. this needs to be resolved - # more consistently at some point - index += 1 - elif data_item.type == DatumType.keystring: - if data_val is not None: - text_line.append(data_val) - index += 1 - - # keystring must be at the end of the line so - # everything else is part of the keystring data - data_key = data_val.lower() - if data_key not in data_item.keystring_dict: - keystr_struct = data_item.keystring_dict[ - '{}record'.format(data_key)] - else: - keystr_struct = data_item.keystring_dict[ - data_key] - if isinstance(keystr_struct, - mfstructure.MFDataStructure): - # data items following keystring - ks_structs = keystr_struct.\ - data_item_structures[1:] - else: - # key string stands alone - ks_structs = [keystr_struct] - ks_struct_index = 0 - max_index = len(ks_structs) - 1 - for data_index in range(index, - data_complete_len): - if data_line[data_index] is not None: - try: - k_data_item = ks_structs[ - ks_struct_index] - text_line.append(to_string( - data_line[data_index], - k_data_item.type, - self._simulation_data, - self._data_dimensions, - k_data_item.is_cellid, - k_data_item.possible_cellid, - k_data_item)) - except Exception as ex: - message = 'An error occurred ' \ - 'while converting data '\ - 'to a string. This ' \ - 'error occurred while ' \ - 'processing "{}" line ' \ - '{} data item "{}".' 
\ - '(data path: {})' \ - '.'.format( - self.structure.name, - data_item.name, - self._crnt_line_num, - self._path) - type_, value_, \ - traceback_ = sys.exc_info() - raise MFDataException( - self.structure.get_model(), - self.structure.get_package(), - self._path, - 'converting data ' - 'to a string', - self.structure.name, - inspect.stack()[0][ - 3], type_, - value_, traceback_, - message, - self. - _simulation_data. - debug, ex) - if ks_struct_index < max_index: - # increment until last record - # entry then repeat last entry - ks_struct_index += 1 - index = data_index - elif data_val is not None and (not isinstance( - data_val, float) or - not math.isnan(data_val)): - try: - if data_item.tagged and data_index == 0: - # data item tagged, include data item name - # as a keyword - text_line.append(to_string( - data_val, DatumType.string, - self._simulation_data, - self._data_dimensions, - False, data_item=data_item)) - index += 1 - data_val = data_line[index] - text_line.append( - to_string(data_val, data_item.type, - self._simulation_data, - self._data_dimensions, - data_item.is_cellid, - data_item.possible_cellid, - data_item)) - except Exception as ex: - message = 'An error occurred while ' \ - 'converting data to a ' \ - 'string. ' \ - 'This error occurred while ' \ - 'processing "{}" line {} data ' \ - 'item "{}".(data path: {})'\ - '.'.format(self.structure.name, - data_item.name, - self._crnt_line_num, - self._path) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure. - get_model(), - self.structure. - get_package(), - self._path, - 'converting data ' - 'to a string', - self.structure.name, - inspect.stack()[0][ - 3], type_, - value_, traceback_, - message, - self. - _simulation_data. - debug, ex) - index += 1 - elif not data_item.optional and shape_rule is None: - message = 'Not enough data provided ' \ - 'for {}. Data for required data ' \ - 'item "{}" not ' \ - 'found (data path: {})' \ - '.'.format(self.structure.name, - data_item.name, - self._path) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'building data line', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, message, - self._simulation_data.debug) - - def load(self, first_line, file_handle, block_header, - pre_data_comments=None, external_file_info=None): - super(MFList, self).load(first_line, file_handle, block_header, - pre_data_comments=None) - self._resync() - file_access = MFFileAccessList( self.structure, self._data_dimensions, - self._simulation_data, self._path, - self._current_key) - storage = self._get_storage_obj() - result = file_access.load_from_package( - first_line, file_handle, storage, pre_data_comments) - if external_file_info is not None: - storage.point_to_existing_external_file(external_file_info, 0) - return result - - def _new_storage(self, stress_period=0): - return DataStorage(self._simulation_data, self._model_or_sim, - self._data_dimensions, self._get_file_entry, - DataStorageType.internal_array, - DataStructureType.recarray, - stress_period=stress_period, - data_path=self._path) - - def _get_storage_obj(self): - return self._data_storage - - def plot(self, key=None, names=None, filename_base=None, - file_extension=None, mflay=None, **kwargs): - """ - Plot boundary condition (MfList) data - - Parameters - ---------- - key : str - MfList dictionary key. (default is None) - names : list - List of names for figure titles. 
(default is None) - filename_base : str - Base file name that will be used to automatically generate file - names for output image files. Plots will be exported as image - files if file_name_base is not None. (default is None) - file_extension : str - Valid matplotlib.pyplot file extension for savefig(). Only used - if filename_base is not None. (default is 'png') - mflay : int - MODFLOW zero-based layer number to return. If None, then all - all layers will be included. (default is None) - **kwargs : dict - axes : list of matplotlib.pyplot.axis - List of matplotlib.pyplot.axis that will be used to plot - data for each layer. If axes=None axes will be generated. - (default is None) - pcolor : bool - Boolean used to determine if matplotlib.pyplot.pcolormesh - plot will be plotted. (default is True) - colorbar : bool - Boolean used to determine if a color bar will be added to - the matplotlib.pyplot.pcolormesh. Only used if pcolor=True. - (default is False) - inactive : bool - Boolean used to determine if a black overlay in inactive - cells in a layer will be displayed. (default is True) - contour : bool - Boolean used to determine if matplotlib.pyplot.contour - plot will be plotted. (default is False) - clabel : bool - Boolean used to determine if matplotlib.pyplot.clabel - will be plotted. Only used if contour=True. (default is False) - grid : bool - Boolean used to determine if the model grid will be plotted - on the figure. (default is False) - masked_values : list - List of unique values to be excluded from the plot. - - Returns - ---------- - out : list - Empty list is returned if filename_base is not None. Otherwise - a list of matplotlib.pyplot.axis is returned. - """ - from flopy.plot import PlotUtilities - - if not self.plotable: - raise TypeError("Simulation level packages are not plotable") - - if 'cellid' not in self.dtype.names: - return - - PlotUtilities._plot_mflist_helper(mflist=self, key=key, kper=None, - names=names, filename_base=None, - file_extension=None, mflay=None, - **kwargs ) - - -class MFTransientList(MFList, mfdata.MFTransient, DataListInterface): - """ - Provides an interface for the user to access and update MODFLOW transient - list data. - - Parameters - ---------- - sim_data : MFSimulationData - data contained in the simulation - structure : MFDataStructure - describes the structure of the data - data : list or ndarray - actual data - enable : bool - enable/disable the array - path : tuple - path in the data dictionary to this MFArray - dimensions : MFDataDimensions - dimension information related to the model, package, and array - - Methods - ------- - add_transient_key : (transient_key : int) - Adds a new transient time allowing data for that time to be stored and - retrieved using the key "transient_key" - add_one :(transient_key : int) - Adds one to the data stored at key "transient_key" - get_data : (key : int) : ndarray - Returns the data during time "key". - set_data : (data : ndarray/list, multiplier : float, key : int) - Sets the contents of the data at time "key" to "data" with - multiplier "multiplier". - load : (first_line : string, file_handle : file descriptor, - block_header : MFBlockHeader, pre_data_comments : MFComment) : - tuple (bool, string) - Loads data from first_line (the first line of data) and open file - file_handle which is pointing to the second line of data. Returns a - tuple with the first item indicating whether all data was read - and the second item being the last line of text read from the file. 
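A minimal sketch of the per-stress-period conventions listed above, using an illustrative DRN package on the assumed model `gwf`:

    import flopy

    # transient list data keyed by zero-based stress period
    drn = flopy.mf6.ModflowGwfdrn(gwf, stress_period_data={
        0: [((0, 2, 3), 9.5, 100.0)],  # (cellid, elev, cond)
        1: [((0, 2, 3), 9.0, 100.0)],
    })
    spd = drn.stress_period_data.get_data()  # dict of recarrays by period
    drn.stress_period_data.append_list_as_record(
        [(0, 2, 4), 9.5, 50.0], key=1)       # add one record to period 1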
- get_file_entry : (key : int) : string - Returns a string containing the data at time "key". - append_list_as_record : (data : list, key : int) - Appends the list "data" as a single record in this list's recarray at - time "key". Assumes "data" has the correct dimensions. - update_record : (record : list, key_index : int, key : int) - Updates a record at index "key_index" and time "key" with the contents - of "record". If the index does not exist update_record appends the - contents of "record" to this list's recarray. - See Also - -------- - - Notes - ----- - - Examples - -------- - - - """ - def __init__(self, sim_data, model_or_sim, structure, enable=True, path=None, - dimensions=None, package=None): - super(MFTransientList, self).__init__(sim_data=sim_data, - model_or_sim=model_or_sim, - structure=structure, - data=None, - enable=enable, - path=path, - dimensions=dimensions, - package=package) - self._transient_setup(self._data_storage) - self.repeating = True - - @property - def data_type(self): - return DataType.transientlist - - @property - def dtype(self): - data = self.get_data() - if len(data) > 0: - return data[0].dtype - else: - return None - - @property - def masked_4D_arrays(self): - model_grid = self._data_dimensions.get_model_grid() - nper = self._data_dimensions.package_dim.model_dim[0].simulation_time \ - .get_num_stress_periods() - # get the first kper - arrays = self.to_array(kper=0, mask=True) - - if arrays is not None: - # initialize these big arrays - if model_grid.grid_type() == DiscretizationType.DIS: - m4ds = {} - for name, array in arrays.items(): - m4d = np.zeros((nper, model_grid.num_layers, - model_grid.num_rows, model_grid.num_columns)) - m4d[0, :, :, :] = array - m4ds[name] = m4d - for kper in range(1, nper): - arrays = self.to_array(kper=kper, mask=True) - for name, array in arrays.items(): - m4ds[name][kper, :, :, :] = array - return m4ds - else: - m3ds = {} - for name, array in arrays.items(): - m3d = np.zeros((nper, model_grid.num_layers, - model_grid.num_cells_per_layer())) - m3d[0, :, :] = array - m3ds[name] = m3d - for kper in range(1, nper): - arrays = self.to_array(kper=kper, mask=True) - for name, array in arrays.items(): - m3ds[name][kper, :, :] = array - return m3ds - - def masked_4D_arrays_itr(self): - model_grid = self._data_dimensions.get_model_grid() - nper = self._data_dimensions.package_dim.model_dim[0].simulation_time \ - .get_num_stress_periods() - # get the first kper - arrays = self.to_array(kper=0, mask=True) - - if arrays is not None: - # initialize these big arrays - for name, array in arrays.items(): - if model_grid.grid_type() == DiscretizationType.DIS: - m4d = np.zeros((nper, model_grid.num_layers(), - model_grid.num_rows(), model_grid.num_columns())) - m4d[0, :, :, :] = array - for kper in range(1, nper): - arrays = self.to_array(kper=kper, mask=True) - for tname, array in arrays.items(): - if tname == name: - m4d[kper, :, :, :] = array - yield name, m4d - else: - m3d = np.zeros((nper, model_grid.num_layers(), - model_grid.num_cells_per_layer())) - m3d[0, :, :] = array - for kper in range(1, nper): - arrays = self.to_array(kper=kper, mask=True) - for tname, array in arrays.items(): - if tname == name: - m3d[kper, :, :] = array - yield name, m3d - - def to_array(self, kper=0, mask=False): - return super(MFTransientList, self).to_array(kper, mask) - - def remove_transient_key(self, transient_key): - if transient_key in self._data_storage: - del self._data_storage[transient_key] - - def add_transient_key(self, transient_key): - 
super(MFTransientList, self).add_transient_key(transient_key) - if isinstance(transient_key, int): - stress_period = transient_key - else: - stress_period = 1 - self._data_storage[transient_key] = \ - super(MFTransientList, self)._new_storage(stress_period) - - @property - def data(self): - return self.get_data() - - def store_as_external_file(self, external_file_path, binary=False, - replace_existing_external=True): - sim_time = self._data_dimensions.package_dim.model_dim[ - 0].simulation_time - num_sp = sim_time.get_num_stress_periods() - for sp in range(0, num_sp): - if sp in self._data_storage: - self._current_key = sp - layer_storage = self._get_storage_obj().layer_storage - if layer_storage.get_total_size() > 0 and \ - self._get_storage_obj().layer_storage[0].\ - layer_storage_type != \ - DataStorageType.external_file: - fname, ext = os.path.splitext(external_file_path) - full_name = '{}_{}{}'.format(fname, sp+1, ext) - super(MFTransientList, self).\ - store_as_external_file(full_name, binary, - replace_existing_external) - - def get_data(self, key=None, apply_mult=False, **kwargs): - if self._data_storage is not None and len(self._data_storage) > 0: - if key is None: - if 'array' in kwargs: - output = [] - sim_time = self._data_dimensions.package_dim.model_dim[ - 0].simulation_time - num_sp = sim_time.get_num_stress_periods() - for sp in range(0, num_sp): - if sp in self._data_storage: - self.get_data_prep(sp) - output.append(super(MFTransientList, self).get_data( - apply_mult=apply_mult)) - else: - output.append(None) - return output - else: - output = {} - for key in self._data_storage.keys(): - self.get_data_prep(key) - output[key] = super(MFTransientList, self).get_data( - apply_mult=apply_mult) - return output - self.get_data_prep(key) - return super(MFTransientList, self).get_data(apply_mult=apply_mult) - else: - return None - - def set_data(self, data, key=None, autofill=False): - if (isinstance(data, dict) or isinstance(data, OrderedDict)): - if 'filename' not in data: - # each item in the dictionary is a list for one stress period - # the dictionary key is the stress period the list is for - del_keys = [] - for key, list_item in data.items(): - if list_item is None: - self.remove_transient_key(key) - del_keys.append(key) - else: - self._set_data_prep(list_item, key) - super(MFTransientList, self).set_data(list_item, - autofill= - autofill) - for key in del_keys: - del data[key] - else: - self._set_data_prep(data['data'], key) - super(MFTransientList, self).set_data(data, autofill) - else: - if key is None: - # search for a key - new_key_index = self.structure.first_non_keyword_index() - if new_key_index is not None and len(data) > new_key_index: - key = data[new_key_index] - else: - key = 0 - if data is None: - self.remove_transient_key(key) - else: - self._set_data_prep(data, key) - super(MFTransientList, self).set_data(data, autofill) - - def get_file_entry(self, key=0, - ext_file_action=ExtFileAction.copy_relative_paths): - self._get_file_entry_prep(key) - return super(MFTransientList, self).get_file_entry(ext_file_action= - ext_file_action) - - def load(self, first_line, file_handle, block_header, - pre_data_comments=None, external_file_info=None): - self._load_prep(block_header) - return super(MFTransientList, self).load(first_line, file_handle, - block_header, - pre_data_comments, - external_file_info) - - def append_list_as_record(self, record, key=0): - self._append_list_as_record_prep(record, key) - super(MFTransientList, self).append_list_as_record(record) - - def 
update_record(self, record, key_index, key=0): - self._update_record_prep(key) - super(MFTransientList, self).update_record(record, key_index) - - def _new_storage(self, stress_period=0): - return OrderedDict() - - def _get_storage_obj(self): - if self._current_key is None or \ - self._current_key not in self._data_storage: - return None - return self._data_storage[self._current_key] - - def plot(self, key=None, names=None, kper=0, - filename_base=None, file_extension=None, mflay=None, - **kwargs): - """ - Plot stress period boundary condition (MfList) data for a specified - stress period - - Parameters - ---------- - key : str - MfList dictionary key. (default is None) - names : list - List of names for figure titles. (default is None) - kper : int - MODFLOW zero-based stress period number to return. (default is zero) - filename_base : str - Base file name that will be used to automatically generate file - names for output image files. Plots will be exported as image - files if file_name_base is not None. (default is None) - file_extension : str - Valid matplotlib.pyplot file extension for savefig(). Only used - if filename_base is not None. (default is 'png') - mflay : int - MODFLOW zero-based layer number to return. If None, then all - all layers will be included. (default is None) - **kwargs : dict - axes : list of matplotlib.pyplot.axis - List of matplotlib.pyplot.axis that will be used to plot - data for each layer. If axes=None axes will be generated. - (default is None) - pcolor : bool - Boolean used to determine if matplotlib.pyplot.pcolormesh - plot will be plotted. (default is True) - colorbar : bool - Boolean used to determine if a color bar will be added to - the matplotlib.pyplot.pcolormesh. Only used if pcolor=True. - (default is False) - inactive : bool - Boolean used to determine if a black overlay in inactive - cells in a layer will be displayed. (default is True) - contour : bool - Boolean used to determine if matplotlib.pyplot.contour - plot will be plotted. (default is False) - clabel : bool - Boolean used to determine if matplotlib.pyplot.clabel - will be plotted. Only used if contour=True. (default is False) - grid : bool - Boolean used to determine if the model grid will be plotted - on the figure. (default is False) - masked_values : list - List of unique values to be excluded from the plot. - - Returns - ---------- - out : list - Empty list is returned if filename_base is not None. Otherwise - a list of matplotlib.pyplot.axis is returned. - """ - from flopy.plot import PlotUtilities - - if not self.plotable: - raise TypeError("Simulation level packages are not plotable") - - if 'cellid' not in self.dtype.names: - return - - axes = PlotUtilities._plot_mflist_helper(self, key=key, names=names, - kper=kper, filename_base=filename_base, - file_extension=file_extension, mflay=mflay, - **kwargs) - return axes - - -class MFMultipleList(MFTransientList): - """ - Provides an interface for the user to access and update MODFLOW multiple - list data. This is list data that is in the same format as the - MFTransientList, but is not time based. 
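The to_array()/masked-array path defined earlier in MFTransientList can be sketched as follows, continuing the illustrative `drn` example; shapes assume a structured DIS grid:

    # one array per list field, mapped onto the model grid; mask=True
    # masks cells with no entries for that stress period
    arrays = drn.stress_period_data.to_array(kper=0, mask=True)
    cond0 = arrays['cond']  # shape (nlay, nrow, ncol) on a DIS grid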
- - Parameters - ---------- - sim_data : MFSimulationData - data contained in the simulation - structure : MFDataStructure - describes the structure of the data - data : list or ndarray - actual data - enable : bool - enable/disable the array - path : tuple - path in the data dictionary to this MFArray - dimensions : MFDataDimensions - dimension information related to the model, package, and array - - See Also - -------- - - Notes - ----- - - Examples - -------- - - - """ - def __init__(self, sim_data, model_or_sim, structure, enable=True, - path=None, dimensions=None, package=None): - super(MFMultipleList, self).__init__(sim_data=sim_data, - model_or_sim=model_or_sim, - structure=structure, - enable=enable, - path=path, - dimensions=dimensions, - package=package) - - def get_data(self, key=None, apply_mult=False, **kwargs): - return super(MFMultipleList, self).get_data(key=key, - apply_mult=apply_mult, +from collections import OrderedDict +import math +import sys +import os +import inspect +import numpy as np +from ..utils.mfenums import DiscretizationType +from ..data import mfstructure, mfdata +from ..mfbase import MFDataException, ExtFileAction +from .mfstructure import DatumType +from ...utils import datautil +from ...datbase import DataListInterface, DataType +from .mffileaccess import MFFileAccessList +from .mfdatastorage import DataStorage, DataStorageType, DataStructureType +from .mfdatautil import to_string, iterable + + +class MFList(mfdata.MFMultiDimVar, DataListInterface): + """ + Provides an interface for the user to access and update MODFLOW + list data. + + Parameters + ---------- + sim_data : MFSimulationData + data contained in the simulation + structure : MFDataStructure + describes the structure of the data + data : list or ndarray + actual data + enable : bool + enable/disable the array + path : tuple + path in the data dictionary to this MFArray + dimensions : MFDataDimensions + dimension information related to the model, package, and array + + Attributes + ---------- + data_type : DataType + type of data stored in the list + plotable : bool + if the list is plotable + dtype : numpy.dtype + the list's numpy data type + data : variable + calls get_data with default parameters + + Methods + ------- + new_simulation : (sim_data : MFSimulationData) + initialize MFList object for a new simulation + has_data : (layer_num : int) : bool + Returns whether layer "layer_num" has any data associated with it. + For unlayered data do not pass in "layer". + get_data : (layer_num : int) : ndarray + Returns the data associated with layer "layer_num". If "layer_num" is + None, returns all data. + set_data : (data : ndarray/list/dict, multiplier : float, layer_num : int) + Sets the contents of the data at layer "layer_num" to "data" with + multiplier "multiplier". For unlayered data do not pass in + "layer_num". data can have the following formats: + 1) ndarray - ndarray containing the datalist + 2) [(line_one), (line_two), ...] - list where each line of the + datalist is a tuple within the list + 3) {'filename': filename, 'factor': fct, 'iprn': print_code, 'data': data} + - dictionary defining the external file containing the datalist. + If the data is transient, a dictionary can be used to specify each + stress period where the dictionary key is the zero-based stress period and + the dictionary value is the datalist data defined above: + {0:ndarray, 1:[(line_one), (line_two), ...], 2:{'filename':filename}} + append_data : (data : list(tuple)) + Appends "data" to the end of this list.
Assumes data is in a format + that can be appended directly to a numpy recarray. + append_list_as_record : (data : list) + Appends the list "data" as a single record in this list's recarray. + Assumes "data" has the correct dimensions. + update_record : (record : list, key_index : int) + Updates a record at index "key_index" with the contents of "record". + If the index does not exist update_record appends the contents of + "record" to this list's recarray. + search_data : (search_term : string, col : int) + Searches the list data at column "col" for "search_term". If col is + None search_data searches the entire list. + load : (first_line : string, file_handle : file descriptor, + block_header : MFBlockHeader, pre_data_comments : MFComment) : + tuple (bool, string) + Loads data from first_line (the first line of data) and open file + file_handle which is pointing to the second line of data. Returns a + tuple with the first item indicating whether all data was read + and the second item being the last line of text read from the file. + get_file_entry : (layer : int) : string + Returns a string containing the data in layer "layer". For unlayered + data do not pass in "layer". + store_as_external_file : (external_file_path : str, binary : bool) + store all data externally in file external_file_path. the binary + allows storage in a binary file. If replace_existing_external is set + to False, this method will not do anything if the data is already in + an external file. + + See Also + -------- + + Notes + ----- + + Examples + -------- + + + """ + def __init__(self, sim_data, model_or_sim, structure, data=None, + enable=True, path=None, dimensions=None, package=None): + super(MFList, self).__init__(sim_data, model_or_sim, structure, enable, + path, dimensions) + try: + self._data_storage = self._new_storage() + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(structure.get_model(), + structure.get_package(), path, + 'creating storage', structure.name, + inspect.stack()[0][3], + type_, value_, traceback_, None, + sim_data.debug, ex) + self._package = package + self._last_line_info = [] + self._data_line = None + self._temp_dict = {} + self._crnt_line_num = 1 + if data is not None: + try: + self.set_data(data, True) + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(structure.get_model(), + structure.get_package(), path, + 'setting data', structure.name, + inspect.stack()[0][3], + type_, value_, traceback_, None, + sim_data.debug, ex) + + @property + def data_type(self): + return DataType.list + + @property + def package(self): + return self._package + + @property + def dtype(self): + return self.get_data().dtype + + @property + def plotable(self): + if self.model is None: + return False + else: + return True + + def to_array(self, kper=0, mask=False): + i0 = 1 + sarr = self.get_data(key=kper) + if not isinstance(sarr, list): + sarr = [sarr] + if len(sarr) == 0 or sarr[0] is None: + return None + if 'inode' in sarr[0].dtype.names: + raise NotImplementedError() + arrays = {} + model_grid = self._data_dimensions.get_model_grid() + + if model_grid._grid_type.value == 1: + shape = (model_grid.num_layers(), model_grid.num_rows(), + model_grid.num_columns()) + elif model_grid._grid_type.value == 2: + shape = (model_grid.num_layers(), model_grid.num_cells_per_layer()) + else: + shape = (model_grid.num_cells_per_layer(),) + + for name in sarr[0].dtype.names[i0:]: + if not sarr[0].dtype.fields[name][0] == object: + 
arr = np.zeros(shape) + arrays[name] = arr.copy() + + if np.isscalar(sarr[0]): + # if there are no entries for this kper + if sarr[0] == 0: + if mask: + for name, arr in arrays.items(): + arrays[name][:] = np.NaN + return arrays + else: + raise Exception("MfList: something bad happened") + + for name, arr in arrays.items(): + cnt = np.zeros(shape, dtype=np.float64) + for sp_rec in sarr: + if sp_rec is not None: + for rec in sp_rec: + arr[rec['cellid']] += rec[name] + cnt[rec['cellid']] += 1. + # average keys that should not be added + if name != 'cond' and name != 'flux': + idx = cnt > 0. + arr[idx] /= cnt[idx] + if mask: + arr = np.ma.masked_where(cnt == 0., arr) + arr[cnt == 0.] = np.NaN + + arrays[name] = arr.copy() + # elif mask: + # for name, arr in arrays.items(): + # arrays[name][:] = np.NaN + return arrays + + def new_simulation(self, sim_data): + try: + super(MFList, self).new_simulation(sim_data) + self._data_storage = self._new_storage() + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'reinitializing', self.structure.name, + inspect.stack()[0][3], + type_, value_, traceback_, None, + self._simulation_data.debug, ex) + + self._data_line = None + + def store_as_external_file(self, external_file_path, binary=False, + replace_existing_external=True): + # only store data externally (do not subpackage info) + if self.structure.construct_package is None: + storage = self._get_storage_obj() + # check if data is already stored external + if replace_existing_external or storage is None or \ + storage.layer_storage.first_item().data_storage_type == \ + DataStorageType.internal_array: + data = self._get_data() + # if not empty dataset + if data is not None: + external_data = {'filename': external_file_path, + 'data': self._get_data(), + 'binary': binary} + self._set_data(external_data) + + def has_data(self): + try: + if self._get_storage_obj() is None: + return False + return self._get_storage_obj().has_data() + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), self._path, + 'checking for data', self.structure.name, + inspect.stack()[0][3], type_, value_, + traceback_, None, + self._simulation_data.debug, ex) + + def _get_data(self, apply_mult=False, **kwargs): + try: + if self._get_storage_obj() is None: + return None + return self._get_storage_obj().get_data() + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), self._path, + 'getting data', self.structure.name, + inspect.stack()[0][3], type_, value_, + traceback_, None, + self._simulation_data.debug, ex) + + def get_data(self, apply_mult=False, **kwargs): + return self._get_data(apply_mult, **kwargs) + + def _set_data(self, data, autofill=False): + if isinstance(data, dict): + if 'data' in data: + data_check = data['data'] + else: + data_check = None + else: + data_check = data + if iterable(data_check): + # verify data length + min_line_size = self.structure.get_min_record_entries() + if isinstance(data_check[0], np.record) or \ + (iterable(data_check[0]) and not + isinstance(data_check[0], str)): + # data contains multiple records + for data_line in data_check: + self._check_line_size(data_line, min_line_size) + else: + # data is a single record + self._check_line_size(data_check, min_line_size) + # set data + 
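The to_array method above folds each record into a cell-indexed array, counting contributions per cell and averaging every field except 'cond' and 'flux', which stay summed. A standalone numpy sketch of that accumulation, using made-up records on a 1x3x3 grid:

    import numpy as np

    shape = (1, 3, 3)                  # (nlay, nrow, ncol)
    records = [((0, 1, 1), -50.0),     # (cellid, flux) pairs; both records
               ((0, 1, 1), -25.0)]     # hit the same cell

    arr = np.zeros(shape)
    cnt = np.zeros(shape)
    for cellid, flux in records:
        arr[cellid] += flux
        cnt[cellid] += 1.0

    assert arr[0, 1, 1] == -75.0
    # 'flux'-like fields stay summed; any other field would be divided by
    # cnt where cnt > 0 to give a per-cell average, as in to_array above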
self._resync() + try: + if self._get_storage_obj() is None: + self._data_storage = self._new_storage() + # store data + self._get_storage_obj().set_data(data, autofill=autofill) + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), self._path, + 'setting data', self.structure.name, + inspect.stack()[0][3], type_, value_, + traceback_, None, + self._simulation_data.debug, ex) + + def _check_line_size(self, data_line, min_line_size): + if 0 < len(data_line) < min_line_size: + min_line_size = self.structure.get_min_record_entries() + message = 'Data line {} only has {} entries, ' \ + 'minimum number of entries is ' \ + '{}.'.format(data_line, len(data_line), + min_line_size) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.structure.get_model(), + self.structure.get_package(), + self.structure.path, + 'storing data', + self.structure.name, + inspect.stack()[0][3], + type_, value_, traceback_, message, + self._simulation_data.debug) + + def set_data(self, data, autofill=False): + self._set_data(data, autofill) + + def append_data(self, data): + try: + self._resync() + if self._get_storage_obj() is None: + self._data_storage = self._new_storage() + # store data + self._get_storage_obj().append_data(data) + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'appending data', self.structure.name, + inspect.stack()[0][3], type_, value_, + traceback_, None, + self._simulation_data.debug, ex) + + def append_list_as_record(self, record): + self._resync() + try: + # convert to tuple + tuple_record = () + for item in record: + tuple_record += (item,) + # store + self._get_storage_obj().append_data([tuple_record]) + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'appending data', self.structure.name, + inspect.stack()[0][3], type_, value_, + traceback_, None, + self._simulation_data.debug, ex) + + def update_record(self, record, key_index): + self.append_list_as_record(record) + + def search_data(self, search_term, col=None): + try: + data = self._get_storage_obj().get_data() + if data is not None: + search_term = search_term.lower() + for row in data: + col_num = 0 + for val in row: + if val is not None and val.lower() == search_term and \ + (col == None or col == col_num): + return (row, col) + col_num += 1 + return None + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + if col is None: + col = '' + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), self._path, + 'searching for data', self.structure.name, + inspect.stack()[0][3], type_, value_, + traceback_, + 'search_term={}\ncol={}'.format(search_term, + col), + self._simulation_data.debug, ex) + + def get_file_entry(self, values_only=False, + ext_file_action=ExtFileAction.copy_relative_paths): + return self._get_file_entry(values_only, ext_file_action) + + def _get_file_entry(self, values_only=False, + ext_file_action=ExtFileAction.copy_relative_paths): + try: + # freeze model grid to boost performance + self._data_dimensions.lock() + # init + indent = self._simulation_data.indent_string + file_entry = [] + storage = self._get_storage_obj() + if storage is None or not storage.has_data(): + return '' + + # write out initial comments + 
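append_data above hands new rows straight to the underlying storage, which is why its docstring requires data "in a format that can be appended directly to a numpy recarray". A standalone sketch of that assumption, with invented field names:

    import numpy as np

    dtype = [('cellid', object), ('q', np.float64)]
    recarray = np.rec.fromrecords([((0, 1, 1), -50.0)], dtype=dtype)
    new_rows = np.rec.fromrecords([((0, 2, 2), -10.0)], dtype=dtype)

    # roughly what appending relies on: rows with a matching dtype
    # concatenate cleanly onto the stored recarray
    recarray = np.hstack((recarray, new_rows))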
if storage.pre_data_comments: + file_entry.append(storage.pre_data_comments.get_file_entry()) + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, 'get file entry initialization', + self.structure.name, + inspect.stack()[0][3], type_, value_, + traceback_, None, + self._simulation_data.debug, ex) + + if storage.layer_storage.first_item().data_storage_type == \ + DataStorageType.external_file: + try: + ext_string = self._get_external_formatting_string(0, + ext_file_action) + file_entry.append('{}{}{}'.format(indent, indent, + ext_string)) + # write file + + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'formatting external file string', + self.structure.name, + inspect.stack()[0][3], type_, value_, + traceback_, None, + self._simulation_data.debug, ex) + else: + try: + data_complete = storage.get_data() + if storage.layer_storage.first_item().data_storage_type == \ + DataStorageType.internal_constant: + data_lines = 1 + else: + data_lines = len(data_complete) + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'getting data from storage', + self.structure.name, + inspect.stack()[0][3], type_, value_, + traceback_, None, + self._simulation_data.debug, ex) + + # loop through list line by line - assumes first data_item size + # is representative + self._crnt_line_num = 1 + for mflist_line in range(0, data_lines): + text_line = [] + index = 0 + self._get_file_entry_record(data_complete, mflist_line, + text_line, index, self.structure, + storage, indent) + + # include comments + if mflist_line in storage.comments and \ + storage.comments[mflist_line].text: + text_line.append(storage.comments[mflist_line].text) + + file_entry.append('{}{}\n'.format(indent, indent. 
+ join(text_line))) + self._crnt_line_num += 1 + + # unfreeze model grid + self._data_dimensions.unlock() + return ''.join(file_entry) + + def _get_file_entry_record(self, data_complete, mflist_line, text_line, + index, data_set, storage, indent): + if storage.layer_storage.first_item().data_storage_type == \ + DataStorageType.internal_constant: + try: + # constant data + data_type = self.structure.data_item_structures[1].type + const_str = self._get_constant_formatting_string( + storage.get_const_val(0), 0, data_type, '') + text_line.append('{}{}{}'.format(indent, indent, + const_str.upper())) + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'getting constant data', + self.structure.name, + inspect.stack()[0][3], type_, value_, + traceback_, None, + self._simulation_data.debug, ex) + else: + data_dim = self._data_dimensions + data_line = data_complete[mflist_line] + for data_item in data_set.data_item_structures: + if data_item.is_aux: + try: + aux_var_names = \ + data_dim.package_dim.get_aux_variables() + if aux_var_names is not None: + for aux_var_name in aux_var_names[0]: + if aux_var_name.lower() != 'auxiliary': + data_val = data_line[index] + text_line.append(to_string( + data_val, data_item.type, + self._simulation_data, + self._data_dimensions, + data_item.is_cellid, + data_item.possible_cellid, + data_item)) + index += 1 + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'processing auxiliary ' + 'variables', + self.structure.name, + inspect.stack()[0][3], type_, + value_, + traceback_, None, + self._simulation_data.debug, ex) + elif data_item.type == DatumType.record: + # record within a record, recurse + self._get_file_entry_record(data_complete, mflist_line, + text_line, index, data_item, + storage, indent) + elif (not data_item.is_boundname or + data_dim.package_dim.boundnames()) and \ + (not data_item.optional or data_item.name_length < 5 + or not data_item.is_mname or not storage.in_model): + data_complete_len = len(data_line) + if data_complete_len <= index: + if data_item.optional == False: + message = 'Not enough data provided ' \ + 'for {}. 
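For orientation, the loop above prefixes one indent string and joins each record's formatted fields with the indent string, one record per line. The following is an illustrative, not authoritative, rendering of a two-record block body; the exact numeric formatting comes from to_string, and cellids are written one-based in the MODFLOW 6 file:

    sample_entry = ('  1 2 2  -50.00000000\n'
                    '  1 3 3  -10.00000000\n')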
Data for required data ' \ + 'item "{}" not ' \ + 'found (data path: {})' \ + '.'.format(self.structure.name, + data_item.name, + self._path,) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'building file entry record', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, message, + self._simulation_data.debug) + else: + break + try: + # resolve size of data + resolved_shape, shape_rule = data_dim.get_data_shape( + data_item, self.structure, [data_line], + repeating_key=self._current_key) + data_val = data_line[index] + if data_item.is_cellid or (data_item.possible_cellid + and storage._validate_cellid([data_val], 0)): + if data_item.shape is not None and \ + len(data_item.shape) > 0 and \ + data_item.shape[0] == 'ncelldim': + model_grid = data_dim.get_model_grid() + cellid_size = \ + model_grid.\ + get_num_spatial_coordinates() + data_item.remove_cellid(resolved_shape, + cellid_size) + data_size = 1 + if len(resolved_shape) == 1 and \ + datautil.DatumUtil.is_int(resolved_shape[0]): + data_size = int(resolved_shape[0]) + if data_size < 0: + # unable to resolve data size based on shape, use + # the data heading names to resolve data size + data_size = storage.resolve_data_size(index) + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'resolving data shape', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, + 'Verify that your data is the ' + 'correct shape', + self._simulation_data.debug, ex) + for data_index in range(0, data_size): + if data_complete_len > index: + data_val = data_line[index] + if data_item.type == DatumType.keyword: + if data_val is not None: + text_line.append(data_item.display_name) + if self.structure.block_variable: + # block variables behave differently for + # now. this needs to be resolved + # more consistently at some point + index += 1 + elif data_item.type == DatumType.keystring: + if data_val is not None: + text_line.append(data_val) + index += 1 + + # keystring must be at the end of the line so + # everything else is part of the keystring data + data_key = data_val.lower() + if data_key not in data_item.keystring_dict: + keystr_struct = data_item.keystring_dict[ + '{}record'.format(data_key)] + else: + keystr_struct = data_item.keystring_dict[ + data_key] + if isinstance(keystr_struct, + mfstructure.MFDataStructure): + # data items following keystring + ks_structs = keystr_struct.\ + data_item_structures[1:] + else: + # key string stands alone + ks_structs = [keystr_struct] + ks_struct_index = 0 + max_index = len(ks_structs) - 1 + for data_index in range(index, + data_complete_len): + if data_line[data_index] is not None: + try: + k_data_item = ks_structs[ + ks_struct_index] + text_line.append(to_string( + data_line[data_index], + k_data_item.type, + self._simulation_data, + self._data_dimensions, + k_data_item.is_cellid, + k_data_item.possible_cellid, + k_data_item)) + except Exception as ex: + message = 'An error occurred ' \ + 'while converting data '\ + 'to a string. This ' \ + 'error occurred while ' \ + 'processing "{}" line ' \ + '{} data item "{}".' 
\ + '(data path: {})' \ + '.'.format( + self.structure.name, + data_item.name, + self._crnt_line_num, + self._path) + type_, value_, \ + traceback_ = sys.exc_info() + raise MFDataException( + self.structure.get_model(), + self.structure.get_package(), + self._path, + 'converting data ' + 'to a string', + self.structure.name, + inspect.stack()[0][ + 3], type_, + value_, traceback_, + message, + self. + _simulation_data. + debug, ex) + if ks_struct_index < max_index: + # increment until last record + # entry then repeat last entry + ks_struct_index += 1 + index = data_index + elif data_val is not None and (not isinstance( + data_val, float) or + not math.isnan(data_val)): + try: + if data_item.tagged and data_index == 0: + # data item tagged, include data item name + # as a keyword + text_line.append(to_string( + data_val, DatumType.string, + self._simulation_data, + self._data_dimensions, + False, data_item=data_item)) + index += 1 + data_val = data_line[index] + text_line.append( + to_string(data_val, data_item.type, + self._simulation_data, + self._data_dimensions, + data_item.is_cellid, + data_item.possible_cellid, + data_item)) + except Exception as ex: + message = 'An error occurred while ' \ + 'converting data to a ' \ + 'string. ' \ + 'This error occurred while ' \ + 'processing "{}" line {} data ' \ + 'item "{}".(data path: {})'\ + '.'.format(self.structure.name, + data_item.name, + self._crnt_line_num, + self._path) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure. + get_model(), + self.structure. + get_package(), + self._path, + 'converting data ' + 'to a string', + self.structure.name, + inspect.stack()[0][ + 3], type_, + value_, traceback_, + message, + self. + _simulation_data. + debug, ex) + index += 1 + elif not data_item.optional and shape_rule is None: + message = 'Not enough data provided ' \ + 'for {}. Data for required data ' \ + 'item "{}" not ' \ + 'found (data path: {})' \ + '.'.format(self.structure.name, + data_item.name, + self._path) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'building data line', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, message, + self._simulation_data.debug) + + def load(self, first_line, file_handle, block_header, + pre_data_comments=None, external_file_info=None): + super(MFList, self).load(first_line, file_handle, block_header, + pre_data_comments=None) + self._resync() + file_access = MFFileAccessList( self.structure, self._data_dimensions, + self._simulation_data, self._path, + self._current_key) + storage = self._get_storage_obj() + result = file_access.load_from_package( + first_line, file_handle, storage, pre_data_comments) + if external_file_info is not None: + storage.point_to_existing_external_file(external_file_info, 0) + return result + + def _new_storage(self, stress_period=0): + return DataStorage(self._simulation_data, self._model_or_sim, + self._data_dimensions, self._get_file_entry, + DataStorageType.internal_array, + DataStructureType.recarray, + stress_period=stress_period, + data_path=self._path) + + def _get_storage_obj(self): + return self._data_storage + + def plot(self, key=None, names=None, filename_base=None, + file_extension=None, mflay=None, **kwargs): + """ + Plot boundary condition (MfList) data + + Parameters + ---------- + key : str + MfList dictionary key. (default is None) + names : list + List of names for figure titles. 
+            (default is None)
+        filename_base : str
+            Base file name that will be used to automatically generate file
+            names for output image files.  Plots will be exported as image
+            files if filename_base is not None. (default is None)
+        file_extension : str
+            Valid matplotlib.pyplot file extension for savefig().  Only used
+            if filename_base is not None. (default is 'png')
+        mflay : int
+            MODFLOW zero-based layer number to return.  If None, all layers
+            will be included. (default is None)
+        **kwargs : dict
+            axes : list of matplotlib.pyplot.axis
+                List of matplotlib.pyplot.axis that will be used to plot
+                data for each layer.  If axes=None axes will be generated.
+                (default is None)
+            pcolor : bool
+                Boolean used to determine if matplotlib.pyplot.pcolormesh
+                plot will be plotted. (default is True)
+            colorbar : bool
+                Boolean used to determine if a color bar will be added to
+                the matplotlib.pyplot.pcolormesh.  Only used if pcolor=True.
+                (default is False)
+            inactive : bool
+                Boolean used to determine if a black overlay in inactive
+                cells in a layer will be displayed. (default is True)
+            contour : bool
+                Boolean used to determine if matplotlib.pyplot.contour
+                plot will be plotted. (default is False)
+            clabel : bool
+                Boolean used to determine if matplotlib.pyplot.clabel
+                will be plotted.  Only used if contour=True.
+                (default is False)
+            grid : bool
+                Boolean used to determine if the model grid will be plotted
+                on the figure. (default is False)
+            masked_values : list
+                List of unique values to be excluded from the plot.
+
+        Returns
+        -------
+        out : list
+            Empty list is returned if filename_base is not None.  Otherwise
+            a list of matplotlib.pyplot.axis is returned.
+        """
+        from flopy.plot import PlotUtilities
+
+        if not self.plotable:
+            raise TypeError("Simulation level packages are not plotable")
+
+        if 'cellid' not in self.dtype.names:
+            return
+
+        axes = PlotUtilities._plot_mflist_helper(
+            mflist=self, key=key, kper=None, names=names,
+            filename_base=filename_base, file_extension=file_extension,
+            mflay=mflay, **kwargs)
+        return axes
+
+
+class MFTransientList(MFList, mfdata.MFTransient, DataListInterface):
+    """
+    Provides an interface for the user to access and update MODFLOW
+    transient list data.
+
+    Parameters
+    ----------
+    sim_data : MFSimulationData
+        data contained in the simulation
+    structure : MFDataStructure
+        describes the structure of the data
+    data : list or ndarray
+        actual data
+    enable : bool
+        enable/disable the list
+    path : tuple
+        path in the data dictionary to this MFTransientList
+    dimensions : MFDataDimensions
+        dimension information related to the model, package, and list
+
+    Methods
+    -------
+    add_transient_key : (transient_key : int)
+        Adds a new transient time allowing data for that time to be stored
+        and retrieved using the key "transient_key"
+    add_one : (transient_key : int)
+        Adds one to the data stored at key "transient_key"
+    get_data : (key : int) : ndarray
+        Returns the data during time "key".
+    set_data : (data : ndarray/list, multiplier : float, key : int)
+        Sets the contents of the data at time "key" to "data" with
+        multiplier "multiplier".
+    load : (first_line : string, file_handle : file descriptor,
+            block_header : MFBlockHeader, pre_data_comments : MFComment) :
+            tuple (bool, string)
+        Loads data from first_line (the first line of data) and open file
+        file_handle which is pointing to the second line of data.  Returns
+        a tuple with the first item indicating whether all data was read
+        and the second item being the last line of text read from the file.
+ get_file_entry : (key : int) : string + Returns a string containing the data at time "key". + append_list_as_record : (data : list, key : int) + Appends the list "data" as a single record in this list's recarray at + time "key". Assumes "data" has the correct dimensions. + update_record : (record : list, key_index : int, key : int) + Updates a record at index "key_index" and time "key" with the contents + of "record". If the index does not exist update_record appends the + contents of "record" to this list's recarray. + See Also + -------- + + Notes + ----- + + Examples + -------- + + + """ + def __init__(self, sim_data, model_or_sim, structure, enable=True, path=None, + dimensions=None, package=None): + super(MFTransientList, self).__init__(sim_data=sim_data, + model_or_sim=model_or_sim, + structure=structure, + data=None, + enable=enable, + path=path, + dimensions=dimensions, + package=package) + self._transient_setup(self._data_storage) + self.repeating = True + + @property + def data_type(self): + return DataType.transientlist + + @property + def dtype(self): + data = self.get_data() + if len(data) > 0: + return data[0].dtype + else: + return None + + @property + def masked_4D_arrays(self): + model_grid = self._data_dimensions.get_model_grid() + nper = self._data_dimensions.package_dim.model_dim[0].simulation_time \ + .get_num_stress_periods() + # get the first kper + arrays = self.to_array(kper=0, mask=True) + + if arrays is not None: + # initialize these big arrays + if model_grid.grid_type() == DiscretizationType.DIS: + m4ds = {} + for name, array in arrays.items(): + m4d = np.zeros((nper, model_grid.num_layers, + model_grid.num_rows, model_grid.num_columns)) + m4d[0, :, :, :] = array + m4ds[name] = m4d + for kper in range(1, nper): + arrays = self.to_array(kper=kper, mask=True) + for name, array in arrays.items(): + m4ds[name][kper, :, :, :] = array + return m4ds + else: + m3ds = {} + for name, array in arrays.items(): + m3d = np.zeros((nper, model_grid.num_layers, + model_grid.num_cells_per_layer())) + m3d[0, :, :] = array + m3ds[name] = m3d + for kper in range(1, nper): + arrays = self.to_array(kper=kper, mask=True) + for name, array in arrays.items(): + m3ds[name][kper, :, :] = array + return m3ds + + def masked_4D_arrays_itr(self): + model_grid = self._data_dimensions.get_model_grid() + nper = self._data_dimensions.package_dim.model_dim[0].simulation_time \ + .get_num_stress_periods() + # get the first kper + arrays = self.to_array(kper=0, mask=True) + + if arrays is not None: + # initialize these big arrays + for name, array in arrays.items(): + if model_grid.grid_type() == DiscretizationType.DIS: + m4d = np.zeros((nper, model_grid.num_layers(), + model_grid.num_rows(), model_grid.num_columns())) + m4d[0, :, :, :] = array + for kper in range(1, nper): + arrays = self.to_array(kper=kper, mask=True) + for tname, array in arrays.items(): + if tname == name: + m4d[kper, :, :, :] = array + yield name, m4d + else: + m3d = np.zeros((nper, model_grid.num_layers(), + model_grid.num_cells_per_layer())) + m3d[0, :, :] = array + for kper in range(1, nper): + arrays = self.to_array(kper=kper, mask=True) + for tname, array in arrays.items(): + if tname == name: + m3d[kper, :, :] = array + yield name, m3d + + def to_array(self, kper=0, mask=False): + return super(MFTransientList, self).to_array(kper, mask) + + def remove_transient_key(self, transient_key): + if transient_key in self._data_storage: + del self._data_storage[transient_key] + + def add_transient_key(self, transient_key): + 
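Before add_transient_key's body below, a concrete and wholly hypothetical setup for exercising this transient interface: a throwaway one-layer model whose WEL stress period data is managed by MFTransientList. All names and values are invented; set_data's treatment of None, shown further below, drops a period's data.

    import flopy

    sim = flopy.mf6.MFSimulation(sim_name='demo', sim_ws='demo_ws')
    tdis = flopy.mf6.ModflowTdis(sim, nper=3, perioddata=[(1.0, 1, 1.0)] * 3)
    flopy.mf6.ModflowIms(sim)
    gwf = flopy.mf6.ModflowGwf(sim, modelname='demo')
    flopy.mf6.ModflowGwfdis(gwf, nlay=1, nrow=3, ncol=3)
    flopy.mf6.ModflowGwfic(gwf)
    flopy.mf6.ModflowGwfnpf(gwf)

    # stress_period_data is backed by the transient list class defined here
    wel = flopy.mf6.ModflowGwfwel(
        gwf, stress_period_data={0: [((0, 1, 1), -50.0)],
                                 1: [((0, 1, 1), -25.0)]})

    # period-keyed set_data; a value of None removes that period's data
    wel.stress_period_data.set_data({2: [((0, 2, 2), -10.0)], 1: None})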
super(MFTransientList, self).add_transient_key(transient_key) + if isinstance(transient_key, int): + stress_period = transient_key + else: + stress_period = 1 + self._data_storage[transient_key] = \ + super(MFTransientList, self)._new_storage(stress_period) + + @property + def data(self): + return self.get_data() + + def store_as_external_file(self, external_file_path, binary=False, + replace_existing_external=True): + sim_time = self._data_dimensions.package_dim.model_dim[ + 0].simulation_time + num_sp = sim_time.get_num_stress_periods() + for sp in range(0, num_sp): + if sp in self._data_storage: + self._current_key = sp + layer_storage = self._get_storage_obj().layer_storage + if layer_storage.get_total_size() > 0 and \ + self._get_storage_obj().layer_storage[0].\ + layer_storage_type != \ + DataStorageType.external_file: + fname, ext = os.path.splitext(external_file_path) + full_name = '{}_{}{}'.format(fname, sp+1, ext) + super(MFTransientList, self).\ + store_as_external_file(full_name, binary, + replace_existing_external) + + def get_data(self, key=None, apply_mult=False, **kwargs): + if self._data_storage is not None and len(self._data_storage) > 0: + if key is None: + if 'array' in kwargs: + output = [] + sim_time = self._data_dimensions.package_dim.model_dim[ + 0].simulation_time + num_sp = sim_time.get_num_stress_periods() + for sp in range(0, num_sp): + if sp in self._data_storage: + self.get_data_prep(sp) + output.append(super(MFTransientList, self).get_data( + apply_mult=apply_mult)) + else: + output.append(None) + return output + else: + output = {} + for key in self._data_storage.keys(): + self.get_data_prep(key) + output[key] = super(MFTransientList, self).get_data( + apply_mult=apply_mult) + return output + self.get_data_prep(key) + return super(MFTransientList, self).get_data(apply_mult=apply_mult) + else: + return None + + def set_data(self, data, key=None, autofill=False): + if (isinstance(data, dict) or isinstance(data, OrderedDict)): + if 'filename' not in data: + # each item in the dictionary is a list for one stress period + # the dictionary key is the stress period the list is for + del_keys = [] + for key, list_item in data.items(): + if list_item is None: + self.remove_transient_key(key) + del_keys.append(key) + else: + self._set_data_prep(list_item, key) + super(MFTransientList, self).set_data(list_item, + autofill= + autofill) + for key in del_keys: + del data[key] + else: + self._set_data_prep(data['data'], key) + super(MFTransientList, self).set_data(data, autofill) + else: + if key is None: + # search for a key + new_key_index = self.structure.first_non_keyword_index() + if new_key_index is not None and len(data) > new_key_index: + key = data[new_key_index] + else: + key = 0 + if data is None: + self.remove_transient_key(key) + else: + self._set_data_prep(data, key) + super(MFTransientList, self).set_data(data, autofill) + + def get_file_entry(self, key=0, + ext_file_action=ExtFileAction.copy_relative_paths): + self._get_file_entry_prep(key) + return super(MFTransientList, self).get_file_entry(ext_file_action= + ext_file_action) + + def load(self, first_line, file_handle, block_header, + pre_data_comments=None, external_file_info=None): + self._load_prep(block_header) + return super(MFTransientList, self).load(first_line, file_handle, + block_header, + pre_data_comments, + external_file_info) + + def append_list_as_record(self, record, key=0): + self._append_list_as_record_prep(record, key) + super(MFTransientList, self).append_list_as_record(record) + + def 
update_record(self, record, key_index, key=0): + self._update_record_prep(key) + super(MFTransientList, self).update_record(record, key_index) + + def _new_storage(self, stress_period=0): + return OrderedDict() + + def _get_storage_obj(self): + if self._current_key is None or \ + self._current_key not in self._data_storage: + return None + return self._data_storage[self._current_key] + + def plot(self, key=None, names=None, kper=0, + filename_base=None, file_extension=None, mflay=None, + **kwargs): + """ + Plot stress period boundary condition (MfList) data for a specified + stress period + + Parameters + ---------- + key : str + MfList dictionary key. (default is None) + names : list + List of names for figure titles. (default is None) + kper : int + MODFLOW zero-based stress period number to return. (default is zero) + filename_base : str + Base file name that will be used to automatically generate file + names for output image files. Plots will be exported as image + files if file_name_base is not None. (default is None) + file_extension : str + Valid matplotlib.pyplot file extension for savefig(). Only used + if filename_base is not None. (default is 'png') + mflay : int + MODFLOW zero-based layer number to return. If None, then all + all layers will be included. (default is None) + **kwargs : dict + axes : list of matplotlib.pyplot.axis + List of matplotlib.pyplot.axis that will be used to plot + data for each layer. If axes=None axes will be generated. + (default is None) + pcolor : bool + Boolean used to determine if matplotlib.pyplot.pcolormesh + plot will be plotted. (default is True) + colorbar : bool + Boolean used to determine if a color bar will be added to + the matplotlib.pyplot.pcolormesh. Only used if pcolor=True. + (default is False) + inactive : bool + Boolean used to determine if a black overlay in inactive + cells in a layer will be displayed. (default is True) + contour : bool + Boolean used to determine if matplotlib.pyplot.contour + plot will be plotted. (default is False) + clabel : bool + Boolean used to determine if matplotlib.pyplot.clabel + will be plotted. Only used if contour=True. (default is False) + grid : bool + Boolean used to determine if the model grid will be plotted + on the figure. (default is False) + masked_values : list + List of unique values to be excluded from the plot. + + Returns + ---------- + out : list + Empty list is returned if filename_base is not None. Otherwise + a list of matplotlib.pyplot.axis is returned. + """ + from flopy.plot import PlotUtilities + + if not self.plotable: + raise TypeError("Simulation level packages are not plotable") + + if 'cellid' not in self.dtype.names: + return + + axes = PlotUtilities._plot_mflist_helper(self, key=key, names=names, + kper=kper, filename_base=filename_base, + file_extension=file_extension, mflay=mflay, + **kwargs) + return axes + + +class MFMultipleList(MFTransientList): + """ + Provides an interface for the user to access and update MODFLOW multiple + list data. This is list data that is in the same format as the + MFTransientList, but is not time based. 
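Continuing the hypothetical sketch above, the per-period access patterns implemented by get_data, store_as_external_file, and plot look roughly like this (plotting also assumes matplotlib is available):

    spd0 = wel.stress_period_data.get_data(key=0)      # recarray, period 0
    all_spd = wel.stress_period_data.get_data()        # dict keyed by period
    by_per = wel.stress_period_data.get_data(array=True)  # list, None gaps

    # per the store_as_external_file loop above, data for stress period sp
    # is written to 'wel_<sp+1>.txt'
    wel.stress_period_data.store_as_external_file('wel.txt')

    # plot one stress period; returns a list of matplotlib axes
    axes = wel.stress_period_data.plot(kper=0)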
+ + Parameters + ---------- + sim_data : MFSimulationData + data contained in the simulation + structure : MFDataStructure + describes the structure of the data + data : list or ndarray + actual data + enable : bool + enable/disable the array + path : tuple + path in the data dictionary to this MFArray + dimensions : MFDataDimensions + dimension information related to the model, package, and array + + See Also + -------- + + Notes + ----- + + Examples + -------- + + + """ + def __init__(self, sim_data, model_or_sim, structure, enable=True, + path=None, dimensions=None, package=None): + super(MFMultipleList, self).__init__(sim_data=sim_data, + model_or_sim=model_or_sim, + structure=structure, + enable=enable, + path=path, + dimensions=dimensions, + package=package) + + def get_data(self, key=None, apply_mult=False, **kwargs): + return super(MFMultipleList, self).get_data(key=key, + apply_mult=apply_mult, **kwargs) \ No newline at end of file diff --git a/flopy/mf6/data/mfdatascalar.py b/flopy/mf6/data/mfdatascalar.py index 13b91fded9..e0f0efcc00 100644 --- a/flopy/mf6/data/mfdatascalar.py +++ b/flopy/mf6/data/mfdatascalar.py @@ -1,688 +1,688 @@ -import sys, inspect -import numpy as np -from ..data.mfstructure import DatumType -from ..data import mfdata -from collections import OrderedDict -from ..mfbase import ExtFileAction, MFDataException -from ...datbase import DataType -from .mfdatautil import convert_data, to_string -from .mffileaccess import MFFileAccessScalar -from .mfdatastorage import DataStorage, DataStructureType, DataStorageType - - -class MFScalar(mfdata.MFData): - """ - Provides an interface for the user to access and update MODFLOW - scalar data. - - Parameters - ---------- - sim_data : MFSimulationData - data contained in the simulation - structure : MFDataStructure - describes the structure of the data - data : list or ndarray - actual data - enable : bool - enable/disable the array - path : tuple - path in the data dictionary to this MFArray - dimensions : MFDataDimensions - dimension information related to the model, package, and array - - Attributes - ---------- - data_type : DataType - type of data stored in the scalar - plotable : bool - if the scalar is plotable - dtype : numpy.dtype - the scalar's numpy data type - data : variable - calls get_data with default parameters - - Methods - ------- - has_data : () : bool - Returns whether this object has data associated with it. - get_data : () : ndarray - Returns the data associated with this object. - set_data : (data : ndarray/list, multiplier : float) - Sets the contents of the data to "data" with - multiplier "multiplier". - load : (first_line : string, file_handle : file descriptor, - block_header : MFBlockHeader, pre_data_comments : MFComment) : - tuple (bool, string) - Loads data from first_line (the first line of data) and open file - file_handle which is pointing to the second line of data. Returns a - tuple with the first item indicating whether all data was read - and the second item being the last line of text read from the file. - get_file_entry : () : string - Returns a string containing the data. 
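A short usage sketch for the scalar interface outlined above, reusing the hypothetical tdis object from the earlier list example; the attribute names follow the MODFLOW 6 definition files:

    nper = tdis.nper.get_data()               # integer scalar -> 3
    tdis.time_units.set_data('days')          # keyword-plus-value scalar
    entry = tdis.time_units.get_file_entry()  # roughly '  TIME_UNITS  days'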
- - See Also - -------- - - Notes - ----- - - Examples - -------- - - - """ - def __init__(self, sim_data, model_or_sim, structure, data=None, - enable=True, path=None, dimensions=None): - super(MFScalar, self).__init__(sim_data, model_or_sim, structure, - enable, path, dimensions) - self._data_type = self.structure.data_item_structures[0].type - self._data_storage = self._new_storage() - if data is not None: - self.set_data(data) - - @property - def data_type(self): - return DataType.scalar - - @property - def plotable(self): - return False - - @property - def dtype(self): - if self.structure.type == DatumType.double_precision: - return np.float64 - elif self.structure.type == DatumType.integer: - return np.int32 - elif self.structure.type == DatumType.recarray or \ - self.structure.type == DatumType.record or \ - self.structure.type == DatumType.repeating_record: - for data_item_struct in self.structure.data_item_structures: - if data_item_struct.type == DatumType.double_precision: - return np.float64 - elif data_item_struct.type == DatumType.integer: - return np.int32 - return None - - def has_data(self): - try: - return self._get_storage_obj().has_data() - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'checking for data', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, None, - self._simulation_data.debug, ex) - - @property - def data(self): - return self.get_data() - - def get_data(self, apply_mult=False, **kwargs): - try: - return self._get_storage_obj().get_data(apply_mult=apply_mult) - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'getting data', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, None, - self._simulation_data.debug, ex) - - def set_data(self, data): - self._resync() - if self.structure.type == DatumType.record: - if data is not None: - if not isinstance(data, list) or isinstance(data, np.ndarray) or \ - isinstance(data, tuple): - data = [data] - else: - while isinstance(data, list) or isinstance(data, np.ndarray) or \ - isinstance(data, tuple): - data = data[0] - if (isinstance(data, list) or isinstance(data, tuple)) and \ - len(data) > 1: - self._add_data_line_comment(data[1:], 0) - storage = self._get_storage_obj() - data_struct = self.structure.data_item_structures[0] - try: - converted_data = convert_data(data, self._data_dimensions, - self._data_type, data_struct) - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - comment = 'Could not convert data "{}" to type ' \ - '"{}".'.format(data, self._data_type) - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'converting data', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, comment, - self._simulation_data.debug, ex) - try: - storage.set_data(converted_data, key=self._current_key) - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - comment = 'Could not set data "{}" to type ' \ - '"{}".'.format(data, self._data_type) - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'setting data', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, comment, - self._simulation_data.debug, ex) - - def add_one(self): - datum_type = 
self.structure.get_datum_type() - if datum_type == int or datum_type == np.int32: - if self._get_storage_obj().get_data() is None: - try: - self._get_storage_obj().set_data(1) - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - comment = 'Could not set data to 1' - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'setting data', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, comment, - self._simulation_data.debug, ex) - else: - try: - current_val = self._get_storage_obj().get_data() - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'getting data', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, None, - self._simulation_data.debug, ex) - try: - self._get_storage_obj().set_data(current_val + 1) - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - comment = 'Could increment data "{}" by one' \ - '.'.format(current_val) - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'setting data', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, comment, - self._simulation_data.debug, ex) - else: - message = '{} of type {} does not support add one ' \ - 'operation.'.format(self._data_name, - self.structure.get_datum_type()) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'adding one to scalar', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, message, - self._simulation_data.debug) - - def get_file_entry(self, values_only=False, one_based=False, - ext_file_action=ExtFileAction.copy_relative_paths): - storage = self._get_storage_obj() - try: - if storage is None or \ - self._get_storage_obj().get_data() is None: - return '' - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'getting data', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, None, - self._simulation_data.debug, ex) - if self.structure.type == DatumType.keyword or self.structure.type ==\ - DatumType.record: - try: - data = storage.get_data() - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'getting data', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, None, - self._simulation_data.debug, ex) - if self.structure.type == DatumType.keyword: - if data is not None and data != False: - # keyword appears alone - return '{}{}\n'.format(self._simulation_data.indent_string, - self.structure.name.upper()) - else: - return '' - elif self.structure.type == DatumType.record: - text_line = [] - index = 0 - for data_item in self.structure.data_item_structures: - if data_item.type == DatumType.keyword and \ - data_item.optional == False: - if isinstance(data, list) or isinstance(data, tuple): - if len(data) > index and (data[index] is not None and - data[index] != False): - text_line.append(data_item.name.upper()) - if isinstance(data[index], str) and \ - data_item.name.upper() != \ - data[index].upper() and data[index] != '': - # since the data does not match the keyword - # assume the keyword was 
excluded - index -= 1 - else: - if data is not None and data != False: - text_line.append(data_item.name.upper()) - else: - if data is not None and data != '': - if isinstance(data, list) or isinstance(data, tuple): - if len(data) > index: - if data[index] is not None and \ - data[index] != False: - current_data = data[index] - else: - break - elif data_item.optional == True: - break - else: - message = 'Missing expected data. Data ' \ - 'size is {}. Index {} not' \ - 'found.'.format(len(data), index) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.structure.get_model(), - self.structure.get_package(), - self._path, - 'getting data', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, message, - self._simulation_data.debug) - - else: - current_data = data - if data_item.type == DatumType.keyword: - if current_data is not None and current_data != \ - False: - text_line.append(data_item.name.upper()) - else: - try: - text_line.append(to_string( - current_data, self._data_type, - self._simulation_data, - self._data_dimensions, - data_item = data_item)) - except Exception as ex: - message = 'Could not convert "{}" of type ' \ - '"{}" to a string' \ - '.'.format(current_data, - self._data_type) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.structure.get_model(), - self.structure.get_package(), - self._path, - 'converting data to string', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, message, - self._simulation_data.debug) - index += 1 - - text = self._simulation_data.indent_string.join(text_line) - return '{}{}\n'.format(self._simulation_data.indent_string, - text) - else: - data_item = self.structure.data_item_structures[0] - try: - if one_based: - if self.structure.type != DatumType.integer: - message = 'Data scalar "{}" can not be one_based ' \ - 'because it is not an integer' \ - '.'.format(self.structure.name) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.structure.get_model(), - self.structure.get_package(), - self._path, - 'storing one based integer', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, message, - self._simulation_data.debug) - data = self._get_storage_obj().get_data() + 1 - else: - data = self._get_storage_obj().get_data() - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'getting data', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, None, - self._simulation_data.debug) - try: - # data - values = to_string(data, self._data_type, self._simulation_data, - self._data_dimensions, data_item=data_item) - except Exception as ex: - message = 'Could not convert "{}" of type "{}" ' \ - 'to a string.'.format(data, - self._data_type) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'converting data to string', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, message, - self._simulation_data.debug) - if values_only: - return '{}{}'.format(self._simulation_data.indent_string, - values) - else: - # keyword + data - return '{}{}{}{}\n'.format(self._simulation_data.indent_string, - self.structure.name.upper(), - self._simulation_data.indent_string, - values) - - def load(self, first_line, file_handle, block_header, - pre_data_comments=None, 
external_file_info=None): - super(MFScalar, self).load(first_line, file_handle, block_header, - pre_data_comments=None, - external_file_info=None) - self._resync() - file_access = MFFileAccessScalar( - self.structure, self._data_dimensions, self._simulation_data, - self._path, self._current_key) - return file_access.load_from_package( - first_line, file_handle, self._get_storage_obj(), self._data_type, - self._keyword, pre_data_comments) - - def _new_storage(self, stress_period=0): - return DataStorage(self._simulation_data, self._model_or_sim, - self._data_dimensions, self.get_file_entry, - DataStorageType.internal_array, - DataStructureType.scalar, - stress_period=stress_period, data_path=self._path) - - def _get_storage_obj(self): - return self._data_storage - - def plot(self, filename_base=None, - file_extension=None, **kwargs): - """ - Helper method to plot scalar objects - - Parameters: - scalar : flopy.mf6.data.mfscalar object - filename_base : str - Base file name that will be used to automatically generate file - names for output image files. Plots will be exported as image - files if file_name_base is not None. (default is None) - file_extension : str - Valid matplotlib.pyplot file extension for savefig(). Only used - if filename_base is not None. (default is 'png') - - Returns: - axes: list matplotlib.axes object - """ - from flopy.plot.plotutil import PlotUtilities - - if not self.plotable: - raise TypeError("Scalar values are not plotable") - - axes = PlotUtilities._plot_scalar_helper(self, - filename_base=filename_base, - file_extension=file_extension, - **kwargs) - return axes - - -class MFScalarTransient(MFScalar, mfdata.MFTransient): - """ - Provides an interface for the user to access and update MODFLOW transient - scalar data. - - Parameters - ---------- - sim_data : MFSimulationData - data contained in the simulation - structure : MFDataStructure - describes the structure of the data - data : list or ndarray - actual data - enable : bool - enable/disable the array - path : tuple - path in the data dictionary to this MFArray - dimensions : MFDataDimensions - dimension information related to the model, package, and array - - Methods - ------- - add_transient_key : (transient_key : int) - Adds a new transient time allowing data for that time to be stored and - retrieved using the key "transient_key" - add_one :(transient_key : int) - Adds one to the data stored at key "transient_key" - get_data : (key : int) : ndarray - Returns the data associated with "key". - set_data : (data : ndarray/list, multiplier : float, key : int) - Sets the contents of the data at time "key" to - "data" with multiplier "multiplier". - load : (first_line : string, file_handle : file descriptor, - block_header : MFBlockHeader, pre_data_comments : MFComment) : - tuple (bool, string) - Loads data from first_line (the first line of data) and open file - file_handle which is pointing to the second line of data. Returns a - tuple with the first item indicating whether all data was read - and the second item being the last line of text read from the file. - get_file_entry : (key : int) : string - Returns a string containing the data at time "key". 
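As a hedged illustration of the transient-scalar pattern described above: storage-package period flags are one place period-keyed scalar settings appear. This assumes the gwf model from the earlier sketch and that the STO steady-state/transient flags are period-keyed scalars:

    sto = flopy.mf6.ModflowGwfsto(gwf, iconvert=1,
                                  steady_state={0: True},
                                  transient={1: True})
    first_sp = sto.steady_state.get_data(key=0)  # True for the first period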
- - See Also - -------- - - Notes - ----- - - Examples - -------- - - - """ - def __init__(self, sim_data, model_or_sim, structure, enable=True, - path=None, dimensions=None): - super(MFScalarTransient, self).__init__(sim_data=sim_data, - model_or_sim=model_or_sim, - structure=structure, - enable=enable, - path=path, - dimensions=dimensions) - self._transient_setup(self._data_storage) - self.repeating = True - - @property - def data_type(self): - return DataType.transientscalar - - @property - def plotable(self): - if self.model is None: - return False - else: - return True - - def add_transient_key(self, key): - super(MFScalarTransient, self).add_transient_key(key) - if isinstance(key, int): - stress_period = key - else: - stress_period = 1 - self._data_storage[key] = \ - super(MFScalarTransient, self)._new_storage(stress_period) - - def add_one(self, key=0): - self._update_record_prep(key) - super(MFScalarTransient, self).add_one() - - def has_data(self, key=None): - if key is None: - data_found = False - for sto_key in self._data_storage.keys(): - self.get_data_prep(sto_key) - data_found = data_found or super(MFScalarTransient, - self).has_data() - if data_found: - break - else: - self.get_data_prep(key) - data_found = super(MFScalarTransient, self).has_data() - return data_found - - def get_data(self, key=0, **kwargs): - self.get_data_prep(key) - return super(MFScalarTransient, self).get_data() - - def set_data(self, data, key=None): - if isinstance(data, dict) or isinstance(data, OrderedDict): - # each item in the dictionary is a list for one stress period - # the dictionary key is the stress period the list is for - for key, list_item in data.items(): - self._set_data_prep(list_item, key) - super(MFScalarTransient, self).set_data(list_item) - else: - self._set_data_prep(data, key) - super(MFScalarTransient, self).set_data(data) - - def get_file_entry(self, key=None, ext_file_action= - ExtFileAction.copy_relative_paths): - if key is None: - file_entry = [] - for sto_key in self._data_storage.keys(): - if self.has_data(sto_key): - self._get_file_entry_prep(sto_key) - text_entry = super(MFScalarTransient, - self).get_file_entry(ext_file_action= - ext_file_action) - file_entry.append(text_entry) - if file_entry > 1: - return '\n\n'.join(file_entry) - elif file_entry == 1: - return file_entry[0] - else: - return '' - else: - self._get_file_entry_prep(key) - return super(MFScalarTransient, - self).get_file_entry(ext_file_action=ext_file_action) - - def load(self, first_line, file_handle, block_header, - pre_data_comments=None, external_file_info=None): - self._load_prep(block_header) - return super(MFScalarTransient, self).load(first_line, file_handle, - pre_data_comments, - external_file_info) - - def _new_storage(self, stress_period=0): - return OrderedDict() - - def _get_storage_obj(self): - if self._current_key is None or \ - self._current_key not in self._data_storage: - return None - return self._data_storage[self._current_key] - - def plot(self, filename_base=None, file_extension=None, - kper=0, fignum=None, **kwargs): - """ - Plot transient scalar model data - - Parameters - ---------- - transientscalar : flopy.mf6.data.mfdatascalar.MFScalarTransient object - filename_base : str - Base file name that will be used to automatically generate file - names for output image files. Plots will be exported as image - files if file_name_base is not None. (default is None) - file_extension : str - Valid matplotlib.pyplot file extension for savefig(). Only used - if filename_base is not None. 
(default is 'png') - **kwargs : dict - axes : list of matplotlib.pyplot.axis - List of matplotlib.pyplot.axis that will be used to plot - data for each layer. If axes=None axes will be generated. - (default is None) - pcolor : bool - Boolean used to determine if matplotlib.pyplot.pcolormesh - plot will be plotted. (default is True) - colorbar : bool - Boolean used to determine if a color bar will be added to - the matplotlib.pyplot.pcolormesh. Only used if pcolor=True. - (default is False) - inactive : bool - Boolean used to determine if a black overlay in inactive - cells in a layer will be displayed. (default is True) - contour : bool - Boolean used to determine if matplotlib.pyplot.contour - plot will be plotted. (default is False) - clabel : bool - Boolean used to determine if matplotlib.pyplot.clabel - will be plotted. Only used if contour=True. (default is False) - grid : bool - Boolean used to determine if the model grid will be plotted - on the figure. (default is False) - masked_values : list - List of unique values to be excluded from the plot. - kper : str - MODFLOW zero-based stress period number to return. If - kper='all' then data for all stress period will be - extracted. (default is zero). - - Returns - ---------- - axes : list - Empty list is returned if filename_base is not None. Otherwise - a list of matplotlib.pyplot.axis is returned. - """ - from flopy.plot.plotutil import PlotUtilities - - if not self.plotable: - raise TypeError("Simulation level packages are not plotable") - - axes = PlotUtilities._plot_transient2d_helper(self, - filename_base=filename_base, - file_extension=file_extension, - kper=kper, - fignum=fignum, - **kwargs) +import sys, inspect +import numpy as np +from ..data.mfstructure import DatumType +from ..data import mfdata +from collections import OrderedDict +from ..mfbase import ExtFileAction, MFDataException +from ...datbase import DataType +from .mfdatautil import convert_data, to_string +from .mffileaccess import MFFileAccessScalar +from .mfdatastorage import DataStorage, DataStructureType, DataStorageType + + +class MFScalar(mfdata.MFData): + """ + Provides an interface for the user to access and update MODFLOW + scalar data. + + Parameters + ---------- + sim_data : MFSimulationData + data contained in the simulation + structure : MFDataStructure + describes the structure of the data + data : list or ndarray + actual data + enable : bool + enable/disable the array + path : tuple + path in the data dictionary to this MFArray + dimensions : MFDataDimensions + dimension information related to the model, package, and array + + Attributes + ---------- + data_type : DataType + type of data stored in the scalar + plotable : bool + if the scalar is plotable + dtype : numpy.dtype + the scalar's numpy data type + data : variable + calls get_data with default parameters + + Methods + ------- + has_data : () : bool + Returns whether this object has data associated with it. + get_data : () : ndarray + Returns the data associated with this object. + set_data : (data : ndarray/list, multiplier : float) + Sets the contents of the data to "data" with + multiplier "multiplier". + load : (first_line : string, file_handle : file descriptor, + block_header : MFBlockHeader, pre_data_comments : MFComment) : + tuple (bool, string) + Loads data from first_line (the first line of data) and open file + file_handle which is pointing to the second line of data. 
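The load path described above is normally driven from the simulation level rather than called directly; a sketch, assuming the hypothetical 'demo' workspace from the earlier example:

    import flopy

    # after sim.write_simulation() has written the 'demo' workspace:
    sim2 = flopy.mf6.MFSimulation.load(sim_name='demo', sim_ws='demo_ws')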
Returns a + tuple with the first item indicating whether all data was read + and the second item being the last line of text read from the file. + get_file_entry : () : string + Returns a string containing the data. + + See Also + -------- + + Notes + ----- + + Examples + -------- + + + """ + def __init__(self, sim_data, model_or_sim, structure, data=None, + enable=True, path=None, dimensions=None): + super(MFScalar, self).__init__(sim_data, model_or_sim, structure, + enable, path, dimensions) + self._data_type = self.structure.data_item_structures[0].type + self._data_storage = self._new_storage() + if data is not None: + self.set_data(data) + + @property + def data_type(self): + return DataType.scalar + + @property + def plotable(self): + return False + + @property + def dtype(self): + if self.structure.type == DatumType.double_precision: + return np.float64 + elif self.structure.type == DatumType.integer: + return np.int32 + elif self.structure.type == DatumType.recarray or \ + self.structure.type == DatumType.record or \ + self.structure.type == DatumType.repeating_record: + for data_item_struct in self.structure.data_item_structures: + if data_item_struct.type == DatumType.double_precision: + return np.float64 + elif data_item_struct.type == DatumType.integer: + return np.int32 + return None + + def has_data(self): + try: + return self._get_storage_obj().has_data() + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'checking for data', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, None, + self._simulation_data.debug, ex) + + @property + def data(self): + return self.get_data() + + def get_data(self, apply_mult=False, **kwargs): + try: + return self._get_storage_obj().get_data(apply_mult=apply_mult) + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'getting data', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, None, + self._simulation_data.debug, ex) + + def set_data(self, data): + self._resync() + if self.structure.type == DatumType.record: + if data is not None: + if not isinstance(data, list) or isinstance(data, np.ndarray) or \ + isinstance(data, tuple): + data = [data] + else: + while isinstance(data, list) or isinstance(data, np.ndarray) or \ + isinstance(data, tuple): + data = data[0] + if (isinstance(data, list) or isinstance(data, tuple)) and \ + len(data) > 1: + self._add_data_line_comment(data[1:], 0) + storage = self._get_storage_obj() + data_struct = self.structure.data_item_structures[0] + try: + converted_data = convert_data(data, self._data_dimensions, + self._data_type, data_struct) + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + comment = 'Could not convert data "{}" to type ' \ + '"{}".'.format(data, self._data_type) + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'converting data', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, comment, + self._simulation_data.debug, ex) + try: + storage.set_data(converted_data, key=self._current_key) + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + comment = 'Could not set data "{}" to type ' \ + '"{}".'.format(data, self._data_type) + raise MFDataException(self.structure.get_model(), + 
self.structure.get_package(),
+                                  self._path,
+                                  'setting data',
+                                  self.structure.name,
+                                  inspect.stack()[0][3], type_,
+                                  value_, traceback_, comment,
+                                  self._simulation_data.debug, ex)
+
+    def add_one(self):
+        datum_type = self.structure.get_datum_type()
+        if datum_type == int or datum_type == np.int32:
+            if self._get_storage_obj().get_data() is None:
+                try:
+                    self._get_storage_obj().set_data(1)
+                except Exception as ex:
+                    type_, value_, traceback_ = sys.exc_info()
+                    comment = 'Could not set data to 1'
+                    raise MFDataException(self.structure.get_model(),
+                                          self.structure.get_package(),
+                                          self._path,
+                                          'setting data',
+                                          self.structure.name,
+                                          inspect.stack()[0][3], type_,
+                                          value_, traceback_, comment,
+                                          self._simulation_data.debug, ex)
+            else:
+                try:
+                    current_val = self._get_storage_obj().get_data()
+                except Exception as ex:
+                    type_, value_, traceback_ = sys.exc_info()
+                    raise MFDataException(self.structure.get_model(),
+                                          self.structure.get_package(),
+                                          self._path,
+                                          'getting data',
+                                          self.structure.name,
+                                          inspect.stack()[0][3], type_,
+                                          value_, traceback_, None,
+                                          self._simulation_data.debug, ex)
+                try:
+                    self._get_storage_obj().set_data(current_val + 1)
+                except Exception as ex:
+                    type_, value_, traceback_ = sys.exc_info()
+                    comment = 'Could not increment data "{}" by one' \
+                              '.'.format(current_val)
+                    raise MFDataException(self.structure.get_model(),
+                                          self.structure.get_package(),
+                                          self._path,
+                                          'setting data',
+                                          self.structure.name,
+                                          inspect.stack()[0][3], type_,
+                                          value_, traceback_, comment,
+                                          self._simulation_data.debug, ex)
+        else:
+            message = '{} of type {} does not support add one ' \
+                      'operation.'.format(self._data_name,
+                                          self.structure.get_datum_type())
+            type_, value_, traceback_ = sys.exc_info()
+            raise MFDataException(self.structure.get_model(),
+                                  self.structure.get_package(),
+                                  self._path,
+                                  'adding one to scalar',
+                                  self.structure.name,
+                                  inspect.stack()[0][3], type_,
+                                  value_, traceback_, message,
+                                  self._simulation_data.debug)
+
+    def get_file_entry(self, values_only=False, one_based=False,
+                       ext_file_action=ExtFileAction.copy_relative_paths):
+        storage = self._get_storage_obj()
+        try:
+            if storage is None or \
+                    self._get_storage_obj().get_data() is None:
+                return ''
+        except Exception as ex:
+            type_, value_, traceback_ = sys.exc_info()
+            raise MFDataException(self.structure.get_model(),
+                                  self.structure.get_package(),
+                                  self._path,
+                                  'getting data',
+                                  self.structure.name,
+                                  inspect.stack()[0][3], type_,
+                                  value_, traceback_, None,
+                                  self._simulation_data.debug, ex)
+        if self.structure.type == DatumType.keyword or self.structure.type ==\
+                DatumType.record:
+            try:
+                data = storage.get_data()
+            except Exception as ex:
+                type_, value_, traceback_ = sys.exc_info()
+                raise MFDataException(self.structure.get_model(),
+                                      self.structure.get_package(),
+                                      self._path,
+                                      'getting data',
+                                      self.structure.name,
+                                      inspect.stack()[0][3], type_,
+                                      value_, traceback_, None,
+                                      self._simulation_data.debug, ex)
+            if self.structure.type == DatumType.keyword:
+                if data is not None and data != False:
+                    # keyword appears alone
+                    return '{}{}\n'.format(self._simulation_data.indent_string,
+                                           self.structure.name.upper())
+                else:
+                    return ''
+            elif self.structure.type == DatumType.record:
+                text_line = []
+                index = 0
+                for data_item in self.structure.data_item_structures:
+                    if data_item.type == DatumType.keyword and \
+                            not data_item.optional:
+                        if isinstance(data, list) or isinstance(data, tuple):
+                            if len(data) > index and (data[index] is not None and
+                                                      data[index] != False):
+
text_line.append(data_item.name.upper()) + if isinstance(data[index], str) and \ + data_item.name.upper() != \ + data[index].upper() and data[index] != '': + # since the data does not match the keyword + # assume the keyword was excluded + index -= 1 + else: + if data is not None and data != False: + text_line.append(data_item.name.upper()) + else: + if data is not None and data != '': + if isinstance(data, list) or isinstance(data, tuple): + if len(data) > index: + if data[index] is not None and \ + data[index] != False: + current_data = data[index] + else: + break + elif data_item.optional == True: + break + else: + message = 'Missing expected data. Data ' \ + 'size is {}. Index {} not' \ + 'found.'.format(len(data), index) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.structure.get_model(), + self.structure.get_package(), + self._path, + 'getting data', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, message, + self._simulation_data.debug) + + else: + current_data = data + if data_item.type == DatumType.keyword: + if current_data is not None and current_data != \ + False: + text_line.append(data_item.name.upper()) + else: + try: + text_line.append(to_string( + current_data, self._data_type, + self._simulation_data, + self._data_dimensions, + data_item = data_item)) + except Exception as ex: + message = 'Could not convert "{}" of type ' \ + '"{}" to a string' \ + '.'.format(current_data, + self._data_type) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.structure.get_model(), + self.structure.get_package(), + self._path, + 'converting data to string', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, message, + self._simulation_data.debug) + index += 1 + + text = self._simulation_data.indent_string.join(text_line) + return '{}{}\n'.format(self._simulation_data.indent_string, + text) + else: + data_item = self.structure.data_item_structures[0] + try: + if one_based: + if self.structure.type != DatumType.integer: + message = 'Data scalar "{}" can not be one_based ' \ + 'because it is not an integer' \ + '.'.format(self.structure.name) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.structure.get_model(), + self.structure.get_package(), + self._path, + 'storing one based integer', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, message, + self._simulation_data.debug) + data = self._get_storage_obj().get_data() + 1 + else: + data = self._get_storage_obj().get_data() + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'getting data', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, None, + self._simulation_data.debug) + try: + # data + values = to_string(data, self._data_type, self._simulation_data, + self._data_dimensions, data_item=data_item) + except Exception as ex: + message = 'Could not convert "{}" of type "{}" ' \ + 'to a string.'.format(data, + self._data_type) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'converting data to string', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, message, + self._simulation_data.debug) + if values_only: + return '{}{}'.format(self._simulation_data.indent_string, + values) + else: + # keyword + data + return 
'{}{}{}{}\n'.format(self._simulation_data.indent_string, + self.structure.name.upper(), + self._simulation_data.indent_string, + values) + + def load(self, first_line, file_handle, block_header, + pre_data_comments=None, external_file_info=None): + super(MFScalar, self).load(first_line, file_handle, block_header, + pre_data_comments=None, + external_file_info=None) + self._resync() + file_access = MFFileAccessScalar( + self.structure, self._data_dimensions, self._simulation_data, + self._path, self._current_key) + return file_access.load_from_package( + first_line, file_handle, self._get_storage_obj(), self._data_type, + self._keyword, pre_data_comments) + + def _new_storage(self, stress_period=0): + return DataStorage(self._simulation_data, self._model_or_sim, + self._data_dimensions, self.get_file_entry, + DataStorageType.internal_array, + DataStructureType.scalar, + stress_period=stress_period, data_path=self._path) + + def _get_storage_obj(self): + return self._data_storage + + def plot(self, filename_base=None, + file_extension=None, **kwargs): + """ + Helper method to plot scalar objects + + Parameters: + scalar : flopy.mf6.data.mfscalar object + filename_base : str + Base file name that will be used to automatically generate file + names for output image files. Plots will be exported as image + files if file_name_base is not None. (default is None) + file_extension : str + Valid matplotlib.pyplot file extension for savefig(). Only used + if filename_base is not None. (default is 'png') + + Returns: + axes: list matplotlib.axes object + """ + from flopy.plot.plotutil import PlotUtilities + + if not self.plotable: + raise TypeError("Scalar values are not plotable") + + axes = PlotUtilities._plot_scalar_helper(self, + filename_base=filename_base, + file_extension=file_extension, + **kwargs) + return axes + + +class MFScalarTransient(MFScalar, mfdata.MFTransient): + """ + Provides an interface for the user to access and update MODFLOW transient + scalar data. + + Parameters + ---------- + sim_data : MFSimulationData + data contained in the simulation + structure : MFDataStructure + describes the structure of the data + data : list or ndarray + actual data + enable : bool + enable/disable the array + path : tuple + path in the data dictionary to this MFArray + dimensions : MFDataDimensions + dimension information related to the model, package, and array + + Methods + ------- + add_transient_key : (transient_key : int) + Adds a new transient time allowing data for that time to be stored and + retrieved using the key "transient_key" + add_one :(transient_key : int) + Adds one to the data stored at key "transient_key" + get_data : (key : int) : ndarray + Returns the data associated with "key". + set_data : (data : ndarray/list, multiplier : float, key : int) + Sets the contents of the data at time "key" to + "data" with multiplier "multiplier". + load : (first_line : string, file_handle : file descriptor, + block_header : MFBlockHeader, pre_data_comments : MFComment) : + tuple (bool, string) + Loads data from first_line (the first line of data) and open file + file_handle which is pointing to the second line of data. Returns a + tuple with the first item indicating whether all data was read + and the second item being the last line of text read from the file. + get_file_entry : (key : int) : string + Returns a string containing the data at time "key". 
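The keyed methods listed above all funnel through one store per stress period (an OrderedDict keyed by the transient key, as _new_storage shows below). A minimal plain-Python sketch of that pattern -- illustrative names only, not the flopy API:

    from collections import OrderedDict

    storage = OrderedDict()              # mirrors _new_storage() returning OrderedDict

    def set_scalar(data, key=None):
        # dict form: {zero-based stress period: value}, as in set_data below
        if isinstance(data, dict):
            storage.update(data)
        else:
            storage[key] = data

    set_scalar({0: 10, 1: 15})
    set_scalar(20, key=2)
    print(storage)                       # OrderedDict([(0, 10), (1, 15), (2, 20)])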
+
+    See Also
+    --------
+
+    Notes
+    -----
+
+    Examples
+    --------
+
+
+    """
+    def __init__(self, sim_data, model_or_sim, structure, enable=True,
+                 path=None, dimensions=None):
+        super(MFScalarTransient, self).__init__(sim_data=sim_data,
+                                                model_or_sim=model_or_sim,
+                                                structure=structure,
+                                                enable=enable,
+                                                path=path,
+                                                dimensions=dimensions)
+        self._transient_setup(self._data_storage)
+        self.repeating = True
+
+    @property
+    def data_type(self):
+        return DataType.transientscalar
+
+    @property
+    def plotable(self):
+        return self.model is not None
+
+    def add_transient_key(self, key):
+        super(MFScalarTransient, self).add_transient_key(key)
+        if isinstance(key, int):
+            stress_period = key
+        else:
+            stress_period = 1
+        self._data_storage[key] = \
+            super(MFScalarTransient, self)._new_storage(stress_period)
+
+    def add_one(self, key=0):
+        self._update_record_prep(key)
+        super(MFScalarTransient, self).add_one()
+
+    def has_data(self, key=None):
+        if key is None:
+            data_found = False
+            for sto_key in self._data_storage.keys():
+                self.get_data_prep(sto_key)
+                data_found = data_found or super(MFScalarTransient,
+                                                 self).has_data()
+                if data_found:
+                    break
+        else:
+            self.get_data_prep(key)
+            data_found = super(MFScalarTransient, self).has_data()
+        return data_found
+
+    def get_data(self, key=0, **kwargs):
+        self.get_data_prep(key)
+        return super(MFScalarTransient, self).get_data()
+
+    def set_data(self, data, key=None):
+        if isinstance(data, dict):
+            # each item in the dictionary is a list for one stress period
+            # the dictionary key is the stress period the list is for
+            for key, list_item in data.items():
+                self._set_data_prep(list_item, key)
+                super(MFScalarTransient, self).set_data(list_item)
+        else:
+            self._set_data_prep(data, key)
+            super(MFScalarTransient, self).set_data(data)
+
+    def get_file_entry(self, key=None,
+                       ext_file_action=ExtFileAction.copy_relative_paths):
+        if key is None:
+            file_entry = []
+            for sto_key in self._data_storage.keys():
+                if self.has_data(sto_key):
+                    self._get_file_entry_prep(sto_key)
+                    text_entry = super(MFScalarTransient,
+                                       self).get_file_entry(ext_file_action=
+                                                            ext_file_action)
+                    file_entry.append(text_entry)
+            if len(file_entry) > 1:
+                return '\n\n'.join(file_entry)
+            elif len(file_entry) == 1:
+                return file_entry[0]
+            else:
+                return ''
+        else:
+            self._get_file_entry_prep(key)
+            return super(MFScalarTransient,
+                         self).get_file_entry(ext_file_action=ext_file_action)
+
+    def load(self, first_line, file_handle, block_header,
+             pre_data_comments=None, external_file_info=None):
+        self._load_prep(block_header)
+        return super(MFScalarTransient, self).load(first_line, file_handle,
+                                                   block_header,
+                                                   pre_data_comments,
+                                                   external_file_info)
+
+    def _new_storage(self, stress_period=0):
+        return OrderedDict()
+
+    def _get_storage_obj(self):
+        if self._current_key is None or \
+                self._current_key not in self._data_storage:
+            return None
+        return self._data_storage[self._current_key]
+
+    def plot(self, filename_base=None, file_extension=None,
+             kper=0, fignum=None, **kwargs):
+        """
+        Plot transient scalar model data
+
+        Parameters
+        ----------
+        filename_base : str
+            Base file name that will be used to automatically generate file
+            names for output image files. Plots will be exported as image
+            files if filename_base is not None. (default is None)
+        file_extension : str
+            Valid matplotlib.pyplot file extension for savefig(). Only used
+            if filename_base is not None.
(default is 'png') + **kwargs : dict + axes : list of matplotlib.pyplot.axis + List of matplotlib.pyplot.axis that will be used to plot + data for each layer. If axes=None axes will be generated. + (default is None) + pcolor : bool + Boolean used to determine if matplotlib.pyplot.pcolormesh + plot will be plotted. (default is True) + colorbar : bool + Boolean used to determine if a color bar will be added to + the matplotlib.pyplot.pcolormesh. Only used if pcolor=True. + (default is False) + inactive : bool + Boolean used to determine if a black overlay in inactive + cells in a layer will be displayed. (default is True) + contour : bool + Boolean used to determine if matplotlib.pyplot.contour + plot will be plotted. (default is False) + clabel : bool + Boolean used to determine if matplotlib.pyplot.clabel + will be plotted. Only used if contour=True. (default is False) + grid : bool + Boolean used to determine if the model grid will be plotted + on the figure. (default is False) + masked_values : list + List of unique values to be excluded from the plot. + kper : str + MODFLOW zero-based stress period number to return. If + kper='all' then data for all stress period will be + extracted. (default is zero). + + Returns + ---------- + axes : list + Empty list is returned if filename_base is not None. Otherwise + a list of matplotlib.pyplot.axis is returned. + """ + from flopy.plot.plotutil import PlotUtilities + + if not self.plotable: + raise TypeError("Simulation level packages are not plotable") + + axes = PlotUtilities._plot_transient2d_helper(self, + filename_base=filename_base, + file_extension=file_extension, + kper=kper, + fignum=fignum, + **kwargs) return axes \ No newline at end of file diff --git a/flopy/mf6/data/mfdatastorage.py b/flopy/mf6/data/mfdatastorage.py index 8524426243..317ad3e537 100644 --- a/flopy/mf6/data/mfdatastorage.py +++ b/flopy/mf6/data/mfdatastorage.py @@ -1,2033 +1,2033 @@ -from copy import deepcopy -import sys -import inspect -from shutil import copyfile -from collections import OrderedDict -from enum import Enum -import numpy as np -from ..mfbase import MFDataException, VerbosityLevel -from ..data.mfstructure import DatumType, MFDataItemStructure -from ..data import mfdatautil -from ...utils.datautil import DatumUtil, FileIter, MultiListIter, PyListUtil, \ - ArrayIndexIter, MultiList -from .mfdatautil import convert_data, MFComment -from .mffileaccess import MFFileAccessArray, MFFileAccessList, MFFileAccess - - -class DataStorageType(Enum): - """ - Enumeration of different ways that data can be stored - """ - internal_array = 1 - internal_constant = 2 - external_file = 3 - - -class DataStructureType(Enum): - """ - Enumeration of different data structures used to store data - """ - ndarray = 1 - recarray = 2 - scalar = 3 - - -class LayerStorage(object): - """ - Stores a single layer of data. 
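The three DataStorageType members in the hunk above correspond one-to-one to the array control records that _get_layer_header_str later emits. A small sketch of that mapping -- the enum is copied from this file, while the header text follows the standard MF6 'INTERNAL', 'CONSTANT', and 'OPEN/CLOSE' input conventions:

    from enum import Enum

    class DataStorageType(Enum):
        internal_array = 1
        internal_constant = 2
        external_file = 3

    mf6_headers = {
        DataStorageType.internal_array: "INTERNAL FACTOR 1.0",
        DataStorageType.internal_constant: "CONSTANT <value>",
        DataStorageType.external_file: "OPEN/CLOSE '<fname>' FACTOR 1.0",
    }
    for member, header in mf6_headers.items():
        print('{:18s} -> {}'.format(member.name, header))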
- - Parameters - ---------- - data_storage : DataStorage - Parent data storage object that layer is contained in - lay_num : int - Layer number of layered being stored - data_storage_type : DataStorageType - Method used to store the data - - Attributes - ---------- - internal_data : ndarray or recarray - data being stored, if full data is being stored internally in memory - data_const_value : int/float - constant value of data being stored, if data is a constant - data_storage_type : DataStorageType - method used to store the data - fname : str - file name of external file containing the data - factor : int/float - factor to multiply the data by - iprn : int - print code - binary : bool - whether the data is stored in a binary file - - Methods - ------- - get_const_val(layer) - gets the constant value of a given layer. data storage type for layer - must be "internal_constant". - get_data(layer) : ndarray/recarray/string - returns the data for the specified layer - set_data(data, layer=None, multiplier=[1.0] - sets the data being stored to "data" for layer "layer", replacing all - data for that layer. a multiplier can be specified. - - See Also - -------- - - Notes - ----- - - Examples - -------- - - - """ - - def __init__(self, data_storage, lay_indexes, - data_storage_type=DataStorageType.internal_array, - data_type=None): - self._data_storage_parent = data_storage - self._lay_indexes = lay_indexes - self.internal_data = None - self.data_const_value = None - self.data_storage_type = data_storage_type - self.data_type = data_type - self.fname = None - if self.data_type == DatumType.integer: - self.factor = 1 - else: - self.factor = 1.0 - self.iprn = None - self.binary = False - - def set_internal_constant(self): - self.data_storage_type = DataStorageType.internal_constant - - def set_internal_array(self): - self.data_storage_type = DataStorageType.internal_array - - @property - def name(self): - return self._data_storage_parent.data_dimensions.structure.name - - def __repr__(self): - if self.data_storage_type == DataStorageType.internal_constant: - return 'constant {}'.format(self.get_data_const_val()) - else: - return repr(self.get_data()) - - def __str__(self): - if self.data_storage_type == DataStorageType.internal_constant: - return '{}'.format(self.get_data_const_val()) - else: - return str(self.get_data()) - - def __getattr__(self, attr): - if attr == 'binary' or not hasattr(self, 'binary'): - raise AttributeError(attr) - - if attr == 'array': - return self._data_storage_parent.get_data(self._lay_indexes, True) - elif attr == '__getstate__': - raise AttributeError(attr) - - def set_data(self, data): - self._data_storage_parent.set_data(data, self._lay_indexes, [self.factor]) - - def get_data(self): - return self._data_storage_parent.get_data(self._lay_indexes, False) - - def get_data_const_val(self): - if isinstance(self.data_const_value, list): - return self.data_const_value[0] - else: - return self.data_const_value - - -class DataStorage(object): - """ - Stores and retrieves data. 
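Because DataStorage keeps one LayerStorage per model layer, a layered MF6 array can mix CONSTANT and INTERNAL layers. A plain-Python stand-in for that arrangement -- not the flopy classes themselves, though the attribute names mirror LayerStorage above:

    class Layer(object):
        def __init__(self, const=None, array=None):
            self.data_const_value = const    # set for a CONSTANT layer
            self.internal_data = array       # set for an INTERNAL layer

    layers = [Layer(const=1.0), Layer(array=[2.0, 3.0, 4.0])]
    for lay_num, lay in enumerate(layers):
        if lay.data_const_value is not None:
            print('layer {}: constant {}'.format(lay_num + 1,
                                                 lay.data_const_value))
        else:
            print('layer {}: internal {}'.format(lay_num + 1,
                                                 lay.internal_data))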
- - - Parameters - ---------- - sim_data : simulation data class - reference to the simulation data class - data_dimensions : data dimensions class - a data dimensions class for the data being stored - get_file_entry : method reference - method that returns the file entry for the stored data - data_storage_type : enum - how the data will be stored (internally, as a constant, as an external - file) - data_structure_type : enum - what internal type is the data stored in (ndarray, recarray, scalar) - layer_shape : int - number of data layers - layered : boolean - is the data layered - layer_storage : MultiList - one or more dimensional list of LayerStorage - - Attributes - ---------- - data_storage_type : list - list of data storage types, one for each layer - data_const_value : list - list of data constants, one for each layer - external_file_path : list - list of external file paths, one for each layer - multiplier : list - list of multipliers, one for each layer - print_format : list - list of print formats, one for each layer - data_structure_type : - what internal type is the data stored in (ndarray, recarray, scalar) - layered : boolean - is the data layered - pre_data_comments : string - any comments before the start of the data - comments : OrderedDict - any comments mixed in with the data, dictionary keys are data lines - post_data_comments : string - any comments after the end of the data - - Methods - ------- - override_data_type : (index, data_type) - overrides the data type used in a recarray at index "index" with data - type "data_type" - get_external_file_path(layer) - gets the path to an external file for layer "layer" - get_const_val(layer) - gets the constant value of a given layer. data storage type for layer - must be "internal_constant". - has_data(layer) : boolean - returns true if data exists for the specified layer, false otherwise - get_data(layer) : ndarray/recarray/string - returns the data for the specified layer - update_item(data, key_index) - updates the data in a recarray at index "key_index" with data "data". - data is a list containing all data for a single record in the - recarray. . data structure type must be recarray - append_data(data) - appends data "data" to the end of a recarray. data structure type must - be recarray - set_data(data, layer=None, multiplier=[1.0] - sets the data being stored to "data" for layer "layer", replacing all - data for that layer. a multiplier can be specified. - get_active_layer_indices() : list - returns the indices of all layers expected to contain data - store_internal(data, layer=None, const=False, multiplier=[1.0]) - store data "data" at layer "layer" internally - store_external(file_path, layer=None, multiplier=[1.0], print_format=None, - data=None, do_not_verify=False) store data "data" at layer "layer" - externally in file "file_path" - external_to_external(new_external_file, multiplier=None, layer=None) - copies existing external data to the new file location and points to - the new file - external_to_internal(layer_num=None, store_internal=False) : - ndarray/recarray - loads existing external data for layer "layer_num" and returns it. if - store_internal is True it also storages the data internally, - changing the storage type for "layer_num" layer to internal. 
- internal_to_external(new_external_file, multiplier=None, layer=None, - print_format=None) - stores existing internal data for layer "layer" to external file - "new_external_file" - read_data_from_file(layer, fd=None, multiplier=None) : (ndarray, int) - reads in data from a given file "fd" as data from layer "layer". - returns data as an ndarray along with the size of the data - to_string(val, type, is_cellid=False, possible_cellid=False) - converts data "val" of type "type" to a string. is_cellid is True if - the data type is known to be a cellid and is treated as such. when - possible_cellid is True the data is checked to see if it matches the - shape/dimensions of a cellid before using it as one. - resolve_data_size(index) : int - resolves the size of a given data element in a recarray based on the - names in the existing rec_array. assumes repeating data element - names follow the format _X. returns the number of - times the data element repeats. - flatten() - converts layered data to a non-layered data - make_layered() - converts non-layered data to layered data - - See Also - -------- - - Notes - ----- - - Examples - -------- - - - """ - def __init__(self, sim_data, model_or_sim, data_dimensions, get_file_entry, - data_storage_type=DataStorageType.internal_array, - data_structure_type=DataStructureType.ndarray, - layer_shape=(1,), layered=False, stress_period=0, - data_path=()): - self.data_dimensions = data_dimensions - self._model_or_sim = model_or_sim - self._simulation_data = sim_data - self._get_file_entry = get_file_entry - self._data_type_overrides = {} - self._data_storage_type = data_storage_type - self._stress_period = stress_period - self._data_path = data_path - if not data_structure_type == DataStructureType.recarray: - self._data_type = self.data_dimensions.structure.\ - get_datum_type(return_enum_type=True) - else: - self._data_type = None - self.layer_storage = MultiList(shape=layer_shape, - callback=self._create_layer) - #self.layer_storage = [LayerStorage(self, x, data_storage_type) - # for x in range(layer_shape)] - self.data_structure_type = data_structure_type - package_dim = self.data_dimensions.package_dim - self.in_model = self.data_dimensions is not None and \ - len(package_dim.package_path) > 1 and \ - package_dim.model_dim[0].model_name.lower() == \ - package_dim.package_path[0] - - if data_structure_type == DataStructureType.recarray: - self.build_type_list(resolve_data_shape=False) - - self.layered = layered - - # initialize comments - self.pre_data_comments = None - self.comments = OrderedDict() - - def __repr__(self): - return self.get_data_str(True) - - def __str__(self): - return self.get_data_str(False) - - def _create_layer(self, indexes): - return LayerStorage(self, indexes, self._data_storage_type, - self._data_type) - - def flatten(self): - self.layered = False - storage_type = self.layer_storage.first_item().data_storage_type - self.layer_storage = MultiList(mdlist=[LayerStorage(self, 0, - storage_type, - self._data_type)]) - - def make_layered(self): - if not self.layered: - if self.data_structure_type != DataStructureType.ndarray: - message = 'Data structure type "{}" does not support ' \ - 'layered data.'.format(self.data_structure_type) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.data_dimensions.structure.get_model(), - self.data_dimensions.structure.get_package(), - self.data_dimensions.structure.path, 'making data layered', - self.data_dimensions.structure.name, inspect.stack()[0][3], - type_, value_, 
traceback_, message, - self._simulation_data.debug) - if self.layer_storage.first_item().data_storage_type == \ - DataStorageType.external_file: - message = 'Converting external file data into layered ' \ - 'data currently not support.' - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.data_dimensions.structure.get_model(), - self.data_dimensions.structure.get_package(), - self.data_dimensions.structure.path, 'making data layered', - self.data_dimensions.structure.name, inspect.stack()[0][3], - type_, value_, traceback_, message, - self._simulation_data.debug) - - previous_storage = self.layer_storage.first_item() - data = previous_storage.get_data() - data_dim = self.get_data_dimensions(None) - self.layer_storage = MultiList(shape=(data_dim[0],), - callback=self._create_layer) - if previous_storage.data_storage_type == \ - DataStorageType.internal_constant: - for storage in self.layer_storage.elements(): - storage.data_const_value = \ - previous_storage.data_const_value - elif previous_storage.data_storage_type == \ - DataStorageType.internal_array: - data_ml = MultiList(data) - if not (data_ml.get_total_size() == - self.layer_storage.get_total_size()): - message = 'Size of data ({}) does not match expected ' \ - 'value of {}' \ - '.'.format(data_ml.get_total_size(), - self.layer_storage.get_total_size()) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.data_dimensions.structure.get_model(), - self.data_dimensions.structure.get_package(), - self.data_dimensions.structure.path, - 'making data layered', - self.data_dimensions.structure.name, - inspect.stack()[0][3], - type_, value_, traceback_, message, - self._simulation_data.debug) - for data_layer, storage in zip(data, - self.layer_storage.elements()): - storage.internal_data = data_layer - storage.factor = previous_storage.factor - storage.iprn = previous_storage.iprn - self.layered = True - - def get_data_str(self, formal): - data_str = '' - # Assemble strings for internal array data - for index, storage in enumerate(self.layer_storage.elements()): - if storage.data_storage_type == DataStorageType.internal_array: - if storage.internal_data is not None: - header = self._get_layer_header_str(index) - if formal: - if self.layered: - data_str = '{}Layer_{}{{{}}}' \ - '\n({})\n'.format(data_str, index + 1, - header, repr(storage)) - else: - data_str = '{}{{{}}}\n({})\n'.format(data_str, - header, - repr(storage)) - else: - data_str = '{}{{{}}}\n({})\n'.format(data_str, header, - str(storage)) - elif storage.data_storage_type == \ - DataStorageType.internal_constant: - if storage.data_const_value is not None: - data_str = '{}{{{}}}' \ - '\n'.format(data_str, - self._get_layer_header_str(index)) - return data_str - - def _get_layer_header_str(self, layer): - header_list = [] - if self.layer_storage[layer].data_storage_type == \ - DataStorageType.external_file: - header_list.append('open/close ' - '{}'.format(self.layer_storage[layer].fname)) - elif self.layer_storage[layer].data_storage_type == \ - DataStorageType.internal_constant: - header_list.append('constant {}'.format(self.layer_storage[layer])) - else: - header_list.append('internal') - if self.layer_storage[layer].factor != 1.0 and \ - self.layer_storage[layer].factor != 1 and \ - self.data_structure_type != DataStructureType.recarray: - header_list.append('factor ' - '{}'.format(self.layer_storage[layer].factor)) - if self.layer_storage[layer].iprn is not None: - header_list.append('iprn ' - 
'{}'.format(self.layer_storage[layer].iprn)) - if len(header_list) > 0: - return ', '.join(header_list) - else: - return '' - - def init_layers(self, dimensions): - self.layer_storage= MultiList(shape=dimensions, - callback=self._create_layer) - - def add_layer(self, dimension=2): - self.layer_storage.increment_dimension(dimension, self._create_layer) - - def override_data_type(self, index, data_type): - self._data_type_overrides[index] = data_type - - def get_external_file_path(self, layer): - if layer is None: - return self.layer_storage[0].fname - else: - return self.layer_storage[layer].fname - - def get_const_val(self, layer=None): - if layer is None: - if not self.layer_storage.get_total_size() >= 1: - message = 'Can not get constant value. No data is available.' - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.data_dimensions.structure.get_model(), - self.data_dimensions.structure.get_package(), - self.data_dimensions.structure.path, - 'getting constant value', - self.data_dimensions.structure.name, - inspect.stack()[0][3], - type_, value_, traceback_, message, - self._simulation_data.debug) - first_item = self.layer_storage.first_item() - if not first_item.data_storage_type == \ - DataStorageType.internal_constant: - message = 'Can not get constant value. Storage type must be ' \ - 'internal_constant.' - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.data_dimensions.structure.get_model(), - self.data_dimensions.structure.get_package(), - self.data_dimensions.structure.path, - 'getting constant value', - self.data_dimensions.structure.name, - inspect.stack()[0][3], - type_, value_, traceback_, message, - self._simulation_data.debug) - - return first_item.get_data_const_val() - else: - if not self.layer_storage.in_shape(layer): - message = 'Can not get constant value. Layer "{}" is not a ' \ - 'valid layer.'.format(layer) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.data_dimensions.structure.get_model(), - self.data_dimensions.structure.get_package(), - self.data_dimensions.structure.path, - 'getting constant value', - self.data_dimensions.structure.name, - inspect.stack()[0][3], - type_, value_, traceback_, message, - self._simulation_data.debug) - if not self.layer_storage[layer].data_storage_type == \ - DataStorageType.internal_constant: - message = 'Can not get constant value. Storage type must be ' \ - 'internal_constant.' 
- type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.data_dimensions.structure.get_model(), - self.data_dimensions.structure.get_package(), - self.data_dimensions.structure.path, - 'getting constant value', - self.data_dimensions.structure.name, - inspect.stack()[0][3], - type_, value_, traceback_, message, - self._simulation_data.debug) - return self.layer_storage[layer].get_data_const_val() - - def has_data(self, layer=None): - ret_val = self._access_data(layer, False) - return ret_val is not None and ret_val != False - - def get_data(self, layer=None, apply_mult=True): - return self._access_data(layer, True, apply_mult=apply_mult) - - def _access_data(self, layer, return_data=False, apply_mult=True): - layer_check = self._resolve_layer(layer) - if (self.layer_storage[layer_check].internal_data is None and - self.layer_storage[layer_check].data_storage_type == - DataStorageType.internal_array) or \ - (self.layer_storage[ - layer_check].data_const_value is None and - self.layer_storage[layer_check].data_storage_type == - DataStorageType.internal_constant): - return None - if layer is None and \ - (self.data_structure_type == DataStructureType.ndarray or \ - self.data_structure_type == DataStructureType.scalar) and \ - return_data: - # return data from all layers - data = self._build_full_data(apply_mult) - if data is None: - if self.layer_storage.first_item().data_storage_type == \ - DataStorageType.internal_constant: - return self.layer_storage.first_item(). \ - get_data()[0] - else: - return data - - if self.layer_storage[layer_check].data_storage_type == \ - DataStorageType.external_file: - if return_data: - return self.external_to_internal(layer) - else: - return True - else: - if self.data_structure_type == DataStructureType.ndarray and \ - self.layer_storage[layer_check].data_const_value is None and \ - self.layer_storage[layer_check].internal_data is None: - return None - if not (layer is None or self.layer_storage.in_shape(layer)): - message = 'Layer "{}" is an invalid layer.'.format(layer) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.data_dimensions.structure.get_model(), - self.data_dimensions.structure.get_package(), - self.data_dimensions.structure.path, - 'accessing data', - self.data_dimensions.structure.name, - inspect.stack()[0][3], - type_, value_, traceback_, message, - self._simulation_data.debug) - if layer is None: - if self.data_structure_type == DataStructureType.ndarray or \ - self.data_structure_type == DataStructureType.scalar: - if self.data_structure_type == DataStructureType.scalar: - return self.layer_storage.first_item().\ - internal_data is not None - check_storage = self.layer_storage[layer_check] - return (check_storage.data_const_value is not None and - check_storage.data_storage_type == - DataStorageType.internal_constant) or ( - check_storage.internal_data is not None and - check_storage.data_storage_type == - DataStorageType.internal_array) - else: - if self.layer_storage[layer_check].data_storage_type == \ - DataStorageType.internal_constant: - if return_data: - # recarray stored as a constant. currently only - # support grid-based constant recarrays. build - # a recarray of all cells - data_list = [] - model_grid = self.data_dimensions.get_model_grid() - structure = self.data_dimensions.structure - package_dim = self.data_dimensions.package_dim - for cellid in model_grid.get_all_model_cells(): - data_line = (cellid,) + \ - (self.layer_storage.first_item(). 
- data_const_value,) - if len(structure.data_item_structures) > 2: - # append None any expected optional data - for data_item_struct in \ - structure.data_item_structures[2:]: - if (data_item_struct.name != - 'boundname' or - package_dim.boundnames()): - data_line = data_line + (None,) - data_list.append(data_line) - return np.rec.array(data_list, - self._recarray_type_list) - else: - return self.layer_storage[layer_check - ].data_const_value is not None - else: - if return_data: - return self.layer_storage.first_item().\ - internal_data - else: - return True - elif self.layer_storage[layer].data_storage_type == \ - DataStorageType.internal_array: - if return_data: - return self.layer_storage[layer].internal_data - else: - return self.layer_storage[layer].internal_data is not None - elif self.layer_storage[layer].data_storage_type == \ - DataStorageType.internal_constant: - layer_storage = self.layer_storage[layer] - if return_data: - data = self._fill_const_layer(layer) - if data is None: - if layer_storage.data_storage_type == \ - DataStructureType.internal_constant: - return layer_storage.data_const_value[0] - else: - return data - else: - return layer_storage.data_const_value is not None - else: - if return_data: - return self.get_external(layer) - else: - return True - - def append_data(self, data): - # currently only support appending to recarrays - if not (self.data_structure_type == DataStructureType.recarray): - message = 'Can not append to data structure "{}". Can only ' \ - 'append to a recarray datastructure' \ - '.'.format(self.data_structure_type) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.data_dimensions.structure.get_model(), - self.data_dimensions.structure.get_package(), - self.data_dimensions.structure.path, - 'appending data', - self.data_dimensions.structure.name, - inspect.stack()[0][3], - type_, value_, traceback_, message, - self._simulation_data.debug) - internal_data = self.layer_storage.first_item().internal_data - if internal_data is None: - if len(data[0]) != len(self._recarray_type_list): - # rebuild type list using existing data as a guide - self.build_type_list(data=data) - self.set_data(np.rec.array(data, self._recarray_type_list)) - else: - if len(self.layer_storage.first_item().internal_data[0]) < \ - len(data[0]): - # Rebuild recarray to fit larger size - count = 0 - last_count = len(data[0]) - len(internal_data[0]) - while count < last_count: - self._duplicate_last_item() - count += 1 - internal_data_list = internal_data.tolist() - for data_item in data: - internal_data_list.append(data_item) - self._add_placeholders(internal_data_list) - self.set_data(np.rec.array(internal_data_list, - self._recarray_type_list)) - else: - if len(self.layer_storage.first_item().internal_data[0]) \ - > len(data[0]): - # Add placeholders to data - self._add_placeholders(data) - self.set_data(np.hstack( - (internal_data, np.rec.array(data, - self._recarray_type_list)))) - - def set_data(self, data, layer=None, multiplier=None, key=None, - autofill=False): - if multiplier is None: - multiplier = [1.0] - if self.data_structure_type == DataStructureType.recarray or \ - self.data_structure_type == DataStructureType.scalar: - self._set_list(data, layer, multiplier, key, autofill) - else: - self._set_array(data, layer, multiplier, key, autofill) - - def _set_list(self, data, layer, multiplier, key, autofill): - if isinstance(data, dict): - if 'filename' in data: - if 'binary' in data and data['binary']: - if 
self.data_dimensions.package_dim.boundnames(): - message = 'Unable to store list data ({}) to a binary '\ - 'file when using boundnames' \ - '.'.format(self.data_dimensions.structure. - name) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.data_dimensions.structure.get_model(), - self.data_dimensions.structure.get_package(), - self.data_dimensions.structure.path, - 'writing list data to binary file', - self.data_dimensions.structure.name, - inspect.stack()[0][3], type_, value_, traceback_, - message, self._simulation_data.debug) - self.process_open_close_line(data, layer) - return - self.store_internal(data, layer, False, multiplier, key=key, - autofill=autofill) - - def _set_array(self, data, layer, multiplier, key, autofill): - # make a list out of a single item - if isinstance(data, int) or isinstance(data, float) or isinstance(data, str): - data = [data] - - # check for possibility of multi-layered data - success = False - layer_num = 0 - if layer is None and self.data_structure_type == \ - DataStructureType.ndarray and len(data) ==\ - self.layer_storage.get_total_size() and not \ - isinstance(data, dict): - # loop through list and try to store each list entry as a layer - success = True - for layer_num, layer_data in enumerate(data): - if not isinstance(layer_data, list) and \ - not isinstance(layer_data, dict) and \ - not isinstance(layer_data, np.ndarray): - layer_data = [layer_data] - layer_index = self.layer_storage.nth_index(layer_num) - success = success and self._set_array_layer(layer_data, - layer_index, - multiplier, - key) - if not success: - # try to store as a single layer - success = self._set_array_layer(data, layer, multiplier, key) - self.layered = bool(self.layer_storage.get_total_size() > 1) - if not success: - message = 'Unable to set data "{}" layer {}. 
Data is not ' \ - 'in a valid format' \ - '.'.format(self.data_dimensions.structure.name, - layer_num) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.data_dimensions.structure.get_model(), - self.data_dimensions.structure.get_package(), - self.data_dimensions.structure.path, 'setting array data', - self.data_dimensions.structure.name, inspect.stack()[0][3], - type_, value_, traceback_, message, - self._simulation_data.debug) - - def _set_array_layer(self, data, layer, multiplier, key): - # look for a single constant value - data_type = self.data_dimensions.structure.\ - get_datum_type(return_enum_type=True) - if not isinstance(data, dict) and not isinstance(data, str): - if self._calc_data_size(data, 2) == 1 and \ - self._is_type(data[0], data_type): - # store data as const - self.store_internal(data, layer, True, multiplier, key=key) - return True - - # look for internal and open/close data - if isinstance(data, dict): - if 'data' in data: - if isinstance(data['data'], int) or \ - isinstance(data['data'], float) or \ - isinstance(data['data'], str): - # data should always in in a list/array - data['data'] = [data['data']] - - if 'filename' in data: - multiplier, iprn, binary = \ - self.process_open_close_line(data, layer)[0:3] - # store location to file - self.store_external(data['filename'], layer, [multiplier], - print_format=iprn, binary=binary, - do_not_verify=True) - return True - elif 'data' in data: - multiplier, iprn = self.process_internal_line(data) - if len(data['data']) == 1: - # merge multiplier with single value and make constant - if DatumUtil.is_float(multiplier): - mult = 1.0 - else: - mult = 1 - self.store_internal([data['data'][0] * multiplier], layer, - True, [mult], key=key, - print_format=iprn) - else: - self.store_internal(data['data'], layer, False, - [multiplier], key=key, - print_format=iprn) - return True - elif isinstance(data[0], str): - if data[0].lower() == 'internal': - multiplier, iprn = self.process_internal_line(data) - self.store_internal(data[-1], layer, False, [multiplier], - key=key, print_format=iprn) - return True - elif data[0].lower() != 'open/close': - # assume open/close is just omitted - new_data = data[:] - new_data.insert(0, 'open/close') - else: - new_data = data[:] - self.process_open_close_line(new_data, layer, True) - return True - # try to resolve as internal array - layer_storage = self.layer_storage[self._resolve_layer(layer)] - if not (layer_storage.data_storage_type == - DataStorageType.internal_constant and - PyListUtil.has_one_item(data)): - # store data as is - try: - self.store_internal(data, layer, False, multiplier, key=key) - except MFDataException: - return False - return True - return False - - def get_active_layer_indices(self): - layer_index = [] - for index in self.layer_storage.indexes(): - if self.layer_storage[index].fname is not None or \ - self.layer_storage[index].internal_data is not None: - layer_index.append(index) - return layer_index - - def get_external(self, layer=None): - if not (layer is None or self.layer_storage.in_shape(layer)): - message = 'Can not get external data for layer "{}"' \ - '.'.format(layer) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.data_dimensions.structure.get_model(), - self.data_dimensions.structure.get_package(), - self.data_dimensions.structure.path, - 'getting external data', - self.data_dimensions.structure.name, - inspect.stack()[0][3], - type_, value_, traceback_, message, - self._simulation_data.debug) - - def 
store_internal(self, data, layer=None, const=False, multiplier=None, - key=None, autofill=False, - print_format=None): - if multiplier is None: - multiplier = [self.get_default_mult()] - if self.data_structure_type == DataStructureType.recarray: - if self.layer_storage.first_item().data_storage_type == \ - DataStorageType.internal_constant: - self.layer_storage.first_item().data_const_value = data - else: - self.layer_storage.first_item().data_storage_type = \ - DataStorageType.internal_array - if data is None or isinstance(data, np.recarray): - if self._simulation_data.verify_data: - self._verify_list(data) - self.layer_storage.first_item().internal_data = data - else: - if data is None: - self.set_data(None) - if autofill and data is not None: - if isinstance(data, tuple) and isinstance(data[0], - tuple): - # convert to list of tuples - data = list(data) - if isinstance(data, list) and \ - DatumUtil.is_basic_type(data[0]): - # this is a simple list, turn it into a tuple - # inside a list so that it is interpreted - # correctly by numpy.recarray - tupled_data = () - for data_item in data: - tupled_data += (data_item,) - data = [tupled_data] - - if not isinstance(data, list): - # put data in a list format for recarray - data = [(data,)] - # auto-fill tagged keyword - structure = self.data_dimensions.structure - data_item_structs = structure.data_item_structures - if data_item_structs[0].tagged and not \ - data_item_structs[0].type == DatumType.keyword: - for data_index, data_entry in enumerate(data): - if (data_item_structs[0].type == - DatumType.string and - data_entry[0].lower() == - data_item_structs[0].name.lower()): - break - data[data_index] = \ - (data_item_structs[0].name.lower(),) \ - + data[data_index] - if data is not None: - new_data = self._build_recarray(data, key, autofill) - self.layer_storage.first_item().internal_data = new_data - elif self.data_structure_type == DataStructureType.scalar: - self.layer_storage.first_item().internal_data = data - else: - layer, multiplier = self._store_prep(layer, multiplier) - dimensions = self.get_data_dimensions(layer) - if const: - self.layer_storage[layer].data_storage_type = \ - DataStorageType.internal_constant - self.layer_storage[layer].data_const_value = \ - [mfdatautil.get_first_val(data)] - else: - self.layer_storage[layer].data_storage_type = \ - DataStorageType.internal_array - try: - self.layer_storage[layer].internal_data = \ - np.reshape(data, dimensions) - except: - message = 'An error occurred when reshaping data ' \ - '"{}" to store. Expected data ' \ - 'dimensions: ' \ - '{}'.format(self.data_dimensions.structure.name, - dimensions) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.data_dimensions.structure.get_model(), - self.data_dimensions.structure.get_package(), - self.data_dimensions.structure.path, - 'setting array data', self.data_dimensions. - structure.name, inspect.stack()[0][3], type_, - value_, traceback_, message, - self._simulation_data.debug) - self.layer_storage[layer].factor = multiplier - self.layer_storage[layer].iprn = print_format - - def _build_recarray(self, data, key, autofill): - self.build_type_list(data=data, key=key) - if autofill and data is not None: - # resolve any fields with data types that do not - # agree with the expected type list - self._resolve_multitype_fields(data) - if isinstance(data, list): - # data needs to be stored as tuples within a list. 
- # if this is not the case try to fix it - self._tupleize_data(data) - # add placeholders to data so it agrees with - # expected dimensions of recarray - self._add_placeholders(data) - try: - new_data = np.rec.array(data, - self._recarray_type_list) - except: - data_expected = [] - for data_type in self._recarray_type_list: - data_expected.append('<{}>'.format( - data_type[0])) - message = 'An error occurred when storing data ' \ - '"{}" in a recarray. {} data is a one ' \ - 'or two dimensional list containing ' \ - 'the variables "{}" (some variables ' \ - 'may be optional, see MF6 ' \ - 'documentation), but data "{}" was ' \ - 'supplied.'.format( - self.data_dimensions.structure.name, - self.data_dimensions.structure.name, - ' '.join(data_expected), data) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.data_dimensions.structure.get_model(), - self.data_dimensions.structure.get_package(), - self.data_dimensions.structure.path, - 'setting array data', - self.data_dimensions.structure.name, - inspect.stack()[0][3], type_, value_, - traceback_, message, - self._simulation_data.debug) - if self._simulation_data.verify_data: - self._verify_list(new_data) - return new_data - - def _resolve_multitype_fields(self, data): - # find any data fields where the data is not a consistent type - itype_len = len(self._recarray_type_list) - for data_entry in data: - for index, data_val in enumerate(data_entry): - if index < itype_len and \ - self._recarray_type_list[index][1] != object and \ - not isinstance(data_val, - self._recarray_type_list[index][1]) \ - and (not isinstance(data_val, int) or - self._recarray_type_list[index][1] != float): - # for inconsistent types use generic object type - self._recarray_type_list[index] = \ - (self._recarray_type_list[index][0], object) - - def store_external(self, file_path, layer=None, multiplier=None, - print_format=None, data=None, do_not_verify=False, - binary=False): - if multiplier is None: - multiplier = [self.get_default_mult()] - layer_new, multiplier = self._store_prep(layer, multiplier) - - if data is not None: - if self.data_structure_type == DataStructureType.recarray: - - # create external file and write file entry to the file - data_dim = self.data_dimensions - model_name = data_dim.package_dim.model_dim[0].model_name - fp = self._simulation_data.mfpath.resolve_path(file_path, - model_name) - if binary: - file_access = MFFileAccessList( - self.data_dimensions.structure, self.data_dimensions, - self._simulation_data, self._data_path, - self._stress_period) - file_access.write_binary_file( - data, fp, self._model_or_sim.modeldiscrit, - precision='double') - else: - try: - fd = open(fp, 'w') - except: - message = 'Unable to open file {}. 
Make sure the ' \ - 'file is not locked and the folder exists' \ - '.'.format(fp) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.data_dimensions.structure.get_model(), - self.data_dimensions.structure.get_package(), - self.data_dimensions.structure.path, - 'opening external file for writing', - data_dim.structure.name, inspect.stack()[0][3], - type_, value_, traceback_, message, - self._simulation_data.debug) - # store data internally first so that a file entry - # can be generated - self.store_internal(data, layer_new, False, [multiplier], None, - False, print_format) - ext_file_entry = self._get_file_entry() - fd.write(ext_file_entry) - fd.close() - # set as external data - self.layer_storage.first_item().internal_data = None - else: - # store data externally in file - data_size = self.get_data_size(layer_new) - data_dim = self.data_dimensions - data_type = data_dim.structure.data_item_structures[0].type - model_name = data_dim.package_dim.model_dim[0].model_name - fp = self._simulation_data.mfpath.resolve_path(file_path, - model_name) - - if self._calc_data_size(data, 2) == 1 and data_size > 1: - # constant data, need to expand - self.layer_storage[layer_new].data_const_value = data - self.layer_storage[layer_new].DataStorageType = \ - DataStorageType.internal_constant - data = self._fill_const_layer(layer) - elif isinstance(data, list): - data = self._to_ndarray(data, layer) - if binary: - text = self.data_dimensions.structure.name - file_access = MFFileAccessArray( - self.data_dimensions.structure, self.data_dimensions, - self._simulation_data, self._data_path, - self._stress_period) - str_layered = self.data_dimensions.structure.layered - file_access.write_binary_file( - data, fp, text, self._model_or_sim.modeldiscrit, - self._model_or_sim.modeltime, - stress_period=self._stress_period, precision='double', - write_multi_layer=(layer is None and str_layered)) - else: - file_access = MFFileAccessArray( - self.data_dimensions.structure, self.data_dimensions, - self._simulation_data, self._data_path, - self._stress_period) - file_access.write_text_file(data, fp, data_type, data_size) - self.layer_storage[layer_new].factor = multiplier - self.layer_storage[layer_new].internal_data = None - self.layer_storage[layer_new].data_const_value = None - - else: - if self.data_structure_type == DataStructureType.recarray: - self.layer_storage.first_item().internal_data = None - else: - self.layer_storage[layer_new].factor = multiplier - self.layer_storage[layer_new].internal_data = None - self.set_ext_file_attributes(layer_new, file_path, print_format, - binary) - - def set_ext_file_attributes(self, layer, file_path, - print_format, binary): - # point to the external file and set flags - self.layer_storage[layer].fname = file_path - self.layer_storage[layer].iprn = print_format - self.layer_storage[layer].binary = binary - self.layer_storage[layer].data_storage_type = \ - DataStorageType.external_file - - def point_to_existing_external_file(self, arr_line, layer): - multiplier, print_format, binary, \ - data_file = self.process_open_close_line(arr_line, layer, store=False) - self.set_ext_file_attributes(layer, data_file, print_format, binary) - self.layer_storage[layer].factor = multiplier - - def external_to_external(self, new_external_file, multiplier=None, - layer=None, binary=None): - # currently only support files containing ndarrays - if not (self.data_structure_type == DataStructureType.ndarray): - message = 'Can not copy external file of type "{}". 
Only ' \ - 'files containing ndarrays currently supported' \ - '.'.format(self.data_structure_type) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.data_dimensions.structure.get_model(), - self.data_dimensions.structure.get_package(), - self.data_dimensions.structure.path, - 'copy external file', - self.data_dimensions.structure.name, - inspect.stack()[0][3], - type_, value_, traceback_, message, - self._simulation_data.debug) - if not ((layer is None and self.layer_storage.get_total_size() == 1) or - (layer is not None and self.layer_storage.in_shape(layer))): - if layer is None: - message = 'When no layer is supplied the data must contain ' \ - 'only one layer. Data contains {} layers' \ - '.' .format(self.layer_storage.get_total_size()) - else: - message = 'layer "{}" is not a valid layer'.format(layer) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.data_dimensions.structure.get_model(), - self.data_dimensions.structure.get_package(), - self.data_dimensions.structure.path, - 'copy external file', - self.data_dimensions.structure.name, - inspect.stack()[0][3], - type_, value_, traceback_, message, - self._simulation_data.debug) - # get data storage - if layer is None: - layer = 1 - if self.layer_storage[layer].fname is None: - message = 'No file name exists for layer {}.'.format(layer) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.data_dimensions.structure.get_model(), - self.data_dimensions.structure.get_package(), - self.data_dimensions.structure.path, - 'copy external file', - self.data_dimensions.structure.name, - inspect.stack()[0][3], - type_, value_, traceback_, message, - self._simulation_data.debug) - - # copy file to new location - copyfile(self.layer_storage[layer].fname, new_external_file) - - # update - if binary is None: - binary = self.layer_storage[layer].binary - self.store_external(new_external_file, layer, - [self.layer_storage[layer].factor], - self.layer_storage[layer].iprn, - binary=binary) - - def external_to_internal(self, layer, store_internal=False): - if layer is None: - layer = 0 - # load data from external file - model_name = self.data_dimensions.package_dim.model_dim[0]. 
\ - model_name - read_file = self._simulation_data.mfpath.resolve_path( - self.layer_storage[layer].fname, model_name) - # currently support files containing ndarrays or recarrays - if self.data_structure_type == DataStructureType.ndarray: - file_access = MFFileAccessArray( - self.data_dimensions.structure, self.data_dimensions, - self._simulation_data, self._data_path, - self._stress_period) - if self.layer_storage[layer].binary: - data_out = file_access.read_binary_data_from_file( - read_file, self.get_data_dimensions(layer), - self.get_data_size(layer), self._data_type, - self._model_or_sim.modeldiscrit)[0] - else: - data_out = file_access.read_text_data_from_file( - self.get_data_size(layer), self._data_type, - self.get_data_dimensions(layer), layer, read_file)[0] - if self.layer_storage[layer].factor is not None: - data_out = data_out * self.layer_storage[layer].factor - - if store_internal: - self.store_internal(data_out, layer) - return data_out - elif self.data_structure_type == DataStructureType.recarray: - file_access = MFFileAccessList( - self.data_dimensions.structure, self.data_dimensions, - self._simulation_data, self._data_path, - self._stress_period) - if self.layer_storage[layer].binary: - data = file_access.read_binary_data_from_file( - read_file, self._model_or_sim.modeldiscrit) - data_out = self._build_recarray(data, layer, False) - else: - with open(read_file, 'r') as fd_read_file: - data_out = file_access.read_list_data_from_file( - fd_read_file, self, self._stress_period, - store_internal=False) - if store_internal: - self.store_internal(data_out, layer) - return data_out - else: - path = self.data_dimensions.structure.path - message= 'Can not convert {} to internal data. External to ' \ - 'internal file operations currently only supported ' \ - 'for ndarrays.'.format(path[-1]) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.data_dimensions.structure.get_model(), - self.data_dimensions.structure.get_package(), - self.data_dimensions.structure.path, - 'opening external file for writing', - self.data_dimensions.structure.name, inspect.stack()[0][3], - type_, value_, traceback_, message, - self._simulation_data.debug) - - def internal_to_external(self, new_external_file, multiplier=None, - layer=None, print_format=None, binary=False): - if layer is None: - layer_item = self.layer_storage.first_item() - else: - layer_item = self.layer_storage[layer] - if layer_item.data_storage_type == DataStorageType.internal_array: - data = layer_item.internal_data - else: - data = self._fill_const_layer(layer) - self.store_external(new_external_file, layer, multiplier, - print_format, data, binary=binary) - - def resolve_shape_list(self, data_item, repeat_count, current_key, - data_line, cellid_size=None): - struct = self.data_dimensions.structure - try: - resolved_shape, shape_rule = \ - self.data_dimensions.get_data_shape(data_item, struct, - data_line, - repeating_key= - current_key) - except Exception as se: - comment = 'Unable to resolve shape for data "{}" field "{}"' \ - '.'.format(struct.name, - data_item.name) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(struct.get_model(), - struct.get_package(), struct.path, - 'loading data list from package file', - struct.name, inspect.stack()[0][3], - type_, value_, traceback_, comment, - self._simulation_data.debug, se) - - if cellid_size is not None: - data_item.remove_cellid(resolved_shape, cellid_size) - - if len(resolved_shape) == 1: - if repeat_count < resolved_shape[0]: - return 
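Aside: the text/ndarray branch of external_to_internal() reduces to read, reshape, and scale. A hedged standalone equivalent (file name and shape hypothetical):

    import numpy as np

    raw = np.loadtxt('recharge_array.txt').reshape((10, 10))
    factor = 0.001
    data_out = raw * factor  # factor applied exactly as in the code above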
True, shape_rule is not None - elif resolved_shape[0] == -9999: - # repeating unknown number of times in 1-D array - return False, True - return False, False - - def _validate_cellid(self, arr_line, data_index): - if not self.data_dimensions.structure.model_data: - # not model data so this is not a cell id - return False - if arr_line is None: - return False - model_grid = self.data_dimensions.get_model_grid() - cellid_size = model_grid.get_num_spatial_coordinates() - if cellid_size + data_index > len(arr_line): - return False - for index, \ - dim_size in zip(range(data_index, cellid_size + data_index), - model_grid.get_model_dim()): - if not DatumUtil.is_int(arr_line[index]): - return False - val = int(arr_line[index]) - if val <= 0 or val > dim_size: - return False - return True - - def add_data_line_comment(self, comment, line_num): - if line_num in self.comments: - self.comments[line_num].add_text('\n') - self.comments[line_num].add_text(' '.join(comment)) - else: - self.comments[line_num] = MFComment(' '.join(comment), - self.data_dimensions.structure. - path, - self._simulation_data, - line_num) - - def process_internal_line(self, arr_line): - multiplier = self.get_default_mult() - print_format = None - if isinstance(arr_line, list): - if len(arr_line) < 2: - message = 'Data array "{}" contains an INTERNAL ' \ - 'that is not followed by a multiplier in line ' \ - '"{}".'.format(self.data_dimensions.structure.name, - ' '.join(arr_line)) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.data_dimensions.structure.get_model(), - self.data_dimensions.structure.get_package(), - self.data_dimensions.structure.path, - 'processing internal data header', - self.data_dimensions.structure.name, - inspect.stack()[0][3], type_, value_, - traceback_, message, - self._simulation_data.debug) - index = 1 - while index < len(arr_line): - if isinstance(arr_line[index], str): - if arr_line[index].lower() == 'factor' and \ - index + 1 < len(arr_line): - multiplier = convert_data(arr_line[index+1], - self.data_dimensions, - self._data_type) - index += 2 - elif arr_line[index].lower() == 'iprn' and \ - index + 1 < len(arr_line): - print_format = arr_line[index+1] - index += 2 - else: - break - else: - break - elif isinstance(arr_line, dict): - for key, value in arr_line.items(): - if key.lower() == 'factor': - multiplier = convert_data(value, self.data_dimensions, - self._data_type) - if key.lower() == 'iprn': - print_format = value - return multiplier, print_format - - def process_open_close_line(self, arr_line, layer, store=True): - # process open/close line - index = 2 - if self._data_type == DatumType.integer: - multiplier = 1 - else: - multiplier = 1.0 - print_format = None - binary = False - data_file = None - data = None - - data_dim = self.data_dimensions - if isinstance(arr_line, list): - if len(arr_line) < 2 and store: - message = 'Data array "{}" contains a OPEN/CLOSE ' \ - 'that is not followed by a file. 
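Aside: process_internal_line() accepts either a tokenized line from a package file or a user-supplied dict. Illustrative values only:

    parsed = ['INTERNAL', 'FACTOR', '1.5', 'IPRN', '3']  # tokens from a package file
    as_dict = {'factor': 1.5, 'iprn': 3}                 # user-supplied form
    # both resolve to multiplier 1.5 and a print format of 3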
' \ - '{}'.format(data_dim.structure.name, - data_dim.structure.path) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.data_dimensions.structure.get_model(), - self.data_dimensions.structure.get_package(), - self.data_dimensions.structure.path, - 'processing open/close line', data_dim.structure.name, - inspect.stack()[0][3], type_, value_, traceback_, message, - self._simulation_data.debug) - while index < len(arr_line): - if isinstance(arr_line[index], str): - if arr_line[index].lower() == 'factor' and \ - index + 1 < len(arr_line): - try: - multiplier = convert_data(arr_line[index+1], - self.data_dimensions, - self._data_type) - except Exception as ex: - message = 'Data array {} contains an OPEN/CLOSE ' \ - 'with an invalid multiplier following ' \ - 'the "factor" keyword.' \ - '.'.format(data_dim.structure.name) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.data_dimensions.structure.get_model(), - self.data_dimensions.structure.get_package(), - self.data_dimensions.structure.path, - 'processing open/close line', - data_dim.structure.name, inspect.stack()[0][3], - type_, value_, traceback_, message, - self._simulation_data.debug, ex) - index += 2 - elif arr_line[index].lower() == 'iprn' and \ - index + 1 < len(arr_line): - print_format = arr_line[index+1] - index += 2 - elif arr_line[index].lower() == 'data' and \ - index + 1 < len(arr_line): - data = arr_line[index+1] - index += 2 - elif arr_line[index].lower() == 'binary' or \ - arr_line[index].lower() == '(binary)': - binary = True - index += 1 - else: - break - else: - break - # save comments - if index < len(arr_line): - self.layer_storage[layer].comments = MFComment( - ' '.join(arr_line[index:]), - self.data_dimensions.structure.path, - self._simulation_data, layer) - if arr_line[0].lower() == 'open/close': - data_file = arr_line[1] - else: - data_file = arr_line[0] - elif isinstance(arr_line, dict): - for key, value in arr_line.items(): - if key.lower() == 'factor': - try: - multiplier = convert_data(value, self.data_dimensions, - self._data_type) - except Exception as ex: - message = 'Data array {} contains an OPEN/CLOSE ' \ - 'with an invalid multiplier following the ' \ - '"factor" keyword.' 
\ - '.'.format(data_dim.structure.name) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.data_dimensions.structure.get_model(), - self.data_dimensions.structure.get_package(), - self.data_dimensions.structure.path, - 'processing open/close line', - data_dim.structure.name, inspect.stack()[0][3], - type_, value_, traceback_, message, - self._simulation_data.debug, ex) - if key.lower() == 'iprn': - print_format = value - if key.lower() == 'binary': - binary = bool(value) - if key.lower() == 'data': - data = value - if 'filename' in arr_line: - data_file = arr_line['filename'] - - if data_file is None: - message = 'Data array {} contains an OPEN/CLOSE without a ' \ - 'fname (file name) specified' \ - '.'.format(data_dim.structure.name) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.data_dimensions.structure.get_model(), - self.data_dimensions.structure.get_package(), - self.data_dimensions.structure.path, - 'processing open/close line', - data_dim.structure.name, - inspect.stack()[0][3], type_, value_, - traceback_, message, - self._simulation_data.debug) - - if store: - # store external info - self.store_external(data_file, layer, [multiplier], print_format, - binary=binary, data=data) - - # add to active list of external files - model_name = data_dim.package_dim.model_dim[0].model_name - self._simulation_data.mfpath.add_ext_file(data_file, model_name) - - return multiplier, print_format, binary, data_file - - @staticmethod - def _tupleize_data(data): - for index, data_line in enumerate(data): - if not isinstance(data_line, tuple): - if isinstance(data_line, list): - data[index] = tuple(data_line) - else: - data[index] = (data_line,) - - def _verify_list(self, data): - if data is not None: - model_grid = None - cellid_size = None - for data_line in data: - data_line_len = len(data_line) - for index in range(0, min(data_line_len, - len(self._recarray_type_list))): - if self._recarray_type_list[index][0] == 'cellid' and \ - self.data_dimensions.get_model_dim(None).model_name\ - is not None and data_line[index] is not None: - # this is a cell id. verify that it contains the - # correct number of integers - if cellid_size is None: - model_grid = self.data_dimensions.get_model_grid() - cellid_size = model_grid.\ - get_num_spatial_coordinates() - if cellid_size != 1 and \ - len(data_line[index]) != cellid_size and \ - isinstance(data_line[index], int): - message = 'Cellid "{}" contains {} integer(s). 
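Aside: the list and dict branches of process_open_close_line() handle these two equivalent inputs (file name illustrative):

    parsed = ['OPEN/CLOSE', 'rch_sp1.txt', 'FACTOR', '0.002', 'IPRN', '1']
    as_dict = {'filename': 'rch_sp1.txt', 'factor': 0.002, 'iprn': 1,
               'binary': False}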
' \ - 'Expected a cellid containing {} ' \ - 'integer(s) for grid type' \ - ' {}.'.format(data_line[index], - len(data_line[index]), - cellid_size, - str( - model_grid.grid_type())) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.data_dimensions.structure.get_model(), - self.data_dimensions.structure.get_package(), - self.data_dimensions.structure.path, - 'verifying cellid', - self.data_dimensions.structure.name, - inspect.stack()[0][3], - type_, value_, traceback_, message, - self._simulation_data.debug) - - def _add_placeholders(self, data): - idx = 0 - for data_line in data: - data_line_len = len(data_line) - if data_line_len < len(self._recarray_type_list): - for index in range(data_line_len, - len(self._recarray_type_list)): - if self._recarray_type_list[index][1] == int: - self._recarray_type_list[index] = \ - (self._recarray_type_list[index][0], object) - data_line += (None,) - elif self._recarray_type_list[index][1] == float: - data_line += (np.nan,) - else: - data_line += (None,) - data[idx] = data_line - idx += 1 - - def _duplicate_last_item(self): - last_item = self._recarray_type_list[-1] - arr_item_name = last_item[0].split('_') - if DatumUtil.is_int(arr_item_name[-1]): - new_item_num = int(arr_item_name[-1]) + 1 - new_item_name = '_'.join(arr_item_name[0:-1]) - new_item_name = '{}_{}'.format(new_item_name, new_item_num) - else: - new_item_name = '{}_1'.format(last_item[0]) - self._recarray_type_list.append((new_item_name, last_item[1])) - - def _build_full_data(self, apply_multiplier=False): - if self.data_structure_type == DataStructureType.scalar: - return self.layer_storage.first_item().internal_data - dimensions = self.get_data_dimensions(None) - if dimensions[0] < 0: - return None - all_none = True - np_data_type = self.data_dimensions.structure.get_datum_type() - full_data = np.full(dimensions, np.nan, - self.data_dimensions.structure.get_datum_type(True)) - is_aux = self.data_dimensions.structure.name == 'aux' - if is_aux: - aux_data = [] - if not self.layered: - layers_to_process = [0] - else: - layers_to_process = self.layer_storage.indexes() - for layer in layers_to_process: - if self.layer_storage[layer].factor is not None and \ - apply_multiplier: - mult = self.layer_storage[layer].factor - elif self._data_type == DatumType.integer: - mult = 1 - else: - mult = 1.0 - - if self.layer_storage[layer].data_storage_type == \ - DataStorageType.internal_array: - if self.layer_storage[layer].internal_data is None or \ - len(self.layer_storage[layer].internal_data) > 0 and \ - self.layer_storage[layer].internal_data[0] is None: - if is_aux: - full_data = None - else: - return None - elif self.layer_storage.get_total_size() == 1 or \ - not self.layered or not self._has_layer_dim(): - full_data = self.layer_storage[layer].internal_data * mult - else: - full_data[layer] = \ - self.layer_storage[layer].internal_data * mult - elif self.layer_storage[layer].data_storage_type == \ - DataStorageType.internal_constant: - if self.layer_storage.get_total_size() == 1 or \ - not self.layered or not self._has_layer_dim(): - full_data = self._fill_const_layer(layer) * mult - else: - full_data[layer] = self._fill_const_layer(layer) * mult - else: - file_access = MFFileAccessArray( - self.data_dimensions.structure, self.data_dimensions, - self._simulation_data, self._data_path, - self._stress_period) - model_name = self.data_dimensions.package_dim.model_dim[0]. 
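Aside: per-layer assembly in _build_full_data() amounts to filling and scaling slices of the full array. A hedged numpy sketch (shape and values hypothetical):

    import numpy as np

    dimensions = (3, 10, 10)               # (nlay, nrow, ncol)
    full_data = np.full(dimensions, np.nan)
    const_value, factor = 5.0, 2.0         # a layer stored as internal_constant
    full_data[0] = np.full(dimensions[1:], const_value) * factor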
\ - model_name - read_file = self._simulation_data.mfpath.resolve_path( - self.layer_storage[layer].fname, model_name) - - if self.layer_storage[layer].binary: - data_out = file_access.read_binary_data_from_file( - read_file, self.get_data_dimensions(layer), - self.get_data_size(layer), self._data_type, - self._model_or_sim.modeldiscrit, - not self.layered)[0] * mult - else: - data_out = file_access.read_text_data_from_file( - self.get_data_size(layer), np_data_type, - self.get_data_dimensions(layer), layer, - read_file)[0] * mult - if self.layer_storage.get_total_size() == 1 or \ - not self.layered: - full_data = data_out - else: - full_data[layer] = data_out - if is_aux: - if full_data is not None: - all_none = False - aux_data.append(full_data) - full_data = np.full( - dimensions, np.nan, - self.data_dimensions.structure.get_datum_type(True)) - if is_aux: - if all_none: - return None - else: - return np.stack(aux_data, axis=0) - else: - return full_data - - def _resolve_layer(self, layer): - if layer is None: - return self.layer_storage.first_index() - else: - return layer - - def _to_ndarray(self, data, layer): - data_dimensions = self.get_data_dimensions(layer) - data_iter = MultiListIter(data) - return self._fill_dimensions(data_iter, data_dimensions) - - def _fill_const_layer(self, layer): - data_dimensions = self.get_data_dimensions(layer) - if layer is None: - ls = self.layer_storage.first_item() - else: - ls = self.layer_storage[layer] - if data_dimensions[0] < 0: - return ls.data_const_value - else: - data_type = self.data_dimensions.structure. \ - get_datum_type(numpy_type=True) - return np.full(data_dimensions, ls.data_const_value[0], data_type) - - def _is_type(self, data_item, data_type): - if data_type == DatumType.string or data_type == DatumType.keyword: - return True - elif data_type == DatumType.integer: - return DatumUtil.is_int(data_item) - elif data_type == DatumType.double_precision: - return DatumUtil.is_float(data_item) - elif data_type == DatumType.keystring: - # TODO: support keystring type - if self._simulation_data.verbosity_level.value >= \ - VerbosityLevel.normal.value: - print('Keystring type currently not supported.') - return True - else: - if self._simulation_data.verbosity_level.value >= \ - VerbosityLevel.normal.value: - print('{} type checking currently not supported'.format(data_type)) - return True - - def _fill_dimensions(self, data_iter, dimensions): - if self.data_structure_type == DataStructureType.ndarray: - np_dtype = MFFileAccess.datum_to_numpy_type(self._data_type)[0] - # initialize array - data_array = np.ndarray(shape=dimensions, dtype=np_dtype) - # fill array - for index in ArrayIndexIter(dimensions): - data_array.itemset(index, data_iter.__next__()) - return data_array - elif self.data_structure_type == DataStructureType.scalar: - return data_iter.__next__() - else: - data_array = None - data_line = () - # fill array - array_index_iter = ArrayIndexIter(dimensions) - current_col = 0 - for index in array_index_iter: - data_line += (index,) - if current_col == dimensions[1] - 1: - try: - if data_array is None: - data_array = np.rec.array(data_line, - self._recarray_type_list) - else: - rec_array = np.rec.array(data_line, - self._recarray_type_list) - data_array = np.hstack((data_array, - rec_array)) - except: - message = 'An error occurred when storing data ' \ - '"{}" in a recarray. 
Data line being ' \ - 'stored: {}'.format( - self.data_dimensions.structure.name, - data_line) - - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.data_dimensions.structure.get_model(), - self.data_dimensions.structure.get_package(), - self.data_dimensions.structure.path, - 'processing open/close line', - dimensions.structure.name, inspect.stack()[0][3], - type_, value_, traceback_, message, - self._simulation_data.debug) - current_col = 0 - data_line = () - data_array[index] = data_iter.next() - return data_array - - def set_tas(self, tas_name, tas_label, current_key): - # move to storage - package_dim = self.data_dimensions.package_dim - tas_names = package_dim.get_tasnames() - if tas_name.lower() not in tas_names and \ - self._simulation_data.verbosity_level.value >= \ - VerbosityLevel.normal.value: - print('WARNING: Time array series name {} not found in any ' - 'time series file'.format(tas_name)) - # this is a time series array with a valid tas variable - self.data_structure_type = DataStructureType.scalar - try: - self.set_data('{} {}'.format(tas_label, tas_name), 0, - key=current_key) - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - structure = self.data_dimensions.structure - raise MFDataException(structure.get_model(), - structure.get_package(), - structure.path, - 'storing data', - structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, None, - self._simulation_data.debug, ex) - - def resolve_data_size(self, index): - # Resolves the size of a given data element based on the names in the - # existing rec_array. Assumes repeating data element names follow the - # format _X - if self.data_structure_type != DataStructureType.recarray: - message = 'Data structure type is {}. Data structure type must ' \ - 'be recarray.'.format(self.data_structure_type) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.data_dimensions.structure.get_model(), - self.data_dimensions.structure.get_package(), - self.data_dimensions.structure.path, - 'resolving data size', - self.data_dimensions.structure.name, - inspect.stack()[0][3], - type_, value_, traceback_, message, - self._simulation_data.debug) - - if len(self.layer_storage.first_item().internal_data[0]) <= index: - return 0 - label = self.layer_storage.first_item().\ - internal_data.dtype.names[index] - label_list = label.split('_') - if len(label_list) == 1: - return 1 - internal_data = self.layer_storage.first_item().internal_data - for forward_index in range(index+1, len(internal_data.dtype.names)): - forward_label = internal_data.dtype.names[forward_index] - forward_label_list = forward_label.split('_') - if forward_label_list[0] != label_list[0]: - return forward_index - index - return len(internal_data.dtype.names) - index - - def build_type_list(self, data_set=None, data=None, - resolve_data_shape=True, key=None, - nseg=None): - if data_set is None: - self._recarray_type_list = [] - data_set = self.data_dimensions.structure - initial_keyword = True - package_dim = self.data_dimensions.package_dim - for data_item, index in zip(data_set.data_item_structures, - range(0, - len(data_set.data_item_structures))): - # handle optional mnames - if not data_item.optional or len(data_item.name) < 5 or \ - data_item.name.lower()[0:5] != 'mname' \ - or not self.in_model: - overrides = self._data_type_overrides - if len(self._recarray_type_list) in overrides: - data_type = overrides[len(self._recarray_type_list)] - elif isinstance(data_item, MFDataItemStructure): - 
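Aside: set_tas() collapses the array to a scalar time-array-series reference. With a hypothetical label/name pair, the stored value is just:

    tas_label, tas_name = 'TIMEARRAYSERIES', 'rch_series'
    stored_line = '{} {}'.format(tas_label, tas_name)  # 'TIMEARRAYSERIES rch_series'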
data_type = data_item.get_rec_type() - else: - data_type = None - if data_item.name.lower() == 'aux' and resolve_data_shape: - aux_var_names = package_dim.get_aux_variables() - if aux_var_names is not None: - for aux_var_name in aux_var_names[0]: - if aux_var_name.lower() != 'auxiliary': - self._recarray_type_list.append((aux_var_name, - data_type)) - - elif data_item.type == DatumType.record: - # record within a record, recurse - self.build_type_list(data_item, True, data) - elif data_item.type == DatumType.keystring: - self._recarray_type_list.append((data_item.name, - data_type)) - # add potential data after keystring to type list - ks_data_item = deepcopy(data_item) - ks_data_item.type = DatumType.string - ks_data_item.name = '{}_data'.format(ks_data_item.name) - ks_rec_type = ks_data_item.get_rec_type() - self._recarray_type_list.append((ks_data_item.name, - ks_rec_type)) - if index == len(data_set.data_item_structures) - 1: - idx = 1 - data_line_max_size = self._get_max_data_line_size(data) - while data is not None and \ - len(self._recarray_type_list) < \ - data_line_max_size: - # keystrings at the end of a line can contain items - # of variable length. assume everything at the - # end of the data line is related to the last - # keystring - self._recarray_type_list.append( - ('{}_{}'.format(ks_data_item.name, idx), - ks_rec_type)) - idx += 1 - - elif data_item.name != 'boundname' or \ - self.data_dimensions.package_dim.boundnames(): - # don't include initial keywords - if data_item.type != DatumType.keyword or \ - initial_keyword == \ - False or data_set.block_variable == True: - initial_keyword = False - shape_rule = None - if data_item.tagged: - if data_item.type != DatumType.string and \ - data_item.type != DatumType.keyword: - self._recarray_type_list.append( - ('{}_label'.format(data_item.name), - object)) - if nseg is not None and len(data_item.shape) > 0 and \ - isinstance(data_item.shape[0], str) and \ - data_item.shape[0][0:4] == 'nseg': - # nseg explicitly specified. resolve any formula - # nseg is in - model_dim = \ - self.data_dimensions.get_model_dim(None) - expression_array = \ - model_dim.build_shape_expression(data_item. - shape) - if isinstance(expression_array, list) and \ - len(expression_array) == 1: - exp = expression_array[0] - resolved_shape = \ - [model_dim.resolve_exp(exp, nseg)] - else: - resolved_shape = [1] - else: - if resolve_data_shape: - data_dim = self.data_dimensions - resolved_shape, shape_rule = \ - data_dim.get_data_shape(data_item, - data_set, - data, key) - else: - resolved_shape = [1] - if not resolved_shape or len(resolved_shape) == 0 or \ - resolved_shape[0] == -1: - # could not resolve shape - resolved_shape = [1] - elif resolved_shape[0] == -9999 or \ - shape_rule is not None: - if data is not None: - # shape is an indeterminate 1-d array and - # should consume the remainder of the data - max_s = PyListUtil.max_multi_dim_list_size(data) - resolved_shape[0] = \ - max_s - len(self._recarray_type_list) - else: - # shape is indeterminate 1-d array and no data - # provided to resolve - resolved_shape[0] = 1 - if data_item.is_cellid: - if data_item.shape is not None and \ - len(data_item.shape) > 0 and \ - data_item.shape[0] == 'ncelldim': - # A cellid is a single entry (tuple) in the - # recarray. Adjust dimensions accordingly. 
- data_dim = self.data_dimensions - model_grid = data_dim.get_model_grid() - size = model_grid.get_num_spatial_coordinates() - data_item.remove_cellid(resolved_shape, - size) - for index in range(0, resolved_shape[0]): - if resolved_shape[0] > 1: - # type list fields must have unique names - self._recarray_type_list.append( - ('{}_{}'.format(data_item.name, - index), data_type)) - else: - self._recarray_type_list.append( - (data_item.name, data_type)) - return self._recarray_type_list - - def get_default_mult(self): - if self._data_type == DatumType.integer: - return 1 - else: - return 1.0 - - @staticmethod - def _calc_data_size(data, count_to=None, current_length=None): - if current_length is None: - current_length = [0] - if isinstance(data, np.ndarray): - current_length[0] += data.size - return data.size - if isinstance(data, str) or isinstance(data, dict): - return 1 - try: - for data_item in data: - if hasattr(data_item, '__len__'): - DataStorage._calc_data_size(data_item, count_to, - current_length) - else: - current_length[0] += 1 - if count_to is not None and current_length[0] >= count_to: - return current_length[0] - except (ValueError, IndexError, TypeError): - return 1 - return current_length[0] - - @staticmethod - def _get_max_data_line_size(data): - max_size = 0 - if data is not None: - for value in data: - if len(value) > max_size: - max_size = len(value) - return max_size - - def get_data_dimensions(self, layer): - data_dimensions = self.data_dimensions.get_data_shape()[0] - if layer is not None and self.layer_storage.get_total_size() > 1 and \ - self._has_layer_dim(): - # remove all "layer" dimensions from the list - layer_dims = self.data_dimensions.structure.\ - data_item_structures[0].layer_dims - data_dimensions = data_dimensions[len(layer_dims):] - return data_dimensions - - def _has_layer_dim(self): - return ('nlay' in self.data_dimensions.structure.shape or 'nodes' - in self.data_dimensions.structure.shape) - - def _store_prep(self, layer, multiplier): - if not (layer is None or self.layer_storage.in_shape(layer)): - message = 'Layer {} is not a valid layer.'.format(layer) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.data_dimensions.structure.get_model(), - self.data_dimensions.structure.get_package(), - self.data_dimensions.structure.path, - 'storing data', - self.data_dimensions.structure.name, - inspect.stack()[0][3], - type_, value_, traceback_, message, - self._simulation_data.debug) - if layer is None: - # layer is none means the data provided is for all layers or this - # is not layered data - layer = (0,) - self.layer_storage.list_shape = (1,) - self.layer_storage.multi_dim_list = [ - self.layer_storage.first_item()] - mult_ml = MultiList(multiplier) - if not mult_ml.in_shape(layer): - if multiplier[0] is None: - multiplier = self.get_default_mult() - else: - multiplier = multiplier[0] - else: - if mult_ml.first_item() is None: - multiplier = self.get_default_mult() - else: - multiplier = mult_ml.first_item() - - return layer, multiplier - - def get_data_size(self, layer): - dimensions = self.get_data_dimensions(layer) - data_size = 1 - for dimension in dimensions: - data_size = data_size * dimension - return data_size +from copy import deepcopy +import sys +import inspect +from shutil import copyfile +from collections import OrderedDict +from enum import Enum +import numpy as np +from ..mfbase import MFDataException, VerbosityLevel +from ..data.mfstructure import DatumType, MFDataItemStructure +from ..data import mfdatautil 
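Aside: the type list built by build_type_list() is consumed by np.rec.array exactly as below; the field names and sample record are illustrative, not taken from a real package definition:

    import numpy as np

    recarray_type_list = [('cellid', object), ('q', float), ('boundname', object)]
    data = [((0, 5, 5), -100.0, 'well_1')]
    rec = np.rec.array(data, recarray_type_list)  # same call _build_recarray() makes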
+from ...utils.datautil import DatumUtil, FileIter, MultiListIter, PyListUtil, \ + ArrayIndexIter, MultiList +from .mfdatautil import convert_data, MFComment +from .mffileaccess import MFFileAccessArray, MFFileAccessList, MFFileAccess + + +class DataStorageType(Enum): + """ + Enumeration of different ways that data can be stored + """ + internal_array = 1 + internal_constant = 2 + external_file = 3 + + +class DataStructureType(Enum): + """ + Enumeration of different data structures used to store data + """ + ndarray = 1 + recarray = 2 + scalar = 3 + + +class LayerStorage(object): + """ + Stores a single layer of data. + + Parameters + ---------- + data_storage : DataStorage + Parent data storage object that layer is contained in + lay_num : int + Layer number of layered being stored + data_storage_type : DataStorageType + Method used to store the data + + Attributes + ---------- + internal_data : ndarray or recarray + data being stored, if full data is being stored internally in memory + data_const_value : int/float + constant value of data being stored, if data is a constant + data_storage_type : DataStorageType + method used to store the data + fname : str + file name of external file containing the data + factor : int/float + factor to multiply the data by + iprn : int + print code + binary : bool + whether the data is stored in a binary file + + Methods + ------- + get_const_val(layer) + gets the constant value of a given layer. data storage type for layer + must be "internal_constant". + get_data(layer) : ndarray/recarray/string + returns the data for the specified layer + set_data(data, layer=None, multiplier=[1.0] + sets the data being stored to "data" for layer "layer", replacing all + data for that layer. a multiplier can be specified. + + See Also + -------- + + Notes + ----- + + Examples + -------- + + + """ + + def __init__(self, data_storage, lay_indexes, + data_storage_type=DataStorageType.internal_array, + data_type=None): + self._data_storage_parent = data_storage + self._lay_indexes = lay_indexes + self.internal_data = None + self.data_const_value = None + self.data_storage_type = data_storage_type + self.data_type = data_type + self.fname = None + if self.data_type == DatumType.integer: + self.factor = 1 + else: + self.factor = 1.0 + self.iprn = None + self.binary = False + + def set_internal_constant(self): + self.data_storage_type = DataStorageType.internal_constant + + def set_internal_array(self): + self.data_storage_type = DataStorageType.internal_array + + @property + def name(self): + return self._data_storage_parent.data_dimensions.structure.name + + def __repr__(self): + if self.data_storage_type == DataStorageType.internal_constant: + return 'constant {}'.format(self.get_data_const_val()) + else: + return repr(self.get_data()) + + def __str__(self): + if self.data_storage_type == DataStorageType.internal_constant: + return '{}'.format(self.get_data_const_val()) + else: + return str(self.get_data()) + + def __getattr__(self, attr): + if attr == 'binary' or not hasattr(self, 'binary'): + raise AttributeError(attr) + + if attr == 'array': + return self._data_storage_parent.get_data(self._lay_indexes, True) + elif attr == '__getstate__': + raise AttributeError(attr) + + def set_data(self, data): + self._data_storage_parent.set_data(data, self._lay_indexes, [self.factor]) + + def get_data(self): + return self._data_storage_parent.get_data(self._lay_indexes, False) + + def get_data_const_val(self): + if isinstance(self.data_const_value, list): + return 
self.data_const_value[0]
+        else:
+            return self.data_const_value
+
+
+class DataStorage(object):
+    """
+    Stores and retrieves data.
+
+
+    Parameters
+    ----------
+    sim_data : simulation data class
+        reference to the simulation data class
+    data_dimensions : data dimensions class
+        a data dimensions class for the data being stored
+    get_file_entry : method reference
+        method that returns the file entry for the stored data
+    data_storage_type : enum
+        how the data will be stored (internally, as a constant, as an external
+        file)
+    data_structure_type : enum
+        the internal type the data is stored in (ndarray, recarray, scalar)
+    layer_shape : int
+        number of data layers
+    layered : boolean
+        is the data layered
+    layer_storage : MultiList
+        one or more dimensional list of LayerStorage
+
+    Attributes
+    ----------
+    data_storage_type : list
+        list of data storage types, one for each layer
+    data_const_value : list
+        list of data constants, one for each layer
+    external_file_path : list
+        list of external file paths, one for each layer
+    multiplier : list
+        list of multipliers, one for each layer
+    print_format : list
+        list of print formats, one for each layer
+    data_structure_type :
+        the internal type the data is stored in (ndarray, recarray, scalar)
+    layered : boolean
+        is the data layered
+    pre_data_comments : string
+        any comments before the start of the data
+    comments : OrderedDict
+        any comments mixed in with the data, dictionary keys are data lines
+    post_data_comments : string
+        any comments after the end of the data
+
+    Methods
+    -------
+    override_data_type : (index, data_type)
+        overrides the data type used in a recarray at index "index" with data
+        type "data_type"
+    get_external_file_path(layer)
+        gets the path to an external file for layer "layer"
+    get_const_val(layer)
+        gets the constant value of a given layer. data storage type for layer
+        must be "internal_constant".
+    has_data(layer) : boolean
+        returns true if data exists for the specified layer, false otherwise
+    get_data(layer) : ndarray/recarray/string
+        returns the data for the specified layer
+    update_item(data, key_index)
+        updates the data in a recarray at index "key_index" with data "data".
+        data is a list containing all data for a single record in the
+        recarray. data structure type must be recarray
+    append_data(data)
+        appends data "data" to the end of a recarray. data structure type must
+        be recarray
+    set_data(data, layer=None, multiplier=[1.0])
+        sets the data being stored to "data" for layer "layer", replacing all
+        data for that layer. a multiplier can be specified.
+    get_active_layer_indices() : list
+        returns the indices of all layers expected to contain data
+    store_internal(data, layer=None, const=False, multiplier=[1.0])
+        store data "data" at layer "layer" internally
+    store_external(file_path, layer=None, multiplier=[1.0], print_format=None,
+        data=None, do_not_verify=False) store data "data" at layer "layer"
+        externally in file "file_path"
+    external_to_external(new_external_file, multiplier=None, layer=None)
+        copies existing external data to the new file location and points to
+        the new file
+    external_to_internal(layer_num=None, store_internal=False) :
+        ndarray/recarray
+        loads existing external data for layer "layer_num" and returns it. if
+        store_internal is True it also stores the data internally,
+        changing the storage type for "layer_num" layer to internal.
+ internal_to_external(new_external_file, multiplier=None, layer=None, + print_format=None) + stores existing internal data for layer "layer" to external file + "new_external_file" + read_data_from_file(layer, fd=None, multiplier=None) : (ndarray, int) + reads in data from a given file "fd" as data from layer "layer". + returns data as an ndarray along with the size of the data + to_string(val, type, is_cellid=False, possible_cellid=False) + converts data "val" of type "type" to a string. is_cellid is True if + the data type is known to be a cellid and is treated as such. when + possible_cellid is True the data is checked to see if it matches the + shape/dimensions of a cellid before using it as one. + resolve_data_size(index) : int + resolves the size of a given data element in a recarray based on the + names in the existing rec_array. assumes repeating data element + names follow the format _X. returns the number of + times the data element repeats. + flatten() + converts layered data to a non-layered data + make_layered() + converts non-layered data to layered data + + See Also + -------- + + Notes + ----- + + Examples + -------- + + + """ + def __init__(self, sim_data, model_or_sim, data_dimensions, get_file_entry, + data_storage_type=DataStorageType.internal_array, + data_structure_type=DataStructureType.ndarray, + layer_shape=(1,), layered=False, stress_period=0, + data_path=()): + self.data_dimensions = data_dimensions + self._model_or_sim = model_or_sim + self._simulation_data = sim_data + self._get_file_entry = get_file_entry + self._data_type_overrides = {} + self._data_storage_type = data_storage_type + self._stress_period = stress_period + self._data_path = data_path + if not data_structure_type == DataStructureType.recarray: + self._data_type = self.data_dimensions.structure.\ + get_datum_type(return_enum_type=True) + else: + self._data_type = None + self.layer_storage = MultiList(shape=layer_shape, + callback=self._create_layer) + #self.layer_storage = [LayerStorage(self, x, data_storage_type) + # for x in range(layer_shape)] + self.data_structure_type = data_structure_type + package_dim = self.data_dimensions.package_dim + self.in_model = self.data_dimensions is not None and \ + len(package_dim.package_path) > 1 and \ + package_dim.model_dim[0].model_name.lower() == \ + package_dim.package_path[0] + + if data_structure_type == DataStructureType.recarray: + self.build_type_list(resolve_data_shape=False) + + self.layered = layered + + # initialize comments + self.pre_data_comments = None + self.comments = OrderedDict() + + def __repr__(self): + return self.get_data_str(True) + + def __str__(self): + return self.get_data_str(False) + + def _create_layer(self, indexes): + return LayerStorage(self, indexes, self._data_storage_type, + self._data_type) + + def flatten(self): + self.layered = False + storage_type = self.layer_storage.first_item().data_storage_type + self.layer_storage = MultiList(mdlist=[LayerStorage(self, 0, + storage_type, + self._data_type)]) + + def make_layered(self): + if not self.layered: + if self.data_structure_type != DataStructureType.ndarray: + message = 'Data structure type "{}" does not support ' \ + 'layered data.'.format(self.data_structure_type) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.data_dimensions.structure.get_model(), + self.data_dimensions.structure.get_package(), + self.data_dimensions.structure.path, 'making data layered', + self.data_dimensions.structure.name, inspect.stack()[0][3], + type_, value_, 
traceback_, message,
+                    self._simulation_data.debug)
+            if self.layer_storage.first_item().data_storage_type == \
+                    DataStorageType.external_file:
+                message = 'Converting external file data into layered ' \
+                          'data currently not supported.'
+                type_, value_, traceback_ = sys.exc_info()
+                raise MFDataException(
+                    self.data_dimensions.structure.get_model(),
+                    self.data_dimensions.structure.get_package(),
+                    self.data_dimensions.structure.path, 'making data layered',
+                    self.data_dimensions.structure.name, inspect.stack()[0][3],
+                    type_, value_, traceback_, message,
+                    self._simulation_data.debug)
+
+            previous_storage = self.layer_storage.first_item()
+            data = previous_storage.get_data()
+            data_dim = self.get_data_dimensions(None)
+            self.layer_storage = MultiList(shape=(data_dim[0],),
+                                           callback=self._create_layer)
+            if previous_storage.data_storage_type == \
+                    DataStorageType.internal_constant:
+                for storage in self.layer_storage.elements():
+                    storage.data_const_value = \
+                        previous_storage.data_const_value
+            elif previous_storage.data_storage_type == \
+                    DataStorageType.internal_array:
+                data_ml = MultiList(data)
+                if not (data_ml.get_total_size() ==
+                        self.layer_storage.get_total_size()):
+                    message = 'Size of data ({}) does not match expected ' \
+                              'value of {}' \
+                              '.'.format(data_ml.get_total_size(),
+                                         self.layer_storage.get_total_size())
+                    type_, value_, traceback_ = sys.exc_info()
+                    raise MFDataException(
+                        self.data_dimensions.structure.get_model(),
+                        self.data_dimensions.structure.get_package(),
+                        self.data_dimensions.structure.path,
+                        'making data layered',
+                        self.data_dimensions.structure.name,
+                        inspect.stack()[0][3],
+                        type_, value_, traceback_, message,
+                        self._simulation_data.debug)
+                for data_layer, storage in zip(data,
+                                               self.layer_storage.elements()):
+                    storage.internal_data = data_layer
+                    storage.factor = previous_storage.factor
+                    storage.iprn = previous_storage.iprn
+            self.layered = True
+
+    def get_data_str(self, formal):
+        data_str = ''
+        # Assemble strings for internal array data
+        for index, storage in enumerate(self.layer_storage.elements()):
+            if storage.data_storage_type == DataStorageType.internal_array:
+                if storage.internal_data is not None:
+                    header = self._get_layer_header_str(index)
+                    if formal:
+                        if self.layered:
+                            data_str = '{}Layer_{}{{{}}}' \
+                                       '\n({})\n'.format(data_str, index + 1,
+                                                         header, repr(storage))
+                        else:
+                            data_str = '{}{{{}}}\n({})\n'.format(data_str,
+                                                                 header,
+                                                                 repr(storage))
+                    else:
+                        data_str = '{}{{{}}}\n({})\n'.format(data_str, header,
+                                                             str(storage))
+            elif storage.data_storage_type == \
+                    DataStorageType.internal_constant:
+                if storage.data_const_value is not None:
+                    data_str = '{}{{{}}}' \
+                               '\n'.format(data_str,
+                                           self._get_layer_header_str(index))
+        return data_str
+
+    def _get_layer_header_str(self, layer):
+        header_list = []
+        if self.layer_storage[layer].data_storage_type == \
+                DataStorageType.external_file:
+            header_list.append('open/close '
+                               '{}'.format(self.layer_storage[layer].fname))
+        elif self.layer_storage[layer].data_storage_type == \
+                DataStorageType.internal_constant:
+            header_list.append('constant {}'.format(self.layer_storage[layer]))
+        else:
+            header_list.append('internal')
+        if self.layer_storage[layer].factor != 1.0 and \
+                self.layer_storage[layer].factor != 1 and \
+                self.data_structure_type != DataStructureType.recarray:
+            header_list.append('factor '
+                               '{}'.format(self.layer_storage[layer].factor))
+        if self.layer_storage[layer].iprn is not None:
+            header_list.append('iprn '
'{}'.format(self.layer_storage[layer].iprn)) + if len(header_list) > 0: + return ', '.join(header_list) + else: + return '' + + def init_layers(self, dimensions): + self.layer_storage= MultiList(shape=dimensions, + callback=self._create_layer) + + def add_layer(self, dimension=2): + self.layer_storage.increment_dimension(dimension, self._create_layer) + + def override_data_type(self, index, data_type): + self._data_type_overrides[index] = data_type + + def get_external_file_path(self, layer): + if layer is None: + return self.layer_storage[0].fname + else: + return self.layer_storage[layer].fname + + def get_const_val(self, layer=None): + if layer is None: + if not self.layer_storage.get_total_size() >= 1: + message = 'Can not get constant value. No data is available.' + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.data_dimensions.structure.get_model(), + self.data_dimensions.structure.get_package(), + self.data_dimensions.structure.path, + 'getting constant value', + self.data_dimensions.structure.name, + inspect.stack()[0][3], + type_, value_, traceback_, message, + self._simulation_data.debug) + first_item = self.layer_storage.first_item() + if not first_item.data_storage_type == \ + DataStorageType.internal_constant: + message = 'Can not get constant value. Storage type must be ' \ + 'internal_constant.' + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.data_dimensions.structure.get_model(), + self.data_dimensions.structure.get_package(), + self.data_dimensions.structure.path, + 'getting constant value', + self.data_dimensions.structure.name, + inspect.stack()[0][3], + type_, value_, traceback_, message, + self._simulation_data.debug) + + return first_item.get_data_const_val() + else: + if not self.layer_storage.in_shape(layer): + message = 'Can not get constant value. Layer "{}" is not a ' \ + 'valid layer.'.format(layer) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.data_dimensions.structure.get_model(), + self.data_dimensions.structure.get_package(), + self.data_dimensions.structure.path, + 'getting constant value', + self.data_dimensions.structure.name, + inspect.stack()[0][3], + type_, value_, traceback_, message, + self._simulation_data.debug) + if not self.layer_storage[layer].data_storage_type == \ + DataStorageType.internal_constant: + message = 'Can not get constant value. Storage type must be ' \ + 'internal_constant.' 
+ type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.data_dimensions.structure.get_model(), + self.data_dimensions.structure.get_package(), + self.data_dimensions.structure.path, + 'getting constant value', + self.data_dimensions.structure.name, + inspect.stack()[0][3], + type_, value_, traceback_, message, + self._simulation_data.debug) + return self.layer_storage[layer].get_data_const_val() + + def has_data(self, layer=None): + ret_val = self._access_data(layer, False) + return ret_val is not None and ret_val != False + + def get_data(self, layer=None, apply_mult=True): + return self._access_data(layer, True, apply_mult=apply_mult) + + def _access_data(self, layer, return_data=False, apply_mult=True): + layer_check = self._resolve_layer(layer) + if (self.layer_storage[layer_check].internal_data is None and + self.layer_storage[layer_check].data_storage_type == + DataStorageType.internal_array) or \ + (self.layer_storage[ + layer_check].data_const_value is None and + self.layer_storage[layer_check].data_storage_type == + DataStorageType.internal_constant): + return None + if layer is None and \ + (self.data_structure_type == DataStructureType.ndarray or \ + self.data_structure_type == DataStructureType.scalar) and \ + return_data: + # return data from all layers + data = self._build_full_data(apply_mult) + if data is None: + if self.layer_storage.first_item().data_storage_type == \ + DataStorageType.internal_constant: + return self.layer_storage.first_item(). \ + get_data()[0] + else: + return data + + if self.layer_storage[layer_check].data_storage_type == \ + DataStorageType.external_file: + if return_data: + return self.external_to_internal(layer) + else: + return True + else: + if self.data_structure_type == DataStructureType.ndarray and \ + self.layer_storage[layer_check].data_const_value is None and \ + self.layer_storage[layer_check].internal_data is None: + return None + if not (layer is None or self.layer_storage.in_shape(layer)): + message = 'Layer "{}" is an invalid layer.'.format(layer) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.data_dimensions.structure.get_model(), + self.data_dimensions.structure.get_package(), + self.data_dimensions.structure.path, + 'accessing data', + self.data_dimensions.structure.name, + inspect.stack()[0][3], + type_, value_, traceback_, message, + self._simulation_data.debug) + if layer is None: + if self.data_structure_type == DataStructureType.ndarray or \ + self.data_structure_type == DataStructureType.scalar: + if self.data_structure_type == DataStructureType.scalar: + return self.layer_storage.first_item().\ + internal_data is not None + check_storage = self.layer_storage[layer_check] + return (check_storage.data_const_value is not None and + check_storage.data_storage_type == + DataStorageType.internal_constant) or ( + check_storage.internal_data is not None and + check_storage.data_storage_type == + DataStorageType.internal_array) + else: + if self.layer_storage[layer_check].data_storage_type == \ + DataStorageType.internal_constant: + if return_data: + # recarray stored as a constant. currently only + # support grid-based constant recarrays. build + # a recarray of all cells + data_list = [] + model_grid = self.data_dimensions.get_model_grid() + structure = self.data_dimensions.structure + package_dim = self.data_dimensions.package_dim + for cellid in model_grid.get_all_model_cells(): + data_line = (cellid,) + \ + (self.layer_storage.first_item(). 
+ data_const_value,) + if len(structure.data_item_structures) > 2: + # append None any expected optional data + for data_item_struct in \ + structure.data_item_structures[2:]: + if (data_item_struct.name != + 'boundname' or + package_dim.boundnames()): + data_line = data_line + (None,) + data_list.append(data_line) + return np.rec.array(data_list, + self._recarray_type_list) + else: + return self.layer_storage[layer_check + ].data_const_value is not None + else: + if return_data: + return self.layer_storage.first_item().\ + internal_data + else: + return True + elif self.layer_storage[layer].data_storage_type == \ + DataStorageType.internal_array: + if return_data: + return self.layer_storage[layer].internal_data + else: + return self.layer_storage[layer].internal_data is not None + elif self.layer_storage[layer].data_storage_type == \ + DataStorageType.internal_constant: + layer_storage = self.layer_storage[layer] + if return_data: + data = self._fill_const_layer(layer) + if data is None: + if layer_storage.data_storage_type == \ + DataStructureType.internal_constant: + return layer_storage.data_const_value[0] + else: + return data + else: + return layer_storage.data_const_value is not None + else: + if return_data: + return self.get_external(layer) + else: + return True + + def append_data(self, data): + # currently only support appending to recarrays + if not (self.data_structure_type == DataStructureType.recarray): + message = 'Can not append to data structure "{}". Can only ' \ + 'append to a recarray datastructure' \ + '.'.format(self.data_structure_type) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.data_dimensions.structure.get_model(), + self.data_dimensions.structure.get_package(), + self.data_dimensions.structure.path, + 'appending data', + self.data_dimensions.structure.name, + inspect.stack()[0][3], + type_, value_, traceback_, message, + self._simulation_data.debug) + internal_data = self.layer_storage.first_item().internal_data + if internal_data is None: + if len(data[0]) != len(self._recarray_type_list): + # rebuild type list using existing data as a guide + self.build_type_list(data=data) + self.set_data(np.rec.array(data, self._recarray_type_list)) + else: + if len(self.layer_storage.first_item().internal_data[0]) < \ + len(data[0]): + # Rebuild recarray to fit larger size + count = 0 + last_count = len(data[0]) - len(internal_data[0]) + while count < last_count: + self._duplicate_last_item() + count += 1 + internal_data_list = internal_data.tolist() + for data_item in data: + internal_data_list.append(data_item) + self._add_placeholders(internal_data_list) + self.set_data(np.rec.array(internal_data_list, + self._recarray_type_list)) + else: + if len(self.layer_storage.first_item().internal_data[0]) \ + > len(data[0]): + # Add placeholders to data + self._add_placeholders(data) + self.set_data(np.hstack( + (internal_data, np.rec.array(data, + self._recarray_type_list)))) + + def set_data(self, data, layer=None, multiplier=None, key=None, + autofill=False): + if multiplier is None: + multiplier = [1.0] + if self.data_structure_type == DataStructureType.recarray or \ + self.data_structure_type == DataStructureType.scalar: + self._set_list(data, layer, multiplier, key, autofill) + else: + self._set_array(data, layer, multiplier, key, autofill) + + def _set_list(self, data, layer, multiplier, key, autofill): + if isinstance(data, dict): + if 'filename' in data: + if 'binary' in data and data['binary']: + if 
self.data_dimensions.package_dim.boundnames(): + message = 'Unable to store list data ({}) to a binary '\ + 'file when using boundnames' \ + '.'.format(self.data_dimensions.structure. + name) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.data_dimensions.structure.get_model(), + self.data_dimensions.structure.get_package(), + self.data_dimensions.structure.path, + 'writing list data to binary file', + self.data_dimensions.structure.name, + inspect.stack()[0][3], type_, value_, traceback_, + message, self._simulation_data.debug) + self.process_open_close_line(data, layer) + return + self.store_internal(data, layer, False, multiplier, key=key, + autofill=autofill) + + def _set_array(self, data, layer, multiplier, key, autofill): + # make a list out of a single item + if isinstance(data, int) or isinstance(data, float) or isinstance(data, str): + data = [data] + + # check for possibility of multi-layered data + success = False + layer_num = 0 + if layer is None and self.data_structure_type == \ + DataStructureType.ndarray and len(data) ==\ + self.layer_storage.get_total_size() and not \ + isinstance(data, dict): + # loop through list and try to store each list entry as a layer + success = True + for layer_num, layer_data in enumerate(data): + if not isinstance(layer_data, list) and \ + not isinstance(layer_data, dict) and \ + not isinstance(layer_data, np.ndarray): + layer_data = [layer_data] + layer_index = self.layer_storage.nth_index(layer_num) + success = success and self._set_array_layer(layer_data, + layer_index, + multiplier, + key) + if not success: + # try to store as a single layer + success = self._set_array_layer(data, layer, multiplier, key) + self.layered = bool(self.layer_storage.get_total_size() > 1) + if not success: + message = 'Unable to set data "{}" layer {}. 
Data is not ' \
+                      'in a valid format' \
+                      '.'.format(self.data_dimensions.structure.name,
+                                 layer_num)
+            type_, value_, traceback_ = sys.exc_info()
+            raise MFDataException(
+                self.data_dimensions.structure.get_model(),
+                self.data_dimensions.structure.get_package(),
+                self.data_dimensions.structure.path, 'setting array data',
+                self.data_dimensions.structure.name, inspect.stack()[0][3],
+                type_, value_, traceback_, message,
+                self._simulation_data.debug)
+
+    def _set_array_layer(self, data, layer, multiplier, key):
+        # look for a single constant value
+        data_type = self.data_dimensions.structure.\
+            get_datum_type(return_enum_type=True)
+        if not isinstance(data, dict) and not isinstance(data, str):
+            if self._calc_data_size(data, 2) == 1 and \
+                    self._is_type(data[0], data_type):
+                # store data as const
+                self.store_internal(data, layer, True, multiplier, key=key)
+                return True
+
+        # look for internal and open/close data
+        if isinstance(data, dict):
+            if 'data' in data:
+                if isinstance(data['data'], int) or \
+                        isinstance(data['data'], float) or \
+                        isinstance(data['data'], str):
+                    # data should always be in a list/array
+                    data['data'] = [data['data']]
+
+            if 'filename' in data:
+                multiplier, iprn, binary = \
+                    self.process_open_close_line(data, layer)[0:3]
+                # store location to file
+                self.store_external(data['filename'], layer, [multiplier],
+                                    print_format=iprn, binary=binary,
+                                    do_not_verify=True)
+                return True
+            elif 'data' in data:
+                multiplier, iprn = self.process_internal_line(data)
+                if len(data['data']) == 1:
+                    # merge multiplier with single value and make constant
+                    if DatumUtil.is_float(multiplier):
+                        mult = 1.0
+                    else:
+                        mult = 1
+                    self.store_internal([data['data'][0] * multiplier], layer,
+                                        True, [mult], key=key,
+                                        print_format=iprn)
+                else:
+                    self.store_internal(data['data'], layer, False,
+                                        [multiplier], key=key,
+                                        print_format=iprn)
+                return True
+        elif isinstance(data[0], str):
+            if data[0].lower() == 'internal':
+                multiplier, iprn = self.process_internal_line(data)
+                self.store_internal(data[-1], layer, False, [multiplier],
+                                    key=key, print_format=iprn)
+                return True
+            elif data[0].lower() != 'open/close':
+                # assume open/close is just omitted
+                new_data = data[:]
+                new_data.insert(0, 'open/close')
+            else:
+                new_data = data[:]
+            self.process_open_close_line(new_data, layer, True)
+            return True
+        # try to resolve as internal array
+        layer_storage = self.layer_storage[self._resolve_layer(layer)]
+        if not (layer_storage.data_storage_type ==
+                DataStorageType.internal_constant and
+                PyListUtil.has_one_item(data)):
+            # store data as is
+            try:
+                self.store_internal(data, layer, False, multiplier, key=key)
+            except MFDataException:
+                return False
+            return True
+        return False
+
+    def get_active_layer_indices(self):
+        layer_index = []
+        for index in self.layer_storage.indexes():
+            if self.layer_storage[index].fname is not None or \
+                    self.layer_storage[index].internal_data is not None:
+                layer_index.append(index)
+        return layer_index
+
+    def get_external(self, layer=None):
+        if not (layer is None or self.layer_storage.in_shape(layer)):
+            message = 'Can not get external data for layer "{}"' \
+                      '.'.format(layer)
+            type_, value_, traceback_ = sys.exc_info()
+            raise MFDataException(
+                self.data_dimensions.structure.get_model(),
+                self.data_dimensions.structure.get_package(),
+                self.data_dimensions.structure.path,
+                'getting external data',
+                self.data_dimensions.structure.name,
+                inspect.stack()[0][3],
+                type_, value_, traceback_, message,
+                self._simulation_data.debug)
+
+    def 
store_internal(self, data, layer=None, const=False, multiplier=None, + key=None, autofill=False, + print_format=None): + if multiplier is None: + multiplier = [self.get_default_mult()] + if self.data_structure_type == DataStructureType.recarray: + if self.layer_storage.first_item().data_storage_type == \ + DataStorageType.internal_constant: + self.layer_storage.first_item().data_const_value = data + else: + self.layer_storage.first_item().data_storage_type = \ + DataStorageType.internal_array + if data is None or isinstance(data, np.recarray): + if self._simulation_data.verify_data: + self._verify_list(data) + self.layer_storage.first_item().internal_data = data + else: + if data is None: + self.set_data(None) + if autofill and data is not None: + if isinstance(data, tuple) and isinstance(data[0], + tuple): + # convert to list of tuples + data = list(data) + if isinstance(data, list) and \ + DatumUtil.is_basic_type(data[0]): + # this is a simple list, turn it into a tuple + # inside a list so that it is interpreted + # correctly by numpy.recarray + tupled_data = () + for data_item in data: + tupled_data += (data_item,) + data = [tupled_data] + + if not isinstance(data, list): + # put data in a list format for recarray + data = [(data,)] + # auto-fill tagged keyword + structure = self.data_dimensions.structure + data_item_structs = structure.data_item_structures + if data_item_structs[0].tagged and not \ + data_item_structs[0].type == DatumType.keyword: + for data_index, data_entry in enumerate(data): + if (data_item_structs[0].type == + DatumType.string and + data_entry[0].lower() == + data_item_structs[0].name.lower()): + break + data[data_index] = \ + (data_item_structs[0].name.lower(),) \ + + data[data_index] + if data is not None: + new_data = self._build_recarray(data, key, autofill) + self.layer_storage.first_item().internal_data = new_data + elif self.data_structure_type == DataStructureType.scalar: + self.layer_storage.first_item().internal_data = data + else: + layer, multiplier = self._store_prep(layer, multiplier) + dimensions = self.get_data_dimensions(layer) + if const: + self.layer_storage[layer].data_storage_type = \ + DataStorageType.internal_constant + self.layer_storage[layer].data_const_value = \ + [mfdatautil.get_first_val(data)] + else: + self.layer_storage[layer].data_storage_type = \ + DataStorageType.internal_array + try: + self.layer_storage[layer].internal_data = \ + np.reshape(data, dimensions) + except: + message = 'An error occurred when reshaping data ' \ + '"{}" to store. Expected data ' \ + 'dimensions: ' \ + '{}'.format(self.data_dimensions.structure.name, + dimensions) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.data_dimensions.structure.get_model(), + self.data_dimensions.structure.get_package(), + self.data_dimensions.structure.path, + 'setting array data', self.data_dimensions. + structure.name, inspect.stack()[0][3], type_, + value_, traceback_, message, + self._simulation_data.debug) + self.layer_storage[layer].factor = multiplier + self.layer_storage[layer].iprn = print_format + + def _build_recarray(self, data, key, autofill): + self.build_type_list(data=data, key=key) + if autofill and data is not None: + # resolve any fields with data types that do not + # agree with the expected type list + self._resolve_multitype_fields(data) + if isinstance(data, list): + # data needs to be stored as tuples within a list. 
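For reference, a minimal standalone sketch of the convention that _build_recarray and its helpers (_tupleize_data, _add_placeholders) enforce: np.rec.array expects a list of tuples whose layout matches the type list, so list rows are converted to tuples and short rows are padded with placeholders. This is plain NumPy with illustrative field names, not FloPy's internal API:

    import numpy as np

    # Illustrative type list; FloPy derives the real one from the dfn files.
    type_list = [('k', int), ('i', int), ('j', int), ('flux', float)]

    rows = [[0, 1, 1, -500.0],   # a plain list row...
            (0, 1, 2)]           # ...and a short tuple with 'flux' missing

    fixed = []
    for row in rows:
        if not isinstance(row, tuple):
            row = tuple(row)                        # what _tupleize_data does
        if len(row) < len(type_list):
            # pad the missing float field with nan, as _add_placeholders does
            row += (np.nan,) * (len(type_list) - len(row))
        fixed.append(row)

    rec = np.rec.array(fixed, dtype=type_list)
    print(rec.flux)              # [-500.   nan]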
+ # if this is not the case try to fix it + self._tupleize_data(data) + # add placeholders to data so it agrees with + # expected dimensions of recarray + self._add_placeholders(data) + try: + new_data = np.rec.array(data, + self._recarray_type_list) + except: + data_expected = [] + for data_type in self._recarray_type_list: + data_expected.append('<{}>'.format( + data_type[0])) + message = 'An error occurred when storing data ' \ + '"{}" in a recarray. {} data is a one ' \ + 'or two dimensional list containing ' \ + 'the variables "{}" (some variables ' \ + 'may be optional, see MF6 ' \ + 'documentation), but data "{}" was ' \ + 'supplied.'.format( + self.data_dimensions.structure.name, + self.data_dimensions.structure.name, + ' '.join(data_expected), data) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.data_dimensions.structure.get_model(), + self.data_dimensions.structure.get_package(), + self.data_dimensions.structure.path, + 'setting array data', + self.data_dimensions.structure.name, + inspect.stack()[0][3], type_, value_, + traceback_, message, + self._simulation_data.debug) + if self._simulation_data.verify_data: + self._verify_list(new_data) + return new_data + + def _resolve_multitype_fields(self, data): + # find any data fields where the data is not a consistent type + itype_len = len(self._recarray_type_list) + for data_entry in data: + for index, data_val in enumerate(data_entry): + if index < itype_len and \ + self._recarray_type_list[index][1] != object and \ + not isinstance(data_val, + self._recarray_type_list[index][1]) \ + and (not isinstance(data_val, int) or + self._recarray_type_list[index][1] != float): + # for inconsistent types use generic object type + self._recarray_type_list[index] = \ + (self._recarray_type_list[index][0], object) + + def store_external(self, file_path, layer=None, multiplier=None, + print_format=None, data=None, do_not_verify=False, + binary=False): + if multiplier is None: + multiplier = [self.get_default_mult()] + layer_new, multiplier = self._store_prep(layer, multiplier) + + if data is not None: + if self.data_structure_type == DataStructureType.recarray: + + # create external file and write file entry to the file + data_dim = self.data_dimensions + model_name = data_dim.package_dim.model_dim[0].model_name + fp = self._simulation_data.mfpath.resolve_path(file_path, + model_name) + if binary: + file_access = MFFileAccessList( + self.data_dimensions.structure, self.data_dimensions, + self._simulation_data, self._data_path, + self._stress_period) + file_access.write_binary_file( + data, fp, self._model_or_sim.modeldiscrit, + precision='double') + else: + try: + fd = open(fp, 'w') + except: + message = 'Unable to open file {}. 
Make sure the ' \ + 'file is not locked and the folder exists' \ + '.'.format(fp) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.data_dimensions.structure.get_model(), + self.data_dimensions.structure.get_package(), + self.data_dimensions.structure.path, + 'opening external file for writing', + data_dim.structure.name, inspect.stack()[0][3], + type_, value_, traceback_, message, + self._simulation_data.debug) + # store data internally first so that a file entry + # can be generated + self.store_internal(data, layer_new, False, [multiplier], None, + False, print_format) + ext_file_entry = self._get_file_entry() + fd.write(ext_file_entry) + fd.close() + # set as external data + self.layer_storage.first_item().internal_data = None + else: + # store data externally in file + data_size = self.get_data_size(layer_new) + data_dim = self.data_dimensions + data_type = data_dim.structure.data_item_structures[0].type + model_name = data_dim.package_dim.model_dim[0].model_name + fp = self._simulation_data.mfpath.resolve_path(file_path, + model_name) + + if self._calc_data_size(data, 2) == 1 and data_size > 1: + # constant data, need to expand + self.layer_storage[layer_new].data_const_value = data + self.layer_storage[layer_new].DataStorageType = \ + DataStorageType.internal_constant + data = self._fill_const_layer(layer) + elif isinstance(data, list): + data = self._to_ndarray(data, layer) + if binary: + text = self.data_dimensions.structure.name + file_access = MFFileAccessArray( + self.data_dimensions.structure, self.data_dimensions, + self._simulation_data, self._data_path, + self._stress_period) + str_layered = self.data_dimensions.structure.layered + file_access.write_binary_file( + data, fp, text, self._model_or_sim.modeldiscrit, + self._model_or_sim.modeltime, + stress_period=self._stress_period, precision='double', + write_multi_layer=(layer is None and str_layered)) + else: + file_access = MFFileAccessArray( + self.data_dimensions.structure, self.data_dimensions, + self._simulation_data, self._data_path, + self._stress_period) + file_access.write_text_file(data, fp, data_type, data_size) + self.layer_storage[layer_new].factor = multiplier + self.layer_storage[layer_new].internal_data = None + self.layer_storage[layer_new].data_const_value = None + + else: + if self.data_structure_type == DataStructureType.recarray: + self.layer_storage.first_item().internal_data = None + else: + self.layer_storage[layer_new].factor = multiplier + self.layer_storage[layer_new].internal_data = None + self.set_ext_file_attributes(layer_new, file_path, print_format, + binary) + + def set_ext_file_attributes(self, layer, file_path, + print_format, binary): + # point to the external file and set flags + self.layer_storage[layer].fname = file_path + self.layer_storage[layer].iprn = print_format + self.layer_storage[layer].binary = binary + self.layer_storage[layer].data_storage_type = \ + DataStorageType.external_file + + def point_to_existing_external_file(self, arr_line, layer): + multiplier, print_format, binary, \ + data_file = self.process_open_close_line(arr_line, layer, store=False) + self.set_ext_file_attributes(layer, data_file, print_format, binary) + self.layer_storage[layer].factor = multiplier + + def external_to_external(self, new_external_file, multiplier=None, + layer=None, binary=None): + # currently only support files containing ndarrays + if not (self.data_structure_type == DataStructureType.ndarray): + message = 'Can not copy external file of type "{}". 
Only ' \ + 'files containing ndarrays currently supported' \ + '.'.format(self.data_structure_type) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.data_dimensions.structure.get_model(), + self.data_dimensions.structure.get_package(), + self.data_dimensions.structure.path, + 'copy external file', + self.data_dimensions.structure.name, + inspect.stack()[0][3], + type_, value_, traceback_, message, + self._simulation_data.debug) + if not ((layer is None and self.layer_storage.get_total_size() == 1) or + (layer is not None and self.layer_storage.in_shape(layer))): + if layer is None: + message = 'When no layer is supplied the data must contain ' \ + 'only one layer. Data contains {} layers' \ + '.' .format(self.layer_storage.get_total_size()) + else: + message = 'layer "{}" is not a valid layer'.format(layer) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.data_dimensions.structure.get_model(), + self.data_dimensions.structure.get_package(), + self.data_dimensions.structure.path, + 'copy external file', + self.data_dimensions.structure.name, + inspect.stack()[0][3], + type_, value_, traceback_, message, + self._simulation_data.debug) + # get data storage + if layer is None: + layer = 1 + if self.layer_storage[layer].fname is None: + message = 'No file name exists for layer {}.'.format(layer) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.data_dimensions.structure.get_model(), + self.data_dimensions.structure.get_package(), + self.data_dimensions.structure.path, + 'copy external file', + self.data_dimensions.structure.name, + inspect.stack()[0][3], + type_, value_, traceback_, message, + self._simulation_data.debug) + + # copy file to new location + copyfile(self.layer_storage[layer].fname, new_external_file) + + # update + if binary is None: + binary = self.layer_storage[layer].binary + self.store_external(new_external_file, layer, + [self.layer_storage[layer].factor], + self.layer_storage[layer].iprn, + binary=binary) + + def external_to_internal(self, layer, store_internal=False): + if layer is None: + layer = 0 + # load data from external file + model_name = self.data_dimensions.package_dim.model_dim[0]. 
\ + model_name + read_file = self._simulation_data.mfpath.resolve_path( + self.layer_storage[layer].fname, model_name) + # currently support files containing ndarrays or recarrays + if self.data_structure_type == DataStructureType.ndarray: + file_access = MFFileAccessArray( + self.data_dimensions.structure, self.data_dimensions, + self._simulation_data, self._data_path, + self._stress_period) + if self.layer_storage[layer].binary: + data_out = file_access.read_binary_data_from_file( + read_file, self.get_data_dimensions(layer), + self.get_data_size(layer), self._data_type, + self._model_or_sim.modeldiscrit)[0] + else: + data_out = file_access.read_text_data_from_file( + self.get_data_size(layer), self._data_type, + self.get_data_dimensions(layer), layer, read_file)[0] + if self.layer_storage[layer].factor is not None: + data_out = data_out * self.layer_storage[layer].factor + + if store_internal: + self.store_internal(data_out, layer) + return data_out + elif self.data_structure_type == DataStructureType.recarray: + file_access = MFFileAccessList( + self.data_dimensions.structure, self.data_dimensions, + self._simulation_data, self._data_path, + self._stress_period) + if self.layer_storage[layer].binary: + data = file_access.read_binary_data_from_file( + read_file, self._model_or_sim.modeldiscrit) + data_out = self._build_recarray(data, layer, False) + else: + with open(read_file, 'r') as fd_read_file: + data_out = file_access.read_list_data_from_file( + fd_read_file, self, self._stress_period, + store_internal=False) + if store_internal: + self.store_internal(data_out, layer) + return data_out + else: + path = self.data_dimensions.structure.path + message= 'Can not convert {} to internal data. External to ' \ + 'internal file operations currently only supported ' \ + 'for ndarrays.'.format(path[-1]) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.data_dimensions.structure.get_model(), + self.data_dimensions.structure.get_package(), + self.data_dimensions.structure.path, + 'opening external file for writing', + self.data_dimensions.structure.name, inspect.stack()[0][3], + type_, value_, traceback_, message, + self._simulation_data.debug) + + def internal_to_external(self, new_external_file, multiplier=None, + layer=None, print_format=None, binary=False): + if layer is None: + layer_item = self.layer_storage.first_item() + else: + layer_item = self.layer_storage[layer] + if layer_item.data_storage_type == DataStorageType.internal_array: + data = layer_item.internal_data + else: + data = self._fill_const_layer(layer) + self.store_external(new_external_file, layer, multiplier, + print_format, data, binary=binary) + + def resolve_shape_list(self, data_item, repeat_count, current_key, + data_line, cellid_size=None): + struct = self.data_dimensions.structure + try: + resolved_shape, shape_rule = \ + self.data_dimensions.get_data_shape(data_item, struct, + data_line, + repeating_key= + current_key) + except Exception as se: + comment = 'Unable to resolve shape for data "{}" field "{}"' \ + '.'.format(struct.name, + data_item.name) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(struct.get_model(), + struct.get_package(), struct.path, + 'loading data list from package file', + struct.name, inspect.stack()[0][3], + type_, value_, traceback_, comment, + self._simulation_data.debug, se) + + if cellid_size is not None: + data_item.remove_cellid(resolved_shape, cellid_size) + + if len(resolved_shape) == 1: + if repeat_count < resolved_shape[0]: + return 
True, shape_rule is not None + elif resolved_shape[0] == -9999: + # repeating unknown number of times in 1-D array + return False, True + return False, False + + def _validate_cellid(self, arr_line, data_index): + if not self.data_dimensions.structure.model_data: + # not model data so this is not a cell id + return False + if arr_line is None: + return False + model_grid = self.data_dimensions.get_model_grid() + cellid_size = model_grid.get_num_spatial_coordinates() + if cellid_size + data_index > len(arr_line): + return False + for index, \ + dim_size in zip(range(data_index, cellid_size + data_index), + model_grid.get_model_dim()): + if not DatumUtil.is_int(arr_line[index]): + return False + val = int(arr_line[index]) + if val <= 0 or val > dim_size: + return False + return True + + def add_data_line_comment(self, comment, line_num): + if line_num in self.comments: + self.comments[line_num].add_text('\n') + self.comments[line_num].add_text(' '.join(comment)) + else: + self.comments[line_num] = MFComment(' '.join(comment), + self.data_dimensions.structure. + path, + self._simulation_data, + line_num) + + def process_internal_line(self, arr_line): + multiplier = self.get_default_mult() + print_format = None + if isinstance(arr_line, list): + if len(arr_line) < 2: + message = 'Data array "{}" contains an INTERNAL ' \ + 'that is not followed by a multiplier in line ' \ + '"{}".'.format(self.data_dimensions.structure.name, + ' '.join(arr_line)) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.data_dimensions.structure.get_model(), + self.data_dimensions.structure.get_package(), + self.data_dimensions.structure.path, + 'processing internal data header', + self.data_dimensions.structure.name, + inspect.stack()[0][3], type_, value_, + traceback_, message, + self._simulation_data.debug) + index = 1 + while index < len(arr_line): + if isinstance(arr_line[index], str): + if arr_line[index].lower() == 'factor' and \ + index + 1 < len(arr_line): + multiplier = convert_data(arr_line[index+1], + self.data_dimensions, + self._data_type) + index += 2 + elif arr_line[index].lower() == 'iprn' and \ + index + 1 < len(arr_line): + print_format = arr_line[index+1] + index += 2 + else: + break + else: + break + elif isinstance(arr_line, dict): + for key, value in arr_line.items(): + if key.lower() == 'factor': + multiplier = convert_data(value, self.data_dimensions, + self._data_type) + if key.lower() == 'iprn': + print_format = value + return multiplier, print_format + + def process_open_close_line(self, arr_line, layer, store=True): + # process open/close line + index = 2 + if self._data_type == DatumType.integer: + multiplier = 1 + else: + multiplier = 1.0 + print_format = None + binary = False + data_file = None + data = None + + data_dim = self.data_dimensions + if isinstance(arr_line, list): + if len(arr_line) < 2 and store: + message = 'Data array "{}" contains a OPEN/CLOSE ' \ + 'that is not followed by a file. 
' \ + '{}'.format(data_dim.structure.name, + data_dim.structure.path) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.data_dimensions.structure.get_model(), + self.data_dimensions.structure.get_package(), + self.data_dimensions.structure.path, + 'processing open/close line', data_dim.structure.name, + inspect.stack()[0][3], type_, value_, traceback_, message, + self._simulation_data.debug) + while index < len(arr_line): + if isinstance(arr_line[index], str): + if arr_line[index].lower() == 'factor' and \ + index + 1 < len(arr_line): + try: + multiplier = convert_data(arr_line[index+1], + self.data_dimensions, + self._data_type) + except Exception as ex: + message = 'Data array {} contains an OPEN/CLOSE ' \ + 'with an invalid multiplier following ' \ + 'the "factor" keyword.' \ + '.'.format(data_dim.structure.name) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.data_dimensions.structure.get_model(), + self.data_dimensions.structure.get_package(), + self.data_dimensions.structure.path, + 'processing open/close line', + data_dim.structure.name, inspect.stack()[0][3], + type_, value_, traceback_, message, + self._simulation_data.debug, ex) + index += 2 + elif arr_line[index].lower() == 'iprn' and \ + index + 1 < len(arr_line): + print_format = arr_line[index+1] + index += 2 + elif arr_line[index].lower() == 'data' and \ + index + 1 < len(arr_line): + data = arr_line[index+1] + index += 2 + elif arr_line[index].lower() == 'binary' or \ + arr_line[index].lower() == '(binary)': + binary = True + index += 1 + else: + break + else: + break + # save comments + if index < len(arr_line): + self.layer_storage[layer].comments = MFComment( + ' '.join(arr_line[index:]), + self.data_dimensions.structure.path, + self._simulation_data, layer) + if arr_line[0].lower() == 'open/close': + data_file = arr_line[1] + else: + data_file = arr_line[0] + elif isinstance(arr_line, dict): + for key, value in arr_line.items(): + if key.lower() == 'factor': + try: + multiplier = convert_data(value, self.data_dimensions, + self._data_type) + except Exception as ex: + message = 'Data array {} contains an OPEN/CLOSE ' \ + 'with an invalid multiplier following the ' \ + '"factor" keyword.' 
\ + '.'.format(data_dim.structure.name) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.data_dimensions.structure.get_model(), + self.data_dimensions.structure.get_package(), + self.data_dimensions.structure.path, + 'processing open/close line', + data_dim.structure.name, inspect.stack()[0][3], + type_, value_, traceback_, message, + self._simulation_data.debug, ex) + if key.lower() == 'iprn': + print_format = value + if key.lower() == 'binary': + binary = bool(value) + if key.lower() == 'data': + data = value + if 'filename' in arr_line: + data_file = arr_line['filename'] + + if data_file is None: + message = 'Data array {} contains an OPEN/CLOSE without a ' \ + 'fname (file name) specified' \ + '.'.format(data_dim.structure.name) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.data_dimensions.structure.get_model(), + self.data_dimensions.structure.get_package(), + self.data_dimensions.structure.path, + 'processing open/close line', + data_dim.structure.name, + inspect.stack()[0][3], type_, value_, + traceback_, message, + self._simulation_data.debug) + + if store: + # store external info + self.store_external(data_file, layer, [multiplier], print_format, + binary=binary, data=data) + + # add to active list of external files + model_name = data_dim.package_dim.model_dim[0].model_name + self._simulation_data.mfpath.add_ext_file(data_file, model_name) + + return multiplier, print_format, binary, data_file + + @staticmethod + def _tupleize_data(data): + for index, data_line in enumerate(data): + if not isinstance(data_line, tuple): + if isinstance(data_line, list): + data[index] = tuple(data_line) + else: + data[index] = (data_line,) + + def _verify_list(self, data): + if data is not None: + model_grid = None + cellid_size = None + for data_line in data: + data_line_len = len(data_line) + for index in range(0, min(data_line_len, + len(self._recarray_type_list))): + if self._recarray_type_list[index][0] == 'cellid' and \ + self.data_dimensions.get_model_dim(None).model_name\ + is not None and data_line[index] is not None: + # this is a cell id. verify that it contains the + # correct number of integers + if cellid_size is None: + model_grid = self.data_dimensions.get_model_grid() + cellid_size = model_grid.\ + get_num_spatial_coordinates() + if cellid_size != 1 and \ + len(data_line[index]) != cellid_size and \ + isinstance(data_line[index], int): + message = 'Cellid "{}" contains {} integer(s). 
' \ + 'Expected a cellid containing {} ' \ + 'integer(s) for grid type' \ + ' {}.'.format(data_line[index], + len(data_line[index]), + cellid_size, + str( + model_grid.grid_type())) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.data_dimensions.structure.get_model(), + self.data_dimensions.structure.get_package(), + self.data_dimensions.structure.path, + 'verifying cellid', + self.data_dimensions.structure.name, + inspect.stack()[0][3], + type_, value_, traceback_, message, + self._simulation_data.debug) + + def _add_placeholders(self, data): + idx = 0 + for data_line in data: + data_line_len = len(data_line) + if data_line_len < len(self._recarray_type_list): + for index in range(data_line_len, + len(self._recarray_type_list)): + if self._recarray_type_list[index][1] == int: + self._recarray_type_list[index] = \ + (self._recarray_type_list[index][0], object) + data_line += (None,) + elif self._recarray_type_list[index][1] == float: + data_line += (np.nan,) + else: + data_line += (None,) + data[idx] = data_line + idx += 1 + + def _duplicate_last_item(self): + last_item = self._recarray_type_list[-1] + arr_item_name = last_item[0].split('_') + if DatumUtil.is_int(arr_item_name[-1]): + new_item_num = int(arr_item_name[-1]) + 1 + new_item_name = '_'.join(arr_item_name[0:-1]) + new_item_name = '{}_{}'.format(new_item_name, new_item_num) + else: + new_item_name = '{}_1'.format(last_item[0]) + self._recarray_type_list.append((new_item_name, last_item[1])) + + def _build_full_data(self, apply_multiplier=False): + if self.data_structure_type == DataStructureType.scalar: + return self.layer_storage.first_item().internal_data + dimensions = self.get_data_dimensions(None) + if dimensions[0] < 0: + return None + all_none = True + np_data_type = self.data_dimensions.structure.get_datum_type() + full_data = np.full(dimensions, np.nan, + self.data_dimensions.structure.get_datum_type(True)) + is_aux = self.data_dimensions.structure.name == 'aux' + if is_aux: + aux_data = [] + if not self.layered: + layers_to_process = [0] + else: + layers_to_process = self.layer_storage.indexes() + for layer in layers_to_process: + if self.layer_storage[layer].factor is not None and \ + apply_multiplier: + mult = self.layer_storage[layer].factor + elif self._data_type == DatumType.integer: + mult = 1 + else: + mult = 1.0 + + if self.layer_storage[layer].data_storage_type == \ + DataStorageType.internal_array: + if self.layer_storage[layer].internal_data is None or \ + len(self.layer_storage[layer].internal_data) > 0 and \ + self.layer_storage[layer].internal_data[0] is None: + if is_aux: + full_data = None + else: + return None + elif self.layer_storage.get_total_size() == 1 or \ + not self.layered or not self._has_layer_dim(): + full_data = self.layer_storage[layer].internal_data * mult + else: + full_data[layer] = \ + self.layer_storage[layer].internal_data * mult + elif self.layer_storage[layer].data_storage_type == \ + DataStorageType.internal_constant: + if self.layer_storage.get_total_size() == 1 or \ + not self.layered or not self._has_layer_dim(): + full_data = self._fill_const_layer(layer) * mult + else: + full_data[layer] = self._fill_const_layer(layer) * mult + else: + file_access = MFFileAccessArray( + self.data_dimensions.structure, self.data_dimensions, + self._simulation_data, self._data_path, + self._stress_period) + model_name = self.data_dimensions.package_dim.model_dim[0]. 
\ + model_name + read_file = self._simulation_data.mfpath.resolve_path( + self.layer_storage[layer].fname, model_name) + + if self.layer_storage[layer].binary: + data_out = file_access.read_binary_data_from_file( + read_file, self.get_data_dimensions(layer), + self.get_data_size(layer), self._data_type, + self._model_or_sim.modeldiscrit, + not self.layered)[0] * mult + else: + data_out = file_access.read_text_data_from_file( + self.get_data_size(layer), np_data_type, + self.get_data_dimensions(layer), layer, + read_file)[0] * mult + if self.layer_storage.get_total_size() == 1 or \ + not self.layered: + full_data = data_out + else: + full_data[layer] = data_out + if is_aux: + if full_data is not None: + all_none = False + aux_data.append(full_data) + full_data = np.full( + dimensions, np.nan, + self.data_dimensions.structure.get_datum_type(True)) + if is_aux: + if all_none: + return None + else: + return np.stack(aux_data, axis=0) + else: + return full_data + + def _resolve_layer(self, layer): + if layer is None: + return self.layer_storage.first_index() + else: + return layer + + def _to_ndarray(self, data, layer): + data_dimensions = self.get_data_dimensions(layer) + data_iter = MultiListIter(data) + return self._fill_dimensions(data_iter, data_dimensions) + + def _fill_const_layer(self, layer): + data_dimensions = self.get_data_dimensions(layer) + if layer is None: + ls = self.layer_storage.first_item() + else: + ls = self.layer_storage[layer] + if data_dimensions[0] < 0: + return ls.data_const_value + else: + data_type = self.data_dimensions.structure. \ + get_datum_type(numpy_type=True) + return np.full(data_dimensions, ls.data_const_value[0], data_type) + + def _is_type(self, data_item, data_type): + if data_type == DatumType.string or data_type == DatumType.keyword: + return True + elif data_type == DatumType.integer: + return DatumUtil.is_int(data_item) + elif data_type == DatumType.double_precision: + return DatumUtil.is_float(data_item) + elif data_type == DatumType.keystring: + # TODO: support keystring type + if self._simulation_data.verbosity_level.value >= \ + VerbosityLevel.normal.value: + print('Keystring type currently not supported.') + return True + else: + if self._simulation_data.verbosity_level.value >= \ + VerbosityLevel.normal.value: + print('{} type checking currently not supported'.format(data_type)) + return True + + def _fill_dimensions(self, data_iter, dimensions): + if self.data_structure_type == DataStructureType.ndarray: + np_dtype = MFFileAccess.datum_to_numpy_type(self._data_type)[0] + # initialize array + data_array = np.ndarray(shape=dimensions, dtype=np_dtype) + # fill array + for index in ArrayIndexIter(dimensions): + data_array.itemset(index, data_iter.__next__()) + return data_array + elif self.data_structure_type == DataStructureType.scalar: + return data_iter.__next__() + else: + data_array = None + data_line = () + # fill array + array_index_iter = ArrayIndexIter(dimensions) + current_col = 0 + for index in array_index_iter: + data_line += (index,) + if current_col == dimensions[1] - 1: + try: + if data_array is None: + data_array = np.rec.array(data_line, + self._recarray_type_list) + else: + rec_array = np.rec.array(data_line, + self._recarray_type_list) + data_array = np.hstack((data_array, + rec_array)) + except: + message = 'An error occurred when storing data ' \ + '"{}" in a recarray. 
Data line being ' \ + 'stored: {}'.format( + self.data_dimensions.structure.name, + data_line) + + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.data_dimensions.structure.get_model(), + self.data_dimensions.structure.get_package(), + self.data_dimensions.structure.path, + 'processing open/close line', + dimensions.structure.name, inspect.stack()[0][3], + type_, value_, traceback_, message, + self._simulation_data.debug) + current_col = 0 + data_line = () + data_array[index] = data_iter.next() + return data_array + + def set_tas(self, tas_name, tas_label, current_key): + # move to storage + package_dim = self.data_dimensions.package_dim + tas_names = package_dim.get_tasnames() + if tas_name.lower() not in tas_names and \ + self._simulation_data.verbosity_level.value >= \ + VerbosityLevel.normal.value: + print('WARNING: Time array series name {} not found in any ' + 'time series file'.format(tas_name)) + # this is a time series array with a valid tas variable + self.data_structure_type = DataStructureType.scalar + try: + self.set_data('{} {}'.format(tas_label, tas_name), 0, + key=current_key) + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + structure = self.data_dimensions.structure + raise MFDataException(structure.get_model(), + structure.get_package(), + structure.path, + 'storing data', + structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, None, + self._simulation_data.debug, ex) + + def resolve_data_size(self, index): + # Resolves the size of a given data element based on the names in the + # existing rec_array. Assumes repeating data element names follow the + # format _X + if self.data_structure_type != DataStructureType.recarray: + message = 'Data structure type is {}. Data structure type must ' \ + 'be recarray.'.format(self.data_structure_type) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.data_dimensions.structure.get_model(), + self.data_dimensions.structure.get_package(), + self.data_dimensions.structure.path, + 'resolving data size', + self.data_dimensions.structure.name, + inspect.stack()[0][3], + type_, value_, traceback_, message, + self._simulation_data.debug) + + if len(self.layer_storage.first_item().internal_data[0]) <= index: + return 0 + label = self.layer_storage.first_item().\ + internal_data.dtype.names[index] + label_list = label.split('_') + if len(label_list) == 1: + return 1 + internal_data = self.layer_storage.first_item().internal_data + for forward_index in range(index+1, len(internal_data.dtype.names)): + forward_label = internal_data.dtype.names[forward_index] + forward_label_list = forward_label.split('_') + if forward_label_list[0] != label_list[0]: + return forward_index - index + return len(internal_data.dtype.names) - index + + def build_type_list(self, data_set=None, data=None, + resolve_data_shape=True, key=None, + nseg=None): + if data_set is None: + self._recarray_type_list = [] + data_set = self.data_dimensions.structure + initial_keyword = True + package_dim = self.data_dimensions.package_dim + for data_item, index in zip(data_set.data_item_structures, + range(0, + len(data_set.data_item_structures))): + # handle optional mnames + if not data_item.optional or len(data_item.name) < 5 or \ + data_item.name.lower()[0:5] != 'mname' \ + or not self.in_model: + overrides = self._data_type_overrides + if len(self._recarray_type_list) in overrides: + data_type = overrides[len(self._recarray_type_list)] + elif isinstance(data_item, MFDataItemStructure): + 
data_type = data_item.get_rec_type() + else: + data_type = None + if data_item.name.lower() == 'aux' and resolve_data_shape: + aux_var_names = package_dim.get_aux_variables() + if aux_var_names is not None: + for aux_var_name in aux_var_names[0]: + if aux_var_name.lower() != 'auxiliary': + self._recarray_type_list.append((aux_var_name, + data_type)) + + elif data_item.type == DatumType.record: + # record within a record, recurse + self.build_type_list(data_item, True, data) + elif data_item.type == DatumType.keystring: + self._recarray_type_list.append((data_item.name, + data_type)) + # add potential data after keystring to type list + ks_data_item = deepcopy(data_item) + ks_data_item.type = DatumType.string + ks_data_item.name = '{}_data'.format(ks_data_item.name) + ks_rec_type = ks_data_item.get_rec_type() + self._recarray_type_list.append((ks_data_item.name, + ks_rec_type)) + if index == len(data_set.data_item_structures) - 1: + idx = 1 + data_line_max_size = self._get_max_data_line_size(data) + while data is not None and \ + len(self._recarray_type_list) < \ + data_line_max_size: + # keystrings at the end of a line can contain items + # of variable length. assume everything at the + # end of the data line is related to the last + # keystring + self._recarray_type_list.append( + ('{}_{}'.format(ks_data_item.name, idx), + ks_rec_type)) + idx += 1 + + elif data_item.name != 'boundname' or \ + self.data_dimensions.package_dim.boundnames(): + # don't include initial keywords + if data_item.type != DatumType.keyword or \ + initial_keyword == \ + False or data_set.block_variable == True: + initial_keyword = False + shape_rule = None + if data_item.tagged: + if data_item.type != DatumType.string and \ + data_item.type != DatumType.keyword: + self._recarray_type_list.append( + ('{}_label'.format(data_item.name), + object)) + if nseg is not None and len(data_item.shape) > 0 and \ + isinstance(data_item.shape[0], str) and \ + data_item.shape[0][0:4] == 'nseg': + # nseg explicitly specified. resolve any formula + # nseg is in + model_dim = \ + self.data_dimensions.get_model_dim(None) + expression_array = \ + model_dim.build_shape_expression(data_item. + shape) + if isinstance(expression_array, list) and \ + len(expression_array) == 1: + exp = expression_array[0] + resolved_shape = \ + [model_dim.resolve_exp(exp, nseg)] + else: + resolved_shape = [1] + else: + if resolve_data_shape: + data_dim = self.data_dimensions + resolved_shape, shape_rule = \ + data_dim.get_data_shape(data_item, + data_set, + data, key) + else: + resolved_shape = [1] + if not resolved_shape or len(resolved_shape) == 0 or \ + resolved_shape[0] == -1: + # could not resolve shape + resolved_shape = [1] + elif resolved_shape[0] == -9999 or \ + shape_rule is not None: + if data is not None: + # shape is an indeterminate 1-d array and + # should consume the remainder of the data + max_s = PyListUtil.max_multi_dim_list_size(data) + resolved_shape[0] = \ + max_s - len(self._recarray_type_list) + else: + # shape is indeterminate 1-d array and no data + # provided to resolve + resolved_shape[0] = 1 + if data_item.is_cellid: + if data_item.shape is not None and \ + len(data_item.shape) > 0 and \ + data_item.shape[0] == 'ncelldim': + # A cellid is a single entry (tuple) in the + # recarray. Adjust dimensions accordingly. 
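For reference, this is why the resolved shape is reduced for cellids: a cellid such as (layer, row, col) occupies a single object-typed field of the recarray rather than one integer field per spatial coordinate. A minimal sketch, again with illustrative field names rather than FloPy's generated ones:

    import numpy as np

    # One object field holds the whole (layer, row, col) tuple.
    type_list = [('cellid', object), ('head', float)]
    rows = [((0, 4, 7), 10.5),
            ((1, 4, 7), 9.8)]

    rec = np.rec.array(rows, dtype=type_list)
    print(rec.cellid[0])         # (0, 4, 7)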
+ data_dim = self.data_dimensions + model_grid = data_dim.get_model_grid() + size = model_grid.get_num_spatial_coordinates() + data_item.remove_cellid(resolved_shape, + size) + for index in range(0, resolved_shape[0]): + if resolved_shape[0] > 1: + # type list fields must have unique names + self._recarray_type_list.append( + ('{}_{}'.format(data_item.name, + index), data_type)) + else: + self._recarray_type_list.append( + (data_item.name, data_type)) + return self._recarray_type_list + + def get_default_mult(self): + if self._data_type == DatumType.integer: + return 1 + else: + return 1.0 + + @staticmethod + def _calc_data_size(data, count_to=None, current_length=None): + if current_length is None: + current_length = [0] + if isinstance(data, np.ndarray): + current_length[0] += data.size + return data.size + if isinstance(data, str) or isinstance(data, dict): + return 1 + try: + for data_item in data: + if hasattr(data_item, '__len__'): + DataStorage._calc_data_size(data_item, count_to, + current_length) + else: + current_length[0] += 1 + if count_to is not None and current_length[0] >= count_to: + return current_length[0] + except (ValueError, IndexError, TypeError): + return 1 + return current_length[0] + + @staticmethod + def _get_max_data_line_size(data): + max_size = 0 + if data is not None: + for value in data: + if len(value) > max_size: + max_size = len(value) + return max_size + + def get_data_dimensions(self, layer): + data_dimensions = self.data_dimensions.get_data_shape()[0] + if layer is not None and self.layer_storage.get_total_size() > 1 and \ + self._has_layer_dim(): + # remove all "layer" dimensions from the list + layer_dims = self.data_dimensions.structure.\ + data_item_structures[0].layer_dims + data_dimensions = data_dimensions[len(layer_dims):] + return data_dimensions + + def _has_layer_dim(self): + return ('nlay' in self.data_dimensions.structure.shape or 'nodes' + in self.data_dimensions.structure.shape) + + def _store_prep(self, layer, multiplier): + if not (layer is None or self.layer_storage.in_shape(layer)): + message = 'Layer {} is not a valid layer.'.format(layer) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.data_dimensions.structure.get_model(), + self.data_dimensions.structure.get_package(), + self.data_dimensions.structure.path, + 'storing data', + self.data_dimensions.structure.name, + inspect.stack()[0][3], + type_, value_, traceback_, message, + self._simulation_data.debug) + if layer is None: + # layer is none means the data provided is for all layers or this + # is not layered data + layer = (0,) + self.layer_storage.list_shape = (1,) + self.layer_storage.multi_dim_list = [ + self.layer_storage.first_item()] + mult_ml = MultiList(multiplier) + if not mult_ml.in_shape(layer): + if multiplier[0] is None: + multiplier = self.get_default_mult() + else: + multiplier = multiplier[0] + else: + if mult_ml.first_item() is None: + multiplier = self.get_default_mult() + else: + multiplier = mult_ml.first_item() + + return layer, multiplier + + def get_data_size(self, layer): + dimensions = self.get_data_dimensions(layer) + data_size = 1 + for dimension in dimensions: + data_size = data_size * dimension + return data_size diff --git a/flopy/mf6/data/mfdatautil.py b/flopy/mf6/data/mfdatautil.py index 25532a087b..bf532d47cb 100644 --- a/flopy/mf6/data/mfdatautil.py +++ b/flopy/mf6/data/mfdatautil.py @@ -1,713 +1,713 @@ -import sys, inspect -import numpy as np -from copy import deepcopy -from collections import Iterable -from 
..mfbase import MFDataException, FlopyException -from .mfstructure import DatumType -from ...utils.datautil import PyListUtil, DatumUtil -import struct - - -def iterable(obj): - return isinstance(obj, Iterable) - - -def get_first_val(arr): - while isinstance(arr, list) or isinstance(arr, np.ndarray): - arr = arr[0] - return arr - - -# convert_data(data, type) : type -# converts data "data" to type "type" and returns the converted data -def convert_data(data, data_dimensions, data_type, data_item=None): - if data_type == DatumType.double_precision: - if data_item is not None and data_item.support_negative_index: - val = int(PyListUtil.clean_numeric(data)) - if val == -1: - return -0.0 - elif val == 1: - return 0.0 - elif val < 0: - val += 1 - else: - val -= 1 - try: - return float(val) - except (ValueError, TypeError): - message = 'Data "{}" with value "{}" can ' \ - 'not be converted to float' \ - '.'.format(data_dimensions.structure.name, - data) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - data_dimensions.structure.get_model(), - data_dimensions.structure.get_package(), - data_dimensions.structure.path, 'converting data', - data_dimensions.structure.name, - inspect.stack()[0][3], type_, value_, traceback_, - message, False) - else: - try: - if isinstance(data, str): - # fix any scientific formatting that python can't handle - data = data.replace('d', 'e') - return float(data) - except (ValueError, TypeError): - try: - return float(PyListUtil.clean_numeric(data)) - except (ValueError, TypeError): - message = 'Data "{}" with value "{}" can ' \ - 'not be converted to float' \ - '.'.format(data_dimensions.structure. - name, - data) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - data_dimensions.structure.get_model(), - data_dimensions.structure.get_package(), - data_dimensions.structure.path, - 'converting data', - data_dimensions.structure.name, - inspect.stack()[0][3], type_, value_, - traceback_, message, False) - elif data_type == DatumType.integer: - if data_item is not None and data_item.numeric_index: - return int(PyListUtil.clean_numeric(data)) - 1 - try: - return int(data) - except (ValueError, TypeError): - try: - return int(PyListUtil.clean_numeric(data)) - except (ValueError, TypeError): - message = 'Data "{}" with value "{}" can not be ' \ - 'converted to int' \ - '.'.format(data_dimensions.structure.name, - data) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - data_dimensions.structure.get_model(), - data_dimensions.structure.get_package(), - data_dimensions.structure.path, 'converting data', - data_dimensions.structure.name, - inspect.stack()[0][3], type_, value_, traceback_, - message, False) - elif data_type == DatumType.string and data is not None: - if data_item is None or not data_item.preserve_case: - # keep strings lower case - return data.lower() - return data - - -def to_string(val, data_type, sim_data, data_dim, is_cellid=False, - possible_cellid=False, data_item=None): - if data_type == DatumType.double_precision: - if data_item is not None and data_item.support_negative_index: - if val > 0: - return (str(int(val + 1))) - elif val == 0.0: - if struct.pack('>d', val) == \ - b'\x80\x00\x00\x00\x00\x00\x00\x00': - # value is negative zero - return (str(int(val - 1))) - else: - # value is positive zero - return (str(int(val + 1))) - else: - return (str(int(val - 1))) - else: - try: - abs_val = abs(val) - except TypeError: - return str(val) - if (abs_val > sim_data._sci_note_upper_thres or - 
abs_val < sim_data._sci_note_lower_thres) \ - and abs_val != 0: - return sim_data.reg_format_str.format(val) - else: - return sim_data.sci_format_str.format(val) - elif is_cellid or (possible_cellid and isinstance(val, tuple)): - if DatumUtil.is_int(val): - return str(val + 1) - if len(val) > 0 and isinstance(val, str) and \ - val.lower() == 'none': - # handle case that cellid is 'none' - return val - if is_cellid and \ - data_dim.get_model_dim(None).model_name is not \ - None: - model_grid = data_dim.get_model_grid() - cellid_size = model_grid.get_num_spatial_coordinates() - if len(val) != cellid_size: - message = 'Cellid "{}" contains {} integer(s). Expected a' \ - ' cellid containing {} integer(s) for grid type' \ - ' {}.'.format(val, len(val), cellid_size, - str(model_grid.grid_type())) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - data_dim.structure.get_model(), - data_dim.structure.get_package(), - data_dim.structure.path, - 'converting cellid to string', - data_dim.structure.name, inspect.stack()[0][3], - type_, value_, traceback_, message, - sim_data.debug) - - string_val = [] - if isinstance(val, str): - string_val.append(val) - else: - for item in val: - string_val.append(str(item + 1)) - return ' '.join(string_val) - elif data_type == DatumType.integer: - if data_item is not None and data_item.numeric_index: - if isinstance(val, str): - return str(int(val) + 1) - else: - return str(int(val)+1) - return str(int(val)) - elif data_type == DatumType.string: - try: - arr_val = val.split() - except AttributeError: - return str(val) - if len(arr_val) > 1: - # quote any string with spaces - string_val = "'{}'".format(val) - if data_item is not None and data_item.ucase: - return string_val.upper() - else: - return string_val - if data_item is not None and data_item.ucase: - return str(val).upper() - else: - return str(val) - - -class MFComment(object): - """ - Represents a variable in a MF6 input file - - - Parameters - ---------- - comment : string or list - comment to be displayed in output file - path : string - tuple representing location in the output file - line_number : integer - line number to display comment in output file - - Attributes - ---------- - comment : string or list - comment to be displayed in output file - path : string - tuple representing location in the output file - line_number : integer - line number to display comment in output file - - Methods - ------- - write : (file) - writes the comment to file - add_text(additional_text) - adds text to the comment - get_file_entry(eoln_suffix=True) - returns the comment text in the format to write to package files - is_empty(include_whitespace=True) - checks to see if comment is just an empty string ''. if - include_whitespace is set to false a string with only whitespace is - considered empty - is_comment(text, include_empty_line=False) : boolean - returns true if text is a comment. an empty line is considered a - comment if include_empty_line is true. - - See Also - -------- - - Notes - ----- - - Examples - -------- - - - """ - def __init__(self, comment, path, sim_data, line_number=0): - if not (isinstance(comment, str) or isinstance(comment, list) or - comment is None): - raise FlopyException('Comment "{}" not valid. Comment must be ' - 'of type str of list.'.format(comment)) - self.text = comment - self.path = path - self.line_number = line_number - self.sim_data = sim_data - - """ - Add text to the comment string. 
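A hedged usage sketch of MFComment as defined in this revision; sim_data is replaced by a minimal stand-in here because only its comments_on flag is consulted by these methods, and the path tuple is an arbitrary placeholder:

    from types import SimpleNamespace
    from flopy.mf6.data.mfdatautil import MFComment

    sim_data = SimpleNamespace(comments_on=True)      # minimal stand-in
    note = MFComment('# model notes', ('gwf',), sim_data)
    note.add_text('created for testing')
    print(note.get_file_entry())   # '# model notes created for testing\n'
    print(MFComment.is_comment('! also a comment'))   # True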
- - Parameters - ---------- - additional_text: string - text to add - """ - def add_text(self, additional_text): - if additional_text: - if isinstance(self.text, list): - self.text.append(additional_text) - else: - self.text = '{} {}'.format(self.text, additional_text) - - """ - Get the comment text in the format to write to package files. - - Parameters - ---------- - eoln_suffix: boolean - have comment text end with end of line character - Returns - ------- - string : comment text - """ - def get_file_entry(self, eoln_suffix=True): - file_entry = '' - if self.text and self.sim_data.comments_on: - if not isinstance(self.text, str) and isinstance(self.text, list): - file_entry = self._recursive_get(self.text) - else: - if self.text.strip(): - file_entry = self.text - if eoln_suffix: - file_entry = '{}\n'.format(file_entry) - return file_entry - - def _recursive_get(self, base_list): - file_entry = '' - if base_list and self.sim_data.comments_on: - for item in base_list: - if not isinstance(item, str) and isinstance(item, list): - file_entry = '{}{}'.format(file_entry, - self._recursive_get(item)) - else: - file_entry = '{} {}'.format(file_entry, item) - return file_entry - - """ - Write the comment text to a file. - - Parameters - ---------- - fd : file - file to write to - eoln_suffix: boolean - have comment text end with end of line character - """ - def write(self, fd, eoln_suffix=True): - if self.text and self.sim_data.comments_on: - if not isinstance(self.text, str) and isinstance(self.text, list): - self._recursive_write(fd, self.text) - else: - if self.text.strip(): - fd.write(self.text) - if eoln_suffix: - fd.write('\n') - - """ - Check for comment text - - Parameters - ---------- - include_whitespace : boolean - include whitespace as text - Returns - ------- - boolean : True if comment text exists - """ - def is_empty(self, include_whitespace=True): - if include_whitespace: - if self.text(): - return True - return False - else: - if self.text.strip(): - return True - return False - - """ - Check text to see if it is valid comment text - - Parameters - ---------- - text : string - potential comment text - include_empty_line : boolean - allow empty line to be valid - Returns - ------- - boolean : True if text is valid comment text - """ - @staticmethod - def is_comment(text, include_empty_line=False): - if not text: - return include_empty_line - if text and isinstance(text, list): - # look for comment mark in first item of list - text_clean = text[0].strip() - else: - text_clean = text.strip() - if include_empty_line and not text_clean: - return True - if text_clean and (text_clean[0] == '#' or text_clean[0] == '!' or - text_clean[0] == '//'): - return True - return False - - # recursively writes a nested list to a file - def _recursive_write(self, fd, base_list): - if base_list: - for item in base_list: - if not isinstance(item, str) and isinstance(item, list): - self._recursive_write(fd, item) - else: - fd.write(' {}'.format(item)) - - -class TemplateGenerator(object): - """ - Abstract base class for building a data template for different data types. - This is a generic class that is initialized with a path that identifies - the data to be built. 
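A hedged sketch of how a concrete subclass (ArrayTemplateGenerator, defined below) is typically used. The model setup is the minimum assumed necessary to resolve the data's dimensions, and the sketch relies on the generated package classes exposing these generators as class attributes (e.g. the 'k' array on the NPF package class), which matches the generated modules listed in this patch:

    import flopy

    sim = flopy.mf6.MFSimulation(sim_name='demo')
    gwf = flopy.mf6.ModflowGwf(sim, modelname='demo')
    flopy.mf6.ModflowGwfdis(gwf, nlay=2, nrow=3, ncol=4)

    # The dfn path behind this generator is ('gwf6', 'npf', 'griddata', 'k').
    template = flopy.mf6.ModflowGwfnpf.k.empty(model=gwf, layered=True,
                                               default_value=10.0)
    # template is a list with one {'factor', 'iprn', 'data'} dict per layer,
    # each 'data' entry a (nrow, ncol) array filled with the default value.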
- - Parameters - ---------- - path : string - tuple containing path of data is described in dfn files - (,,,) - """ - def __init__(self, path): - self.path = path - - def _get_data_dimensions(self, model): - from ..data import mfstructure - from ..coordinates import modeldimensions - - # get structure info - sim_struct = mfstructure.MFStructure().sim_struct - package_struct = sim_struct.get_data_structure(self.path[0:-2]) - - # get dimension info - data_struct = sim_struct.get_data_structure(self.path) - package_dim = modeldimensions.PackageDimensions([model.dimensions], - package_struct, - self.path[0:-1]) - return data_struct, modeldimensions.DataDimensions(package_dim, - data_struct) - - def build_type_header(self, ds_type, data=None): - from ..data.mfdatastorage import DataStorageType - - if ds_type == DataStorageType.internal_array: - if isinstance(self, ArrayTemplateGenerator): - return {'factor':1.0, 'iprn':1, 'data':data} - else: - return None - elif ds_type == DataStorageType.internal_constant: - return data - elif ds_type == DataStorageType.external_file: - return {'filename':'', 'factor':1.0, 'iprn':1} - return None - - -class ArrayTemplateGenerator(TemplateGenerator): - """ - Class that builds a data template for MFArrays. This is a generic class - that is initialized with a path that identifies the data to be built. - - Parameters - ---------- - path : string - tuple containing path of data is described in dfn files - (,,,) - - Methods - ------- - empty: (model: MFModel, layered: boolean, data_storage_type_list: boolean, - default_value: int/float) : variable - Builds a template for the data you need to specify for a specific data - type (ie. "hk") in a specific model. The data type and dimensions - is determined by "path" during initialization of this class and the - model is passed in to this method as the "model" parameter. If the - data is transient a dictionary containing a single stress period - will be returned. If "layered" is set to true, data will be returned - as a list ndarrays, one for each layer. data_storage_type_list is a - list of DataStorageType, one type for each layer. If "default_value" - is specified the data template will be populated with that value, - otherwise each ndarray in the data template will be populated with - np.empty (0 or 0.0 if the DataStorageType is a constant). - """ - def __init__(self, path): - super(ArrayTemplateGenerator, self).__init__(path) - - def empty(self, model=None, layered=False, data_storage_type_list=None, - default_value=None): - from ..data import mfdatastorage, mfstructure - from ..data.mfdatastorage import DataStorageType, DataStructureType - - # get the expected dimensions of the data - data_struct, data_dimensions = self._get_data_dimensions(model) - datum_type = data_struct.get_datum_type() - data_type = data_struct.get_datatype() - # build a temporary data storage object - data_storage = mfdatastorage.DataStorage( - model.simulation_data, model, data_dimensions, None, - DataStorageType.internal_array, - DataStructureType.recarray, data_path=self.path) - dimension_list = data_storage.get_data_dimensions(None) - - # if layered data - if layered and dimension_list[0] > 1: - if data_storage_type_list is not None and \ - len(data_storage_type_list) != dimension_list[0]: - comment = 'data_storage_type_list specified with the ' \ - 'wrong size. 
Size {} but expected to be ' \ - 'the same as the number of layers, ' \ - '{}.'.format(len(data_storage_type_list), - dimension_list[0]) - type_, value_, traceback_ = sys.exc_info() - - raise MFDataException(data_struct.get_model(), - data_struct.get_package(), - data_struct.path, - 'generating array template', - data_struct.name, - inspect.stack()[0][3], - type_, value_, traceback_, comment, - model.simulation_data.debug) - # build each layer - data_with_header = [] - for layer in range(0, dimension_list[0]): - # determine storage type - if data_storage_type_list is None: - data_storage_type = DataStorageType.internal_array - else: - data_storage_type = data_storage_type_list[layer] - # build data type header - data_with_header.append(self._build_layer(datum_type, - data_storage_type, - default_value, - dimension_list)) - else: - if data_storage_type_list is None or \ - data_storage_type_list[0] == \ - DataStorageType.internal_array: - data_storage_type = DataStorageType.internal_array - else: - data_storage_type = data_storage_type_list[0] - # build data type header - data_with_header = self._build_layer(datum_type, - data_storage_type, - default_value, - dimension_list, True) - - # if transient/multiple list - if data_type == mfstructure.DataType.array_transient: - # Return as dictionary - return {0:data_with_header} - else: - return data_with_header - - def _build_layer(self, data_type, data_storage_type, default_value, - dimension_list, all_layers=False): - from ..data.mfdatastorage import DataStorageType - - # build data - if data_storage_type == DataStorageType.internal_array: - if default_value is None: - if all_layers: - data = np.empty(dimension_list, data_type) - else: - data = np.empty(dimension_list[1:], data_type) - else: - if all_layers: - data = np.full(dimension_list, default_value, data_type) - else: - data = np.full(dimension_list[1:], default_value, - data_type) - elif data_storage_type == DataStorageType.internal_constant: - if default_value is None: - if data_type == np.int32: - data = 0 - else: - data = 0.0 - else: - data = default_value - else: - data = None - # build data type header - return self.build_type_header(data_storage_type, data) - - -class ListTemplateGenerator(TemplateGenerator): - """ - Class that builds a data template for MFLists. This is a generic class - that is initialized with a path that identifies the data to be built. - - Parameters - ---------- - path : string - tuple containing path of data is described in dfn files - (,,,) - - Methods - ------- - empty: (maxbound: int, aux_vars: list, boundnames: boolean, nseg: int) : - dictionary - Builds a template for the data you need to specify for a specific data - type (ie. "stress_period_data") in a specific model. The data type is - determined by "path" during initialization of this class. If the data - is transient a dictionary containing a single stress period will be - returned. The number of entries in the recarray are determined by - the "maxbound" parameter. The "aux_vars" parameter is a list of aux - var names to be used in this data list. If boundnames is set to - true and boundname field will be included in the recarray. nseg is - only used on list data that contains segments. If timeseries is true, - a template that is compatible with time series data is returned. 
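And the list counterpart, reusing the gwf model from the array sketch above; maxbound controls how many placeholder rows the returned recarray contains, and the field layout shown in the comment is what the WEL period block's type list is expected to produce:

    # stress_period_data on the WEL package class is a ListTemplateGenerator
    spd = flopy.mf6.ModflowGwfwel.stress_period_data.empty(
        gwf, maxbound=2, boundnames=True)
    # spd == {0: rec.array with fields ('cellid', 'q', 'boundname') and
    #         two placeholder rows ready to be filled in}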
- """ - def __init__(self, path): - super(ListTemplateGenerator, self).__init__(path) - - def _build_template_data(self, type_list): - template_data = [] - for type in type_list: - if type[1] == int: - template_data.append(0) - elif type[1] == float: - template_data.append(np.nan) - else: - template_data.append(None) - return tuple(template_data) - - def empty(self, model, maxbound=None, aux_vars=None, boundnames=False, - nseg=None, timeseries=False, stress_periods=None): - from ..data import mfdatastorage, mfstructure - - data_struct, data_dimensions = self._get_data_dimensions(model) - data_type = data_struct.get_datatype() - # build a temporary data storage object - data_storage = mfdatastorage.DataStorage( - model.simulation_data, model, data_dimensions, None, - mfdatastorage.DataStorageType.internal_array, - mfdatastorage.DataStructureType.recarray) - - # build type list - type_list = data_storage.build_type_list(nseg=nseg) - if aux_vars is not None: - if len(aux_vars) > 0 and (isinstance(aux_vars[0], list) or - isinstance(aux_vars[0], tuple)): - aux_vars = aux_vars[0] - for aux_var in aux_vars: - type_list.append((aux_var, object)) - if boundnames: - type_list.append(('boundname', object)) - - if timeseries: - # fix type list to make all types objects - for index, d_type in enumerate(type_list): - type_list[index] = (d_type[0], object) - - # build recarray - template_data = self._build_template_data(type_list) - rec_array_data = [] - if maxbound is not None: - for index in range(0, maxbound): - rec_array_data.append(template_data) - else: - rec_array_data.append(template_data) - rec_array = np.rec.array(rec_array_data, type_list) - - # if transient/multiple list - if data_type == mfstructure.DataType.list_transient or \ - data_type == mfstructure.DataType.list_multiple: - # Return as dictionary - if stress_periods is None: - return {0:rec_array} - else: - template = {} - for stress_period in stress_periods: - template[stress_period] = deepcopy(rec_array) - return template - else: - return rec_array - - -class MFDocString(object): - """ - Helps build a python class doc string - - Parameters - ---------- - description : string - description of the class - - Attributes - ---------- - indent: string - indent to use in doc string - description : string - description of the class - parameter_header : string - header for parameter section of doc string - parameters : list - list of docstrings for class parameters - - Methods - ------- - add_parameter : (param_descr : string, beginning_of_list : bool) - adds doc string for a parameter with description 'param_descr' to the - end of the list unless beginning_of_list is True - get_doc_string : () : string - builds and returns the docstring for the class - """ - def __init__(self, description): - self.indent = ' ' - self.description = description - self.parameter_header = '{}Parameters\n{}' \ - '----------'.format(self.indent, self.indent) - self.parameters = [] - self.model_parameters = [] - - def add_parameter(self, param_descr, beginning_of_list=False, - model_parameter=False): - if beginning_of_list: - self.parameters.insert(0, param_descr) - if model_parameter: - self.model_parameters.insert(0, param_descr) - else: - self.parameters.append(param_descr) - if model_parameter: - self.model_parameters.append(param_descr) - - def get_doc_string(self, model_doc_string=False): - doc_string = '{}"""\n{}{}\n\n{}\n'.format(self.indent, self.indent, - self.description, - self.parameter_header) - if model_doc_string: - param_list = 
self.model_parameters - doc_string = '{} modelname : string\n name of the ' \ - 'model\n model_nam_file : string\n' \ - ' relative path to the model name file from ' \ - 'model working folder\n version : string\n' \ - ' version of modflow\n exe_name : string\n'\ - ' model executable name\n' \ - ' model_ws : string\n' \ - ' model working folder path' \ - '\n'.format(doc_string) - else: - param_list = self.parameters - for parameter in param_list: - doc_string += '{}\n'.format(parameter) - if not model_doc_string: - doc_string += '\n{}"""'.format(self.indent) - return doc_string +import sys, inspect +import numpy as np +from copy import deepcopy +from collections import Iterable +from ..mfbase import MFDataException, FlopyException +from .mfstructure import DatumType +from ...utils.datautil import PyListUtil, DatumUtil +import struct + + +def iterable(obj): + return isinstance(obj, Iterable) + + +def get_first_val(arr): + while isinstance(arr, list) or isinstance(arr, np.ndarray): + arr = arr[0] + return arr + + +# convert_data(data, type) : type +# converts data "data" to type "type" and returns the converted data +def convert_data(data, data_dimensions, data_type, data_item=None): + if data_type == DatumType.double_precision: + if data_item is not None and data_item.support_negative_index: + val = int(PyListUtil.clean_numeric(data)) + if val == -1: + return -0.0 + elif val == 1: + return 0.0 + elif val < 0: + val += 1 + else: + val -= 1 + try: + return float(val) + except (ValueError, TypeError): + message = 'Data "{}" with value "{}" can ' \ + 'not be converted to float' \ + '.'.format(data_dimensions.structure.name, + data) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + data_dimensions.structure.get_model(), + data_dimensions.structure.get_package(), + data_dimensions.structure.path, 'converting data', + data_dimensions.structure.name, + inspect.stack()[0][3], type_, value_, traceback_, + message, False) + else: + try: + if isinstance(data, str): + # fix any scientific formatting that python can't handle + data = data.replace('d', 'e') + return float(data) + except (ValueError, TypeError): + try: + return float(PyListUtil.clean_numeric(data)) + except (ValueError, TypeError): + message = 'Data "{}" with value "{}" can ' \ + 'not be converted to float' \ + '.'.format(data_dimensions.structure. 
+ name, + data) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + data_dimensions.structure.get_model(), + data_dimensions.structure.get_package(), + data_dimensions.structure.path, + 'converting data', + data_dimensions.structure.name, + inspect.stack()[0][3], type_, value_, + traceback_, message, False) + elif data_type == DatumType.integer: + if data_item is not None and data_item.numeric_index: + return int(PyListUtil.clean_numeric(data)) - 1 + try: + return int(data) + except (ValueError, TypeError): + try: + return int(PyListUtil.clean_numeric(data)) + except (ValueError, TypeError): + message = 'Data "{}" with value "{}" can not be ' \ + 'converted to int' \ + '.'.format(data_dimensions.structure.name, + data) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + data_dimensions.structure.get_model(), + data_dimensions.structure.get_package(), + data_dimensions.structure.path, 'converting data', + data_dimensions.structure.name, + inspect.stack()[0][3], type_, value_, traceback_, + message, False) + elif data_type == DatumType.string and data is not None: + if data_item is None or not data_item.preserve_case: + # keep strings lower case + return data.lower() + return data + + +def to_string(val, data_type, sim_data, data_dim, is_cellid=False, + possible_cellid=False, data_item=None): + if data_type == DatumType.double_precision: + if data_item is not None and data_item.support_negative_index: + if val > 0: + return (str(int(val + 1))) + elif val == 0.0: + if struct.pack('>d', val) == \ + b'\x80\x00\x00\x00\x00\x00\x00\x00': + # value is negative zero + return (str(int(val - 1))) + else: + # value is positive zero + return (str(int(val + 1))) + else: + return (str(int(val - 1))) + else: + try: + abs_val = abs(val) + except TypeError: + return str(val) + if (abs_val > sim_data._sci_note_upper_thres or + abs_val < sim_data._sci_note_lower_thres) \ + and abs_val != 0: + return sim_data.reg_format_str.format(val) + else: + return sim_data.sci_format_str.format(val) + elif is_cellid or (possible_cellid and isinstance(val, tuple)): + if DatumUtil.is_int(val): + return str(val + 1) + if len(val) > 0 and isinstance(val, str) and \ + val.lower() == 'none': + # handle case that cellid is 'none' + return val + if is_cellid and \ + data_dim.get_model_dim(None).model_name is not \ + None: + model_grid = data_dim.get_model_grid() + cellid_size = model_grid.get_num_spatial_coordinates() + if len(val) != cellid_size: + message = 'Cellid "{}" contains {} integer(s). 
Expected a' \ + ' cellid containing {} integer(s) for grid type' \ + ' {}.'.format(val, len(val), cellid_size, + str(model_grid.grid_type())) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + data_dim.structure.get_model(), + data_dim.structure.get_package(), + data_dim.structure.path, + 'converting cellid to string', + data_dim.structure.name, inspect.stack()[0][3], + type_, value_, traceback_, message, + sim_data.debug) + + string_val = [] + if isinstance(val, str): + string_val.append(val) + else: + for item in val: + string_val.append(str(item + 1)) + return ' '.join(string_val) + elif data_type == DatumType.integer: + if data_item is not None and data_item.numeric_index: + if isinstance(val, str): + return str(int(val) + 1) + else: + return str(int(val)+1) + return str(int(val)) + elif data_type == DatumType.string: + try: + arr_val = val.split() + except AttributeError: + return str(val) + if len(arr_val) > 1: + # quote any string with spaces + string_val = "'{}'".format(val) + if data_item is not None and data_item.ucase: + return string_val.upper() + else: + return string_val + if data_item is not None and data_item.ucase: + return str(val).upper() + else: + return str(val) + + +class MFComment(object): + """ + Represents a variable in a MF6 input file + + + Parameters + ---------- + comment : string or list + comment to be displayed in output file + path : string + tuple representing location in the output file + line_number : integer + line number to display comment in output file + + Attributes + ---------- + comment : string or list + comment to be displayed in output file + path : string + tuple representing location in the output file + line_number : integer + line number to display comment in output file + + Methods + ------- + write : (file) + writes the comment to file + add_text(additional_text) + adds text to the comment + get_file_entry(eoln_suffix=True) + returns the comment text in the format to write to package files + is_empty(include_whitespace=True) + checks to see if comment is just an empty string ''. if + include_whitespace is set to false a string with only whitespace is + considered empty + is_comment(text, include_empty_line=False) : boolean + returns true if text is a comment. an empty line is considered a + comment if include_empty_line is true. + + See Also + -------- + + Notes + ----- + + Examples + -------- + + + """ + def __init__(self, comment, path, sim_data, line_number=0): + if not (isinstance(comment, str) or isinstance(comment, list) or + comment is None): + raise FlopyException('Comment "{}" not valid. Comment must be ' + 'of type str of list.'.format(comment)) + self.text = comment + self.path = path + self.line_number = line_number + self.sim_data = sim_data + + """ + Add text to the comment string. + + Parameters + ---------- + additional_text: string + text to add + """ + def add_text(self, additional_text): + if additional_text: + if isinstance(self.text, list): + self.text.append(additional_text) + else: + self.text = '{} {}'.format(self.text, additional_text) + + """ + Get the comment text in the format to write to package files. 
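# A minimal round-trip sketch of convert_data and to_string as defined
# above. Passing None for the dimension/simulation arguments is an
# assumption that holds only on these happy paths: the integer and string
# branches never touch sim_data, and convert_data only consults
# data_dimensions when a conversion fails.
from flopy.mf6.data.mfdatautil import convert_data, to_string
from flopy.mf6.data.mfstructure import DatumType

convert_data('1.5d-2', None, DatumType.double_precision)  # -> 0.015 ('d' -> 'e')
convert_data('10', None, DatumType.integer)               # -> 10
convert_data('NPF', None, DatumType.string)               # -> 'npf' (lower-cased)
to_string(10, DatumType.integer, None, None)              # -> '10'
to_string('two words', DatumType.string, None, None)      # -> "'two words'" (quoted)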
+ + Parameters + ---------- + eoln_suffix: boolean + have comment text end with end of line character + Returns + ------- + string : comment text + """ + def get_file_entry(self, eoln_suffix=True): + file_entry = '' + if self.text and self.sim_data.comments_on: + if not isinstance(self.text, str) and isinstance(self.text, list): + file_entry = self._recursive_get(self.text) + else: + if self.text.strip(): + file_entry = self.text + if eoln_suffix: + file_entry = '{}\n'.format(file_entry) + return file_entry + + def _recursive_get(self, base_list): + file_entry = '' + if base_list and self.sim_data.comments_on: + for item in base_list: + if not isinstance(item, str) and isinstance(item, list): + file_entry = '{}{}'.format(file_entry, + self._recursive_get(item)) + else: + file_entry = '{} {}'.format(file_entry, item) + return file_entry + + """ + Write the comment text to a file. + + Parameters + ---------- + fd : file + file to write to + eoln_suffix: boolean + have comment text end with end of line character + """ + def write(self, fd, eoln_suffix=True): + if self.text and self.sim_data.comments_on: + if not isinstance(self.text, str) and isinstance(self.text, list): + self._recursive_write(fd, self.text) + else: + if self.text.strip(): + fd.write(self.text) + if eoln_suffix: + fd.write('\n') + + """ + Check for comment text + + Parameters + ---------- + include_whitespace : boolean + include whitespace as text + Returns + ------- + boolean : True if comment text exists + """ + def is_empty(self, include_whitespace=True): + if include_whitespace: + if self.text(): + return True + return False + else: + if self.text.strip(): + return True + return False + + """ + Check text to see if it is valid comment text + + Parameters + ---------- + text : string + potential comment text + include_empty_line : boolean + allow empty line to be valid + Returns + ------- + boolean : True if text is valid comment text + """ + @staticmethod + def is_comment(text, include_empty_line=False): + if not text: + return include_empty_line + if text and isinstance(text, list): + # look for comment mark in first item of list + text_clean = text[0].strip() + else: + text_clean = text.strip() + if include_empty_line and not text_clean: + return True + if text_clean and (text_clean[0] == '#' or text_clean[0] == '!' or + text_clean[0] == '//'): + return True + return False + + # recursively writes a nested list to a file + def _recursive_write(self, fd, base_list): + if base_list: + for item in base_list: + if not isinstance(item, str) and isinstance(item, list): + self._recursive_write(fd, item) + else: + fd.write(' {}'.format(item)) + + +class TemplateGenerator(object): + """ + Abstract base class for building a data template for different data types. + This is a generic class that is initialized with a path that identifies + the data to be built. 
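# MFComment.is_comment is a staticmethod, so it can be exercised without
# building a comment object. Note that because text_clean[0] is a single
# character, the '//' comparison above can never match; only '#' and '!'
# are effective markers in this implementation.
from flopy.mf6.data.mfdatautil import MFComment

MFComment.is_comment('# aux variable list')        # True
MFComment.is_comment('!legacy-style comment')      # True
MFComment.is_comment('BEGIN period 1')             # False
MFComment.is_comment('', include_empty_line=True)  # True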
+ + Parameters + ---------- + path : string + tuple containing path of data is described in dfn files + (,,,) + """ + def __init__(self, path): + self.path = path + + def _get_data_dimensions(self, model): + from ..data import mfstructure + from ..coordinates import modeldimensions + + # get structure info + sim_struct = mfstructure.MFStructure().sim_struct + package_struct = sim_struct.get_data_structure(self.path[0:-2]) + + # get dimension info + data_struct = sim_struct.get_data_structure(self.path) + package_dim = modeldimensions.PackageDimensions([model.dimensions], + package_struct, + self.path[0:-1]) + return data_struct, modeldimensions.DataDimensions(package_dim, + data_struct) + + def build_type_header(self, ds_type, data=None): + from ..data.mfdatastorage import DataStorageType + + if ds_type == DataStorageType.internal_array: + if isinstance(self, ArrayTemplateGenerator): + return {'factor':1.0, 'iprn':1, 'data':data} + else: + return None + elif ds_type == DataStorageType.internal_constant: + return data + elif ds_type == DataStorageType.external_file: + return {'filename':'', 'factor':1.0, 'iprn':1} + return None + + +class ArrayTemplateGenerator(TemplateGenerator): + """ + Class that builds a data template for MFArrays. This is a generic class + that is initialized with a path that identifies the data to be built. + + Parameters + ---------- + path : string + tuple containing path of data is described in dfn files + (,,,) + + Methods + ------- + empty: (model: MFModel, layered: boolean, data_storage_type_list: boolean, + default_value: int/float) : variable + Builds a template for the data you need to specify for a specific data + type (ie. "hk") in a specific model. The data type and dimensions + is determined by "path" during initialization of this class and the + model is passed in to this method as the "model" parameter. If the + data is transient a dictionary containing a single stress period + will be returned. If "layered" is set to true, data will be returned + as a list ndarrays, one for each layer. data_storage_type_list is a + list of DataStorageType, one type for each layer. If "default_value" + is specified the data template will be populated with that value, + otherwise each ndarray in the data template will be populated with + np.empty (0 or 0.0 if the DataStorageType is a constant). + """ + def __init__(self, path): + super(ArrayTemplateGenerator, self).__init__(path) + + def empty(self, model=None, layered=False, data_storage_type_list=None, + default_value=None): + from ..data import mfdatastorage, mfstructure + from ..data.mfdatastorage import DataStorageType, DataStructureType + + # get the expected dimensions of the data + data_struct, data_dimensions = self._get_data_dimensions(model) + datum_type = data_struct.get_datum_type() + data_type = data_struct.get_datatype() + # build a temporary data storage object + data_storage = mfdatastorage.DataStorage( + model.simulation_data, model, data_dimensions, None, + DataStorageType.internal_array, + DataStructureType.recarray, data_path=self.path) + dimension_list = data_storage.get_data_dimensions(None) + + # if layered data + if layered and dimension_list[0] > 1: + if data_storage_type_list is not None and \ + len(data_storage_type_list) != dimension_list[0]: + comment = 'data_storage_type_list specified with the ' \ + 'wrong size. 
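# Sketch of the three header shapes build_type_header produces; the path
# tuple is illustrative rather than a real dfn path. Only instances of
# ArrayTemplateGenerator get the factor/iprn/data dictionary for
# internally stored arrays.
from flopy.mf6.data.mfdatautil import ArrayTemplateGenerator
from flopy.mf6.data.mfdatastorage import DataStorageType

gen = ArrayTemplateGenerator(('gwf6', 'npf', 'griddata', 'k'))
gen.build_type_header(DataStorageType.internal_array, [1.0, 2.0])
# -> {'factor': 1.0, 'iprn': 1, 'data': [1.0, 2.0]}
gen.build_type_header(DataStorageType.internal_constant, 5.0)   # -> 5.0
gen.build_type_header(DataStorageType.external_file)
# -> {'filename': '', 'factor': 1.0, 'iprn': 1}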
Size {} but expected to be ' \ + 'the same as the number of layers, ' \ + '{}.'.format(len(data_storage_type_list), + dimension_list[0]) + type_, value_, traceback_ = sys.exc_info() + + raise MFDataException(data_struct.get_model(), + data_struct.get_package(), + data_struct.path, + 'generating array template', + data_struct.name, + inspect.stack()[0][3], + type_, value_, traceback_, comment, + model.simulation_data.debug) + # build each layer + data_with_header = [] + for layer in range(0, dimension_list[0]): + # determine storage type + if data_storage_type_list is None: + data_storage_type = DataStorageType.internal_array + else: + data_storage_type = data_storage_type_list[layer] + # build data type header + data_with_header.append(self._build_layer(datum_type, + data_storage_type, + default_value, + dimension_list)) + else: + if data_storage_type_list is None or \ + data_storage_type_list[0] == \ + DataStorageType.internal_array: + data_storage_type = DataStorageType.internal_array + else: + data_storage_type = data_storage_type_list[0] + # build data type header + data_with_header = self._build_layer(datum_type, + data_storage_type, + default_value, + dimension_list, True) + + # if transient/multiple list + if data_type == mfstructure.DataType.array_transient: + # Return as dictionary + return {0:data_with_header} + else: + return data_with_header + + def _build_layer(self, data_type, data_storage_type, default_value, + dimension_list, all_layers=False): + from ..data.mfdatastorage import DataStorageType + + # build data + if data_storage_type == DataStorageType.internal_array: + if default_value is None: + if all_layers: + data = np.empty(dimension_list, data_type) + else: + data = np.empty(dimension_list[1:], data_type) + else: + if all_layers: + data = np.full(dimension_list, default_value, data_type) + else: + data = np.full(dimension_list[1:], default_value, + data_type) + elif data_storage_type == DataStorageType.internal_constant: + if default_value is None: + if data_type == np.int32: + data = 0 + else: + data = 0.0 + else: + data = default_value + else: + data = None + # build data type header + return self.build_type_header(data_storage_type, data) + + +class ListTemplateGenerator(TemplateGenerator): + """ + Class that builds a data template for MFLists. This is a generic class + that is initialized with a path that identifies the data to be built. + + Parameters + ---------- + path : string + tuple containing path of data is described in dfn files + (,,,) + + Methods + ------- + empty: (maxbound: int, aux_vars: list, boundnames: boolean, nseg: int) : + dictionary + Builds a template for the data you need to specify for a specific data + type (ie. "stress_period_data") in a specific model. The data type is + determined by "path" during initialization of this class. If the data + is transient a dictionary containing a single stress period will be + returned. The number of entries in the recarray are determined by + the "maxbound" parameter. The "aux_vars" parameter is a list of aux + var names to be used in this data list. If boundnames is set to + true and boundname field will be included in the recarray. nseg is + only used on list data that contains segments. If timeseries is true, + a template that is compatible with time series data is returned. 
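# Hypothetical usage of the two template generators, assuming 'model' is
# an existing flopy.mf6 model object with a three-layer discretization
# already attached; the paths mirror the ones used by the generated
# package modules.
from flopy.mf6.data.mfdatautil import (ArrayTemplateGenerator,
                                       ListTemplateGenerator)

k_gen = ArrayTemplateGenerator(('gwf6', 'npf', 'griddata', 'k'))
k_template = k_gen.empty(model, layered=True, default_value=1.0)
# -> list of per-layer {'factor': 1.0, 'iprn': 1, 'data': ndarray} headers

wel_gen = ListTemplateGenerator(('gwf6', 'wel', 'period', 'stress_period_data'))
wel_template = wel_gen.empty(model, maxbound=2, boundnames=True,
                             stress_periods=[0, 1])
# -> {0: recarray, 1: recarray}; int fields default to 0, floats to np.nan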
+ """ + def __init__(self, path): + super(ListTemplateGenerator, self).__init__(path) + + def _build_template_data(self, type_list): + template_data = [] + for type in type_list: + if type[1] == int: + template_data.append(0) + elif type[1] == float: + template_data.append(np.nan) + else: + template_data.append(None) + return tuple(template_data) + + def empty(self, model, maxbound=None, aux_vars=None, boundnames=False, + nseg=None, timeseries=False, stress_periods=None): + from ..data import mfdatastorage, mfstructure + + data_struct, data_dimensions = self._get_data_dimensions(model) + data_type = data_struct.get_datatype() + # build a temporary data storage object + data_storage = mfdatastorage.DataStorage( + model.simulation_data, model, data_dimensions, None, + mfdatastorage.DataStorageType.internal_array, + mfdatastorage.DataStructureType.recarray) + + # build type list + type_list = data_storage.build_type_list(nseg=nseg) + if aux_vars is not None: + if len(aux_vars) > 0 and (isinstance(aux_vars[0], list) or + isinstance(aux_vars[0], tuple)): + aux_vars = aux_vars[0] + for aux_var in aux_vars: + type_list.append((aux_var, object)) + if boundnames: + type_list.append(('boundname', object)) + + if timeseries: + # fix type list to make all types objects + for index, d_type in enumerate(type_list): + type_list[index] = (d_type[0], object) + + # build recarray + template_data = self._build_template_data(type_list) + rec_array_data = [] + if maxbound is not None: + for index in range(0, maxbound): + rec_array_data.append(template_data) + else: + rec_array_data.append(template_data) + rec_array = np.rec.array(rec_array_data, type_list) + + # if transient/multiple list + if data_type == mfstructure.DataType.list_transient or \ + data_type == mfstructure.DataType.list_multiple: + # Return as dictionary + if stress_periods is None: + return {0:rec_array} + else: + template = {} + for stress_period in stress_periods: + template[stress_period] = deepcopy(rec_array) + return template + else: + return rec_array + + +class MFDocString(object): + """ + Helps build a python class doc string + + Parameters + ---------- + description : string + description of the class + + Attributes + ---------- + indent: string + indent to use in doc string + description : string + description of the class + parameter_header : string + header for parameter section of doc string + parameters : list + list of docstrings for class parameters + + Methods + ------- + add_parameter : (param_descr : string, beginning_of_list : bool) + adds doc string for a parameter with description 'param_descr' to the + end of the list unless beginning_of_list is True + get_doc_string : () : string + builds and returns the docstring for the class + """ + def __init__(self, description): + self.indent = ' ' + self.description = description + self.parameter_header = '{}Parameters\n{}' \ + '----------'.format(self.indent, self.indent) + self.parameters = [] + self.model_parameters = [] + + def add_parameter(self, param_descr, beginning_of_list=False, + model_parameter=False): + if beginning_of_list: + self.parameters.insert(0, param_descr) + if model_parameter: + self.model_parameters.insert(0, param_descr) + else: + self.parameters.append(param_descr) + if model_parameter: + self.model_parameters.append(param_descr) + + def get_doc_string(self, model_doc_string=False): + doc_string = '{}"""\n{}{}\n\n{}\n'.format(self.indent, self.indent, + self.description, + self.parameter_header) + if model_doc_string: + param_list = 
self.model_parameters + doc_string = '{} modelname : string\n name of the ' \ + 'model\n model_nam_file : string\n' \ + ' relative path to the model name file from ' \ + 'model working folder\n version : string\n' \ + ' version of modflow\n exe_name : string\n'\ + ' model executable name\n' \ + ' model_ws : string\n' \ + ' model working folder path' \ + '\n'.format(doc_string) + else: + param_list = self.parameters + for parameter in param_list: + doc_string += '{}\n'.format(parameter) + if not model_doc_string: + doc_string += '\n{}"""'.format(self.indent) + return doc_string diff --git a/flopy/mf6/data/mffileaccess.py b/flopy/mf6/data/mffileaccess.py index 311bb9470b..7bb20ebb1d 100644 --- a/flopy/mf6/data/mffileaccess.py +++ b/flopy/mf6/data/mffileaccess.py @@ -1,1670 +1,1670 @@ -import sys, inspect -from copy import deepcopy -import numpy as np -from ..mfbase import MFDataException, VerbosityLevel -from ...utils.datautil import PyListUtil, find_keyword, DatumUtil, MultiListIter -from .mfdatautil import convert_data, to_string, MFComment -from ...utils.binaryfile import BinaryHeader -from ...utils import datautil -from ..data.mfstructure import DatumType, MFDataStructure, DataType - - -class MFFileAccess(object): - def __init__(self, structure, data_dimensions, simulation_data, path, - current_key): - self.structure = structure - self._data_dimensions = data_dimensions - self._simulation_data = simulation_data - self._path = path - self._current_key = current_key - - @staticmethod - def _get_bintype(modelgrid): - if modelgrid.grid_type == 'vertex': - return 'vardisv' - elif modelgrid.grid_type == 'unstructured': - return 'vardisu' - else: - return 'vardis' - - def _get_next_data_line(self, file_handle): - end_of_file = False - while not end_of_file: - line = file_handle.readline() - if line == '': - message = 'More data expected when reading {} from file ' \ - '{}'.format(self.structure.name, file_handle.name) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.structure.get_model(), - self.structure.get_package(), - self.structure.path, 'reading data from file', - self.structure.name, inspect.stack()[0][3], - type_, value_, traceback_, message, - self._simulation_data.debug) - clean_line = line.strip() - # If comment or empty line - if not MFComment.is_comment(clean_line, True): - return datautil.PyListUtil.split_data_line(clean_line) - - def _read_pre_data_comments(self, line, file_handle, pre_data_comments, - storage): - line_num = 0 - if pre_data_comments: - storage.pre_data_comments = MFComment(pre_data_comments.text, - self._path, - self._simulation_data, - line_num) - else: - storage.pre_data_comments = None - - # read through any fully commented or empty lines - PyListUtil.reset_delimiter_used() - arr_line = PyListUtil.split_data_line(line) - while MFComment.is_comment(arr_line, True) and line != '': - if storage.pre_data_comments: - storage.pre_data_comments.add_text('\n') - storage.pre_data_comments.add_text(' '.join(arr_line)) - else: - storage.pre_data_comments = MFComment(arr_line, self._path, - self._simulation_data, - line_num) - - storage.add_data_line_comment(arr_line, line_num) - - line = file_handle.readline() - arr_line = PyListUtil.split_data_line(line) - return line - - def _get_aux_var_index(self, aux_name): - aux_var_index = None - # confirm whether the keyword found is an auxiliary variable name - aux_var_names = self._data_dimensions.package_dim.get_aux_variables() - if aux_var_names: - for aux_var_name, index in zip(aux_var_names[0], - 
range(0,len(aux_var_names[0]))): - if aux_name.lower() == aux_var_name.lower(): - aux_var_index = index - 1 - return aux_var_index - - def _load_keyword(self, arr_line, index_num, keyword): - aux_var_index = None - if keyword != '': - # verify keyword - keyword_found = arr_line[index_num].lower() - keyword_match = keyword.lower() == keyword_found - aux_var_names = None - if not keyword_match: - aux_var_index = self._get_aux_var_index(keyword_found) - if not keyword_match and aux_var_index is None: - aux_text = '' - if aux_var_names is not None: - aux_text = ' or auxiliary variables ' \ - '{}'.format(aux_var_names[0]) - message = 'Error reading variable "{}". Expected ' \ - 'variable keyword "{}"{} not found ' \ - 'at line "{}". {}'.format(self.structure.name, - keyword, - aux_text, - ' '.join(arr_line), - self._path) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self.structure.get_model(), - self.structure.get_package(), - self.structure.path, 'loading keyword', - self.structure.name, inspect.stack()[0][3], - type_, value_, traceback_, message, - self._simulation_data.debug) - return (index_num + 1, aux_var_index) - return (index_num, aux_var_index) - - def _open_ext_file(self, fname, binary=False, write=False): - model_dim = self._data_dimensions.package_dim.model_dim[0] - read_file = self._simulation_data.mfpath.resolve_path( - fname, model_dim.model_name) - if write: - options = 'w' - else: - options = 'r' - if binary: - options = '{}b'.format(options) - try: - fd = open(read_file, options) - return fd - except: - message = 'Unable to open file {} in mode {}. Make sure the ' \ - 'file is not locked and the folder exists' \ - '.'.format(read_file, options) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self._data_dimensions.structure.get_model(), - self._data_dimensions.structure.get_package(), - self._data_dimensions.structure.path, - 'opening external file for writing', - self._data_dimensions.structure.name, inspect.stack()[0][3], - type_, value_, traceback_, message, - self._simulation_data.debug) - - @staticmethod - def datum_to_numpy_type(datum_type): - if datum_type == DatumType.integer: - return np.int32, 'int' - elif datum_type == DatumType.double_precision: - return np.float64, 'double' - elif datum_type == DatumType.string or \ - datum_type == DatumType.keyword: - return np.str, 'str' - else: - return None, None - - -class MFFileAccessArray(MFFileAccess): - def __init__(self, structure, data_dimensions, simulation_data, path, - current_key): - super(MFFileAccessArray, self).__init__( - structure, data_dimensions, simulation_data, path, current_key) - - def write_binary_file(self, data, fname, text, modelgrid=None, - modeltime=None, stress_period=0, - precision='double', write_multi_layer=False): - data = self._resolve_cellid_numbers_to_file(data) - fd = self._open_ext_file(fname, binary=True, write=True) - if write_multi_layer: - for layer, value in enumerate(data): - self._write_layer(fd, value, modelgrid, modeltime, - stress_period, precision, text, fname, - layer+1) - else: - self._write_layer(fd, data, modelgrid, modeltime, stress_period, - precision, text, fname) - data.tofile(fd) - fd.close() - - def _write_layer(self, fd, data, modelgrid, modeltime, stress_period, - precision, text, fname, ilay=None): - header_data = self._get_header(modelgrid, modeltime, stress_period, - precision, text, fname, ilay) - header_data.tofile(fd) - data.tofile(fd) - - def _get_header(self, modelgrid, modeltime, stress_period, precision, 
text, - fname, ilay=None): - # handle dis (row, col, lay), disv (ncpl, lay), and disu (nodes) cases - if modelgrid is not None and modeltime is not None: - pertim = modeltime.perlen[stress_period] - totim = modeltime.perlen.sum() - if ilay is None: - ilay = modelgrid.nlay - if modelgrid.grid_type == 'structured': - return BinaryHeader.create( - bintype='vardis', precision=precision, text=text, - nrow=modelgrid.nrow, ncol=modelgrid.ncol, - ilay=ilay, pertim=pertim, - totim=totim, kstp=1, kper=stress_period+1) - elif modelgrid.grid_type == 'vertex': - if ilay is None: - ilay = modelgrid.nlay - return BinaryHeader.create( - bintype='vardisv', precision=precision, text=text, - ncpl=modelgrid.ncpl, ilay=ilay, m3=1, - pertim=pertim, totim=totim, kstp=1, - kper=stress_period) - elif modelgrid.grid_type == 'unstructured': - return BinaryHeader.create( - bintype='vardisu', precision=precision, text=text, - nodes=modelgrid.nnodes, m2=1, m3=1, - pertim=pertim, totim=totim, kstp=1, kper=stress_period) - else: - if ilay is None: - ilay = 1 - header = BinaryHeader.create( - bintype='vardis', precision=precision, text=text, - nrow=1, ncol=1, ilay=ilay, pertim=pertim, - totim=totim, kstp=1, kper=stress_period) - if self._simulation_data.verbosity_level.value >= \ - VerbosityLevel.normal.value: - print('Model grid does not have a valid type. Using ' - 'default spatial discretization header values for ' - 'binary file {}.'.format(fname)) - else: - pertim = np.float64(1.0) - header = BinaryHeader.create( - bintype='vardis', precision=precision, text=text, - nrow=1, ncol=1, ilay=1, pertim=pertim, - totim=pertim, kstp=1, kper=stress_period) - if self._simulation_data.verbosity_level.value >= \ - VerbosityLevel.normal.value: - print('Binary file data not part of a model. Using default ' - 'spatial discretization header values for binary file ' - '{}.'.format(fname)) - return header - - def write_text_file(self, data, fp, data_type, data_size): - try: - fd = open(fp, 'w') - except: - message = 'Unable to open file {}. 
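# The header record that precedes each binary array layer, using the same
# BinaryHeader.create call as _get_header above; the grid dimensions and
# 'fd' (a file opened with mode 'wb') are illustrative assumptions.
from flopy.utils.binaryfile import BinaryHeader

header = BinaryHeader.create(bintype='vardis', precision='double',
                             text='HEAD', nrow=10, ncol=15, ilay=1,
                             pertim=1.0, totim=1.0, kstp=1, kper=1)
header.tofile(fd)  # write the header, then the layer's data.tofile(fd)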
Make sure the file ' \ - 'is not locked and the folder exists' \ - '.'.format(fp) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self._data_dimensions.structure.get_model(), - self._data_dimensions.structure.get_package(), - self._data_dimensions.structure.path, - 'opening external file for writing', - self.structure.name, inspect.stack()[0][3], type_, - value_, traceback_, message, - self._simulation_data.debug) - fd.write(self.get_data_string(data, data_type, '')) - fd.close() - - def read_binary_data_from_file(self, fname, data_shape, data_size, - data_type, modelgrid, - read_multi_layer=False): - import flopy.utils.binaryfile as bf - fd = self._open_ext_file(fname, True) - numpy_type, name = self.datum_to_numpy_type(data_type) - header_dtype = bf.BinaryHeader.set_dtype( - bintype=self._get_bintype(modelgrid), - precision='double') - if read_multi_layer and len(data_shape) > 1: - all_data = np.empty(data_shape, numpy_type) - headers = [] - layer_shape = data_shape[1:] - data_size = int(data_size / data_shape[0]) - for index in range(0, data_shape[0]): - layer_data = self._read_binary_file_layer( - fd, fname, header_dtype, numpy_type, data_size, layer_shape) - all_data[index, :] = layer_data[0] - headers.append(layer_data[1]) - fd.close() - return all_data, headers - else: - bin_data = self._read_binary_file_layer( - fd, fname, header_dtype, numpy_type, data_size, data_shape) - fd.close() - return bin_data - - def get_data_string(self, data, data_type, data_indent=''): - layer_data_string = ['{}'.format(data_indent)] - line_data_count = 0 - indent_str = self._simulation_data.indent_string - data_iter = datautil.PyListUtil.next_item(data) - is_cellid = self.structure.data_item_structures[0].numeric_index or \ - self.structure.data_item_structures[0].is_cellid - - jag_arr = self.structure.data_item_structures[0].jagged_array - jagged_def = None - jagged_def_index = 0 - if jag_arr is not None: - # get jagged array definition - jagged_def_path = self._path[0:-1] + (jag_arr,) - if jagged_def_path in self._simulation_data.mfdata: - jagged_def = self._simulation_data.mfdata[jagged_def_path].array - - for item, last_item, new_list, nesting_change in data_iter: - # increment data/layer counts - line_data_count += 1 - try: - data_lyr = to_string(item, data_type, - self._simulation_data, - self._data_dimensions, is_cellid) - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - comment = 'Could not convert data "{}" of type "{}" to a ' \ - 'string.'.format(item, data_type) - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'converting data', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, comment, - self._simulation_data.debug, ex) - layer_data_string[-1] = '{}{}{}'.format(layer_data_string[-1], - indent_str, - data_lyr) - - if jagged_def is not None: - if line_data_count == jagged_def[jagged_def_index]: - layer_data_string.append('{}'.format(data_indent)) - line_data_count = 0 - jagged_def_index += 1 - else: - if self._simulation_data.wrap_multidim_arrays and \ - (line_data_count == self._simulation_data. 
- max_columns_of_data or last_item): - layer_data_string.append('{}'.format(data_indent)) - line_data_count = 0 - if len(layer_data_string) > 0: - # clean up the text at the end of the array - layer_data_string[-1] = layer_data_string[-1].strip() - if len(layer_data_string) == 1: - return '{}{}\n'.format(data_indent, layer_data_string[0].rstrip()) - else: - return '\n'.join(layer_data_string) - - def _read_binary_file_layer(self, fd, fname, header_dtype, numpy_type, - data_size, data_shape): - header_data = np.fromfile(fd, dtype=header_dtype, count=1) - data = np.fromfile(fd, dtype=numpy_type, count=data_size) - data = self._resolve_cellid_numbers_from_file(data) - if data.size != data_size: - message = 'Binary file {} does not contain expected data. ' \ - 'Expected array size {} but found size ' \ - '{}.'.format(fname, data_size, data.size) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException( - self._data_dimensions.structure.get_model(), - self._data_dimensions.structure.get_package(), - self._data_dimensions.structure.path, - 'opening external file for writing', - self.structure.name, inspect.stack()[0][3], type_, - value_, traceback_, message, - self._simulation_data.debug) - return data.reshape(data_shape), header_data - - def read_text_data_from_file(self, data_size, data_type, data_dim, layer, - fname=None, fd=None, data_item=None): - # load variable data from file - current_size = 0 - if layer is None: - layer = 0 - close_file = False - if fd is None: - close_file = True - fd = self._open_ext_file(fname) - data_raw = [] - line = ' ' - PyListUtil.reset_delimiter_used() - while line != '' and len(data_raw) < data_size: - line = fd.readline() - arr_line = PyListUtil.split_data_line(line, True) - if not MFComment.is_comment(arr_line, True): - data_raw += arr_line - else: - PyListUtil.reset_delimiter_used() - - if len(data_raw) < data_size: - message = 'Not enough data in file {} for data "{}". 
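# The core numpy pattern behind read_text_data_from_file, reduced to a
# self-contained sketch: whitespace-split tokens from non-comment lines
# are accumulated and then converted and shaped in one step.
import numpy as np

tokens = ['1.0', '2.0', '3.0', '4.0', '5.0', '6.0']
arr = np.fromiter(tokens, dtype=np.float64, count=6).reshape((2, 3))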
' \ - 'Expected data size {} but only found ' \ - '{}.'.format(fd.name, - self._data_dimensions.structure.name, - data_size, current_size) - type_, value_, traceback_ = sys.exc_info() - if close_file: - fd.close() - raise MFDataException( - self._data_dimensions.structure.get_model(), - self._data_dimensions.structure.get_package(), - self._data_dimensions.structure.path, - 'reading data file', - self._data_dimensions.structure.name, - inspect.stack()[0][3], type_, value_, - traceback_, message, - self._simulation_data.debug) - - if data_type == DatumType.double_precision: - data_type = np.float64 - elif data_type == DatumType.integer: - data_type = np.int32 - - data_out = np.fromiter(data_raw, dtype=data_type, - count=data_size) - data_out = self._resolve_cellid_numbers_from_file(data_out) - if close_file: - fd.close() - - data_out = np.reshape(data_out, data_dim) - return data_out, current_size - - def load_from_package(self, first_line, file_handle, layer_shape, - storage, keyword, pre_data_comments=None): - # read in any pre data comments - current_line = self._read_pre_data_comments(first_line, file_handle, - pre_data_comments, storage) - datautil.PyListUtil.reset_delimiter_used() - arr_line = datautil.PyListUtil.\ - split_data_line(current_line) - package_dim = self._data_dimensions.package_dim - if len(arr_line) > 2: - # check for time array series - if arr_line[1].upper() == 'TIMEARRAYSERIES': - storage.set_tas(arr_line[2], arr_line[1], self._current_key) - return layer_shape, [False, None] - if not self.structure.data_item_structures[0].just_data: - # verify keyword - index_num, aux_var_index = self._load_keyword(arr_line, 0, keyword) - else: - index_num = 0 - aux_var_index = None - - # TODO: Add species support - # if layered supported, look for layered flag - if self.structure.layered or aux_var_index is not None: - if (len(arr_line) > index_num and - arr_line[index_num].lower() == 'layered'): - storage.layered = True - try: - layers = layer_shape - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'resolving layer dimensions', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, None, - self._simulation_data.debug, ex) - if len(layers) > 0: - storage.init_layers(layers) - elif aux_var_index is not None: - # each layer stores a different aux variable - layers = len(package_dim.get_aux_variables()[0]) - 1 - layer_shape = (layers,) - storage.layered = True - while storage.layer_storage.list_shape[0] < layers: - storage.add_layer() - else: - storage.flatten() - try: - dimensions = storage.get_data_dimensions( - layer_shape) - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - comment = 'Could not get data shape for key "{}".'.format( - self._current_key) - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'getting data shape', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, comment, - self._simulation_data.debug, ex) - layer_size = 1 - for dimension in dimensions: - layer_size *= dimension - - if aux_var_index is None: - # loop through the number of layers - for layer in storage.layer_storage.indexes(): - self._load_layer(layer, layer_size, storage, arr_line, - file_handle, layer_shape) - else: - # write the aux var to it's unique index - self._load_layer((aux_var_index,), layer_size, storage, arr_line, - file_handle, layer_shape) - return 
layer_shape, [False, None] - - def _load_layer(self, layer, layer_size, storage, arr_line, file_handle, - layer_shape): - di_struct = self.structure.data_item_structures[0] - if not di_struct.just_data or datautil.max_tuple_abs_size(layer) > 0: - arr_line = self._get_next_data_line(file_handle) - - layer_storage = storage.layer_storage[layer] - # if constant - if arr_line[0].upper() == 'CONSTANT': - if len(arr_line) < 2: - message = 'MFArray "{}" contains a CONSTANT that is not ' \ - 'followed by a number.'.format(self.structure.name) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'loading data layer from file', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, message, - self._simulation_data.debug) - # store data - layer_storage.set_internal_constant() - try: - storage.store_internal([convert_data( - arr_line[1], self._data_dimensions, self.structure.type, - di_struct)], layer, const=True) - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'storing data', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, None, - self._simulation_data.debug, ex) - # store anything else as a comment - if len(arr_line) > 2: - layer_storage.comments = \ - MFComment(' '.join(arr_line[2:]), self._path, - self._simulation_data, layer) - # if internal - elif arr_line[0].upper() == 'INTERNAL': - if len(arr_line) < 2: - message = 'Data array "{}" contains a INTERNAL that is not ' \ - 'followed by a multiplier' \ - '.'.format(self.structure.name) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'loading data layer from file', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, message, - self._simulation_data.debug) - - try: - multiplier, print_format = \ - storage.process_internal_line(arr_line) - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'processing line of data', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, None, - self._simulation_data.debug, ex) - storage.layer_storage[layer].set_internal_array() - - # store anything else as a comment - if len(arr_line) > 5: - layer_storage.comments = \ - MFComment(' '.join(arr_line[5:]), self._path, - self._simulation_data, layer) - - try: - # load variable data from current file - if multiplier is not None: - storage.layer_storage[layer].factor = multiplier - if print_format is not None: - storage.layer_storage[layer].iprn = print_format - data_type = storage.data_dimensions.structure.\ - get_datum_type(True) - data_from_file = self.read_text_data_from_file( - storage.get_data_size(layer), data_type, - storage.get_data_dimensions(layer), layer, - fd=file_handle) - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'reading data from file ' - '{}'.format(file_handle.name), - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, None, - self._simulation_data.debug, ex) - data_shaped = self._resolve_data_shape(data_from_file[0], - layer_shape, storage) - try: - storage.store_internal(data_shaped, 
layer, const=False, - multiplier=[multiplier], - print_format=print_format) - except Exception as ex: - comment = 'Could not store data: "{}"'.format(data_shaped) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'storing data', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, comment, - self._simulation_data.debug, ex) - elif arr_line[0].upper() == 'OPEN/CLOSE': - try: - storage.process_open_close_line(arr_line, layer) - except Exception as ex: - comment = 'Could not open open/close file specified by' \ - ' "{}".'.format(' '.join(arr_line)) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'storing data', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, comment, - self._simulation_data.debug, ex) - - def _is_cellid_or_numeric_index(self): - if self.structure.data_item_structures[0].numeric_index or \ - self.structure.data_item_structures[0].is_cellid: - return True - return False - - def _resolve_cellid_numbers_to_file(self, data): - if self._is_cellid_or_numeric_index(): - return abs(data) + 1 - else: - return data - - def _resolve_cellid_numbers_from_file(self, data): - if self._is_cellid_or_numeric_index(): - return abs(data) - 1 - else: - return data - - def _resolve_data_shape(self, data, layer_shape, storage): - try: - dimensions = storage.get_data_dimensions(layer_shape) - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - comment = 'Could not get data shape for key "{}".'.format( - self._current_key) - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'getting data shape', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, comment, - self._simulation_data.debug, ex) - if isinstance(data, list) or isinstance(data, np.ndarray): - try: - return np.reshape(data, dimensions).tolist() - except Exception as ex: - type_, value_, traceback_ = sys.exc_info() - comment = 'Could not reshape data to dimensions ' \ - '"{}".'.format(dimensions) - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'reshaping data', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, comment, - self._simulation_data.debug, ex) - else: - return data - - -class MFFileAccessList(MFFileAccess): - def __init__(self, structure, data_dimensions, simulation_data, path, - current_key): - super(MFFileAccessList, self).__init__( - structure, data_dimensions, simulation_data, path, current_key) - - def read_binary_data_from_file(self, read_file, modelgrid, - precision='double'): - # read from file - header, int_cellid_indexes, \ - ext_cellid_indexes = self._get_header(modelgrid, precision) - file_array = np.fromfile(read_file, dtype=header, count=-1) - # build data list for recarray - cellid_size = len(self._get_cell_header(modelgrid)) - data_list = [] - for record in file_array: - data_record = () - current_cellid_size = 0 - current_cellid = () - for index, data_item in enumerate(record): - if index in ext_cellid_indexes: - current_cellid += (data_item - 1,) - current_cellid_size += 1 - if current_cellid_size == cellid_size: - data_record += current_cellid - data_record = (data_record,) - current_cellid = () - current_cellid_size = 0 - else: - data_record += (data_item,) - data_list.append(data_record) - return data_list 
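# The one-based (file) versus zero-based (in-memory) index conversion
# implemented by the _resolve_cellid_numbers_* helpers above, sketched on
# a plain numpy array:
import numpy as np

internal = np.array([0, 4, 9])    # zero-based cellids held in memory
on_disk = np.abs(internal) + 1    # -> array([ 1,  5, 10]), as written to file
restored = np.abs(on_disk) - 1    # -> array([0, 4, 9]), as read back in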
- - def write_binary_file(self, data, fname, modelgrid=None, - precision='double'): - fd = self._open_ext_file(fname, binary=True, write=True) - data_array = self._build_data_array(data, modelgrid, precision) - data_array.tofile(fd) - fd.close() - - def _build_data_array(self, data, modelgrid, precision): - header, int_cellid_indexes,\ - ext_cellid_indexes = self._get_header(modelgrid, precision) - data_list = [] - for record in data: - new_record = () - for index, column in enumerate(record): - if index in int_cellid_indexes: - if isinstance(column, int): - new_record += (column + 1,) - else: - for item in column: - new_record += (item + 1,) - else: - new_record += (column,) - data_list.append(new_record) - return np.array(data_list, dtype=header) - - def _get_header(self, modelgrid, precision): - np_flt_type = np.float64 - header = [] - int_cellid_indexes = {} - ext_cellid_indexes = {} - ext_index = 0 - for index, di_struct in enumerate(self.structure.data_item_structures): - if di_struct.is_cellid: - cell_header = self._get_cell_header(modelgrid) - header += cell_header - int_cellid_indexes[index] = True - for index in range(ext_index, ext_index + len(cell_header)): - ext_cellid_indexes[index] = True - ext_index += len(cell_header) - elif not di_struct.optional: - header.append((di_struct.name, np_flt_type)) - ext_index += 1 - elif di_struct.name == 'aux': - aux_var_names = self._data_dimensions.package_dim.\ - get_aux_variables() - if aux_var_names is not None: - for aux_var_name in aux_var_names[0]: - if aux_var_name.lower() != 'auxiliary': - header.append((aux_var_name, np_flt_type)) - ext_index += 1 - return header, int_cellid_indexes, ext_cellid_indexes - - def _get_cell_header(self, modelgrid): - if modelgrid.grid_type == 'structured': - return [('layer', np.int32), ('row', np.int32), ('col', np.int32)] - elif modelgrid.grid_type == 'vertex_layered': - return [('layer', np.int32), ('ncpl', np.int32)] - else: - return [('nodes', np.int32)] - - def load_from_package(self, first_line, file_handle, storage, - pre_data_comments=None): - # lock things to maximize performance - self._data_dimensions.lock() - self._last_line_info = [] - self._data_line = None - - # read in any pre data comments - current_line = self._read_pre_data_comments(first_line, file_handle, - pre_data_comments, storage) - # reset data line delimiter so that the next split_data_line will - # automatically determine the delimiter - datautil.PyListUtil.reset_delimiter_used() - arr_line = datautil.PyListUtil.split_data_line(current_line) - if arr_line and (len(arr_line[0]) >= 2 and - arr_line[0][:3].upper() == 'END'): - return [False, arr_line] - if len(arr_line) >= 2 and arr_line[0].upper() == 'OPEN/CLOSE': - try: - storage.process_open_close_line(arr_line, (0,)) - except Exception as ex: - message = 'An error occurred while processing the following ' \ - 'open/close line: {}'.format(current_line) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), self._path, - 'processing open/close line', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, message, - self._simulation_data.debug, ex) - else: - have_newrec_line, newrec_line, self._data_line =\ - self.read_list_data_from_file(file_handle, storage, - self._current_key, - current_line, self._data_line) - return [have_newrec_line, newrec_line] - - # loop until end of block - line = ' ' - while line != '': - arr_line = self._get_next_data_line(file_handle) - if arr_line 
and (len(arr_line[0]) >= 2 and - arr_line[0][:3].upper() == 'END'): - # end of block - self._data_dimensions.unlock() - return [False, line] - self._data_dimensions.unlock() - return [False, None] - - def read_list_data_from_file(self, file_handle, storage, current_key, - current_line=None, data_line=None, - store_internal=True): - data_rec = None - data_loaded = [] - self._temp_dict = {} - self._last_line_info = [] - store_data = False - struct = self.structure - self.simple_line = \ - len(self._data_dimensions.package_dim.get_tsnames()) == 0 and \ - not struct.is_mname - for data_item in struct.data_item_structures: - if data_item.optional and data_item.name != 'boundname' and \ - data_item.name != 'aux': - self.simple_line = False - if current_line is None: - current_line = file_handle.readline() - PyListUtil.reset_delimiter_used() - arr_line = PyListUtil.split_data_line(current_line) - line_num = 0 - # read any pre-data commented lines - while current_line and MFComment.is_comment(arr_line, True): - arr_line.insert(0, '\n') - storage.add_data_line_comment(arr_line, line_num) - PyListUtil.reset_delimiter_used() - current_line = file_handle.readline() - arr_line = PyListUtil.split_data_line(current_line) - - try: - data_line = self._load_list_line( - storage, arr_line, line_num, data_loaded, True, - current_key=current_key, data_line=data_line)[1:] - line_num += 1 - store_data = True - except MFDataException as err: - # this could possibly be a constant line. - line = file_handle.readline() - arr_line = PyListUtil.split_data_line(line) - if len(arr_line) >= 2 and arr_line[0].upper() == 'CONSTANT' \ - and len(struct.data_item_structures) >= 2 and \ - struct.data_item_structures[0].name.upper() \ - == 'CELLID': - # store first line as a comment - if storage.pre_data_comments is None: - storage.pre_data_comments = \ - MFComment(current_line, struct.path, - self._simulation_data, 0) - else: - storage.pre_data_comments.add_text(current_line) - # store constant value for all cellids - storage.layer_storage.first_item().set_internal_constant() - if store_internal: - storage.store_internal( - convert_data(arr_line[1], self._data_dimensions, - struct.data_item_structures[1].type, - struct.data_item_structures[0]), - 0, const=True) - else: - data_rec = storage._build_recarray(arr_line[1], None, - True) - line = ' ' - while line != '': - line = file_handle.readline() - arr_line = PyListUtil.split_data_line(line) - if arr_line and (len(arr_line[0]) >= 2 and - arr_line[0][:3].upper() == 'END'): - return [False, line, data_line] - else: - # not a constant or open/close line, exception is valid - comment = 'Unable to process line 1 of data list: ' \ - '"{}"'.format(current_line) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(struct.get_model(), struct.get_package(), - struct.path, - 'loading data list from ' - 'package file', - struct.name, - inspect.stack()[0][3], type_, - value_, traceback_, comment, - self._simulation_data.debug, err) - - if struct.type == DatumType.record or struct.type == DatumType.string: - # records only contain a single line - storage.append_data(data_loaded) - storage.data_dimensions.unlock() - return [False, None, data_line] - - # get block recarray list for later processing - recarrays = [] - parent_block = struct.parent_block - if parent_block is not None: - recarrays = parent_block.get_all_recarrays() - recarray_len = len(recarrays) - - # loop until end of block - line = ' ' - optional_line_info = [] - line_info_processed = False - data_structs = 
struct.data_item_structures - while line != '': - line = file_handle.readline() - arr_line = PyListUtil.split_data_line(line) - if not line or (arr_line and len(arr_line[0]) >= 2 and - arr_line[0][:3].upper() == 'END'): - # end of block - if store_data: - if store_internal: - # store as rec array - storage.store_internal(data_loaded, None, False, - current_key) - storage.data_dimensions.unlock() - return [False, line, data_line] - else: - data_rec = storage._build_recarray(data_loaded, - current_key, True) - storage.data_dimensions.unlock() - return data_rec - if recarray_len != 1 and \ - not MFComment.is_comment(arr_line, True): - key = find_keyword(arr_line, struct.get_keywords()) - if key is None: - # unexpected text, may be start of another record - if store_data: - if store_internal: - storage.store_internal(data_loaded, None, False, - current_key) - storage.data_dimensions.unlock() - return [True, line, data_line] - else: - data_rec = storage._build_recarray(data_loaded, - current_key, - True) - storage.data_dimensions.unlock() - return data_rec - self.simple_line = self.simple_line \ - and self.structure.package_type != 'sfr' - if self.simple_line: - line_len = len(self._last_line_info) - if struct.num_optional > 0 and not line_info_processed: - line_info_processed = True - for index, data_item in \ - enumerate(struct.data_item_structures): - if index < line_len: - if data_item.optional: - self._last_line_info = \ - self._last_line_info[:index] - line_len = len(self._last_line_info) - optional_line_info.append(data_item) - else: - optional_line_info.append(data_item) - if MFComment.is_comment(arr_line, - True): - arr_line.insert(0, '\n') - storage.add_data_line_comment(arr_line, line_num) - else: - # do higher performance quick load - self._data_line = () - cellid_index = 0 - cellid_tuple = () - data_index = 0 - for index, entry in enumerate(self._last_line_info): - for sub_entry in entry: - if sub_entry[1] is not None: - if sub_entry[2] > 0: - # is a cellid - cellid_tuple += \ - (int(arr_line[sub_entry[0]]) - 1,) - # increment index - cellid_index += 1 - if cellid_index == sub_entry[2]: - # end of current cellid - self._data_line += (cellid_tuple,) - cellid_index = 0 - cellid_tuple = () - else: - # not a cellid - self._data_line += (convert_data( - arr_line[sub_entry[0]], - self._data_dimensions, - sub_entry[1], - data_structs[index]),) - else: - self._data_line += (None,) - data_index = sub_entry[0] - arr_line_len = len(arr_line) - if arr_line_len > data_index + 1: - # more data on the end of the line. 
see if it can - # be loaded as optional data - data_index += 1 - for data_item in struct.data_item_structures[ - len(self._last_line_info):]: - if arr_line_len <= data_index: - break - if len(arr_line[data_index]) > 0 and \ - arr_line[data_index][0] == '#': - break - elif data_item.name == 'aux': - data_index, self._data_line = \ - self._process_aux( - storage, arr_line, arr_line_len, - data_item, data_index, None, - current_key, self._data_line, - False)[0:2] - elif data_item.name == 'boundname' and \ - self._data_dimensions.package_dim.\ - boundnames(): - self._data_line += (convert_data( - arr_line[data_index], - self._data_dimensions, - data_item.type, - data_item),) - if arr_line_len > data_index + 1: - # FEATURE: Keep number of white space characters used - # in comments section - storage.comments[line_num] = MFComment( - ' '.join(arr_line[data_index + 1:]), struct.path, - self._simulation_data, line_num) - - data_loaded.append(self._data_line) - else: - try: - data_line = self._load_list_line( - storage, arr_line, line_num, data_loaded, False, - current_key=current_key, data_line=data_line)[1] - except Exception as ex: - comment = 'Unable to process line {} of data list: ' \ - '"{}"'.format(line_num + 1, line) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(struct.get_model(), - struct.get_package(), - struct.path, - 'loading data list from ' - 'package file', - struct.name, - inspect.stack()[0][3], type_, - value_, traceback_, comment, - self._simulation_data.debug, ex) - line_num += 1 - if store_data: - # store as rec array - storage.store_internal(data_loaded, None, False, current_key) - storage.data_dimensions.unlock() - if not store_internal: - return data_rec - else: - return [False, None, data_line] - - def _load_list_line(self, storage, arr_line, line_num, data_loaded, - build_type_list, current_key, data_index_start=0, - data_set=None, ignore_optional_vars=False, - data_line=None): - data_item_ks = None - struct = self.structure - org_data_line = data_line - # only initialize if we are at the start of a new line - if data_index_start == 0: - data_set = struct - # new line of data - data_line = () - # determine if at end of block - if arr_line and arr_line[0][:3].upper() == 'END': - self.enabled = True - return 0, data_line - data_index = data_index_start - arr_line_len = len(arr_line) - if MFComment.is_comment(arr_line, True) and data_index_start == 0: - arr_line.insert(0, '\n') - storage.add_data_line_comment(arr_line, line_num) - else: - # read variables - var_index = 0 - data = '' - for data_item_index, data_item in \ - enumerate(data_set.data_item_structures): - if not data_item.optional or not ignore_optional_vars: - if data_item.name == 'aux': - data_index, data_line = \ - self._process_aux(storage, arr_line, arr_line_len, - data_item, data_index, var_index, - current_key, data_line)[0:2] - # optional mname data items are only specified if the - # package is part of a model - elif not data_item.optional or \ - data_item.name[0:5] != 'mname' or \ - not storage.in_model: - if data_item.type == DatumType.keyword: - data_index += 1 - self.simple_line = False - elif data_item.type == DatumType.record: - # this is a record within a record, recurse into - # _load_line to load it - data_index, data_line = \ - self._load_list_line( - storage, arr_line, line_num, data_loaded, - build_type_list, current_key, data_index, - data_item, False, data_line=data_line) - self.simple_line = False - elif data_item.name != 'boundname' or \ - 
self._data_dimensions.package_dim.boundnames(): - if data_item.optional and data == '#': - # comment mark found and expecting optional - # data_item, we are done - break - if data_index >= arr_line_len and data_item.optional: - break - more_data_expected = True - unknown_repeats = False - repeat_count = 0 - while more_data_expected or unknown_repeats: - if data_index >= arr_line_len: - if data_item.optional or unknown_repeats: - break - elif struct.num_optional >= \ - len(data_set.data_item_structures)\ - - data_item_index: - # there are enough optional variables - # to account for the lack of data - # reload line with all optional - # variables ignored - data_line = org_data_line - return self._load_list_line( - storage, arr_line, line_num, - data_loaded, build_type_list, - current_key, data_index_start, - data_set, True, data_line=data_line) - else: - comment = 'Not enough data provided ' \ - 'for {}. Data for required ' \ - 'data item "{}" not ' \ - 'found'.format(struct.name, - data_item. - name) - type_, value_, \ - traceback_ = sys.exc_info() - raise MFDataException( - struct.get_model(), - struct.get_package(), - struct.path, - 'loading data list from ' - 'package file', - struct.name, - inspect.stack()[0][3], type_, - value_, traceback_, comment, - self._simulation_data.debug) - - data = arr_line[data_index] - repeat_count += 1 - if data_item.type == DatumType.keystring: - self.simple_line = False - if repeat_count <= 1: # only process the - # keyword on the first repeat find - # data item associated with correct - # keystring - name_data = data.lower() - if name_data not in \ - data_item.keystring_dict: - name_data = '{}record'.format( - name_data) - if name_data not in \ - data_item.keystring_dict: - # data does not match any - # expected keywords - if self._simulation_data.\ - verbosity_level.value >= \ - VerbosityLevel.normal.\ - value: - print('WARNING: Failed to ' - 'process line {}. 
' - 'Line does not match' - ' expected keystring' - ' {}'.format( - ' '.join(arr_line), - data_item.name)) - break - data_item_ks = \ - data_item.keystring_dict[ - name_data] - if data_item_ks == 0: - comment = 'Could not find ' \ - 'keystring ' \ - '{}.'.format(name_data) - type_, value_, \ - traceback_ = sys.exc_info() - raise MFDataException( - struct.get_model(), - struct.get_package(), - struct.path, - 'loading data list from ' - 'package file', - struct.name, - inspect.stack()[0][3], type_, - value_, traceback_, comment, - self._simulation_data.debug) - - # keyword is always implied in a - # keystring and should be stored, - # add a string data_item for the - # keyword - if data_item.name in \ - self._temp_dict: - # used cached data item for - # performance - keyword_data_item = \ - self._temp_dict[data_item.name] - else: - keyword_data_item = \ - deepcopy(data_item) - keyword_data_item.type = \ - DatumType.string - self._temp_dict[data_item.name] \ - = keyword_data_item - data_index, more_data_expected, \ - data_line, unknown_repeats = \ - self._append_data_list( - storage, - keyword_data_item, - arr_line, - arr_line_len, - data_index, - var_index, - repeat_count, - current_key, - data_line) - if isinstance(data_item_ks, - MFDataStructure): - dis = \ - data_item_ks.data_item_structures - for ks_data_item in dis: - if ks_data_item.type != \ - DatumType.keyword \ - and data_index < \ - arr_line_len: - # data item contains additional - # information - data_index, more_data_expected, \ - data_line, unknown_repeats = \ - self._append_data_list( - storage, - ks_data_item, - arr_line, - arr_line_len, - data_index, - var_index, - repeat_count, - current_key, - data_line) - while data_index < arr_line_len: - try: - # append remaining data - # (temporary fix) - data_index, more_data_expected, \ - data_line, unknown_repeats = \ - self._append_data_list( - storage, - ks_data_item, - arr_line, - arr_line_len, - data_index, - var_index, - repeat_count, - current_key, - data_line) - except MFDataException: - break - else: - if data_item_ks.type != \ - DatumType.keyword: - data_index, more_data_expected, \ - data_line, unknown_repeats = \ - self._append_data_list( - storage, data_item_ks, arr_line, - arr_line_len, data_index, - var_index, repeat_count, - current_key, data_line) - else: - # append empty data as a placeholder. - # this is necessarily to keep the - # recarray a consistent shape - data_line = \ - data_line + (None,) - data_index += 1 - else: - if data_item.tagged and repeat_count == 1: - # data item tagged, include data item - # name as a keyword - di_type = data_item.type - data_item.type = DatumType.keyword - data_index, more_data_expected, \ - data_line, unknown_repeats = \ - self._append_data_list( - storage, data_item, arr_line, - arr_line_len, data_index, - var_index, repeat_count, - current_key, data_line) - data_item.type = di_type - data_index, more_data_expected, \ - data_line, unknown_repeats = \ - self._append_data_list( - storage, data_item, arr_line, - arr_line_len, data_index, var_index, - repeat_count, current_key, - data_line) - if more_data_expected is None: - # indeterminate amount of data expected. 
- # keep reading data until eoln - more_data_expected = \ - (data_index < arr_line_len) - self.simple_line = self.simple_line and \ - not unknown_repeats and \ - (len(data_item.shape) == 0 or - data_item.is_cellid) - var_index += 1 - - # populate unused optional variables with None type - for data_item in data_set.data_item_structures[var_index:]: - if data_item.name == 'aux': - data_line = self._process_aux( - storage, arr_line, arr_line_len, data_item, data_index, - var_index, current_key, data_line)[1] - elif data_item.name != 'boundname' or \ - self._data_dimensions.package_dim.boundnames(): - data_index, more_data_expected, data_line, \ - unknown_repeats = self._append_data_list( - storage, data_item, None, 0, data_index, var_index, 1, - current_key, data_line) - - # only do final processing on outer-most record - if data_index_start == 0: - # if more pieces exist - if arr_line_len > data_index + 1: - # FEATURE: Keep number of white space characters used in - # comments section - storage.comments[line_num] = MFComment( - ' '.join(arr_line[data_index+1:]), struct.path, - self._simulation_data, line_num) - data_loaded.append(data_line) - return data_index, data_line - - def _process_aux(self, storage, arr_line, arr_line_len, data_item, - data_index, var_index, current_key, data_line, - add_to_last_line=True): - aux_var_names = self._data_dimensions.package_dim.get_aux_variables() - more_data_expected = False - if aux_var_names is not None: - for var_name in aux_var_names[0]: - if var_name.lower() != 'auxiliary': - if data_index >= arr_line_len: - # store placeholder None - data_index, more_data_expected, data_line = \ - self._append_data_list( - storage, data_item, None, 0, data_index, - var_index, 1, current_key, data_line, - add_to_last_line)[0:3] - else: - # read in aux variables - data_index, more_data_expected, data_line = \ - self._append_data_list( - storage, data_item, arr_line, arr_line_len, - data_index, var_index, 0, current_key, - data_line, add_to_last_line)[0:3] - return data_index, data_line, more_data_expected - - def _append_data_list(self, storage, data_item, arr_line, arr_line_len, - data_index, var_index, repeat_count, current_key, - data_line, add_to_last_line=True): - # append to a 2-D list which will later be converted to a numpy - # rec array - struct = self.structure - if add_to_last_line: - self._last_line_info.append([]) - if data_item.is_cellid or (data_item.possible_cellid and - storage._validate_cellid( - arr_line, data_index)): - if self._data_dimensions is None: - comment = 'CellID field specified in for data ' \ - '"{}" field "{}" which does not contain a model '\ - 'grid. This could be due to a problem with ' \ - 'the flopy definition files. 
Please get the ' \ - 'latest flopy definition files' \ - '.'.format(struct.name, data_item.name) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(struct.get_model(), - struct.get_package(), struct.path, - 'loading data list from package file', - struct.name, - inspect.stack()[0][3], type_, value_, - traceback_, comment, - self._simulation_data.debug) - # read in the entire cellid - model_grid = self._data_dimensions.get_model_grid() - cellid_size = model_grid.get_num_spatial_coordinates() - cellid_tuple = () - if not DatumUtil.is_int(arr_line[data_index]) and \ - arr_line[data_index].lower() == 'none': - # special case where cellid is 'none', store as 'none' - cellid_tuple = 'none' - if add_to_last_line: - self._last_line_info[-1].append([data_index, - data_item.type, - cellid_size]) - new_index = data_index + 1 - else: - # handle regular cellid - if cellid_size + data_index > arr_line_len: - comment = 'Not enough data found when reading cell ID ' \ - 'in data "{}" field "{}". Expected {} items ' \ - 'and found {} items'\ - '.'.format(struct.name, - data_item.name, cellid_size, - arr_line_len - data_index) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(struct.get_model(), - struct.get_package(), - struct.path, - 'loading data list from package ' - 'file', struct.name, - inspect.stack()[0][3], type_, value_, - traceback_, comment, - self._simulation_data.debug) - for index in range(data_index, cellid_size + data_index): - if not DatumUtil.is_int(arr_line[index]) or \ - int(arr_line[index]) < 0: - comment = 'Expected a integer or cell ID in ' \ - 'data "{}" field "{}". Found {} ' \ - 'in line "{}"' \ - '. '.format(struct.name, - data_item.name, arr_line[index], - arr_line) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(struct.get_model(), - struct.get_package(), - struct.path, - 'loading data list from package ' - 'file', struct.name, - inspect.stack()[0][3], type_, - value_, - traceback_, comment, - self._simulation_data.debug) - - data_converted = convert_data(arr_line[index], - self._data_dimensions, - data_item.type) - cellid_tuple = cellid_tuple + (int(data_converted) - 1,) - if add_to_last_line: - self._last_line_info[-1].append([index, - data_item.type, - cellid_size]) - new_index = data_index + cellid_size - data_line = data_line + (cellid_tuple,) - if data_item.shape is not None and len(data_item.shape) > 0 and \ - data_item.shape[0] == 'ncelldim': - # shape is the coordinate shape, which has already been read - more_data_expected = False - unknown_repeats = False - else: - more_data_expected, unknown_repeats = \ - storage.resolve_shape_list( - data_item, repeat_count, current_key, data_line) - return new_index, more_data_expected, data_line, unknown_repeats - else: - if arr_line is None: - data_converted = None - if add_to_last_line: - self._last_line_info[-1].append([data_index, - data_item.type, 0]) - else: - if arr_line[data_index].lower() in \ - self._data_dimensions.package_dim.get_tsnames(): - # references a time series, store as is - data_converted = arr_line[data_index].lower() - # override recarray data type to support writing - # string values - storage.override_data_type(var_index, object) - if add_to_last_line: - self._last_line_info[-1].append([data_index, - DatumType.string, 0]) - else: - data_converted = convert_data(arr_line[data_index], - self._data_dimensions, - data_item.type, - data_item) - if add_to_last_line: - self._last_line_info[-1].append([data_index, - data_item.type, 0]) - data_line = 
data_line + (data_converted,) - more_data_expected, unknown_repeats = \ - storage.resolve_shape_list( - data_item, repeat_count, current_key, data_line) - return data_index + 1, more_data_expected, data_line, \ - unknown_repeats - - -class MFFileAccessScalar(MFFileAccess): - def __init__(self, structure, data_dimensions, simulation_data, path, - current_key): - super(MFFileAccessScalar, self).__init__( - structure, data_dimensions, simulation_data, path, current_key) - - def load_from_package(self, first_line, file_handle, storage, data_type, - keyword, pre_data_comments=None): - # read in any pre data comments - current_line = self._read_pre_data_comments(first_line, file_handle, - pre_data_comments, storage) - - datautil.PyListUtil.reset_delimiter_used() - arr_line = datautil.PyListUtil.\ - split_data_line(current_line) - # verify keyword - index_num = self._load_keyword(arr_line, 0, keyword)[0] - - # store data - datatype = self.structure.get_datatype() - if self.structure.type == DatumType.record: - index = 0 - for data_item_type in self.structure.get_data_item_types(): - optional = self.structure.data_item_structures[index].optional - if len(arr_line) <= index + 1 or \ - data_item_type[0] != DatumType.keyword or (index > 0 - and optional == True): - break - index += 1 - first_type = self.structure.get_data_item_types()[0] - if first_type[0] == DatumType.keyword: - converted_data = [True] - else: - converted_data = [] - if first_type[0] != DatumType.keyword or index == 1: - if self.structure.get_data_item_types()[1] != \ - DatumType.keyword or arr_line[index].lower == \ - self.structure.data_item_structures[index].name: - try: - converted_data.append(convert_data( - arr_line[index], - self._data_dimensions, - self.structure.data_item_structures[index].type, - self.structure.data_item_structures[0])) - except Exception as ex: - message = 'Could not convert "{}" of type "{}" ' \ - 'to a string.'.format( - arr_line[index], - self.structure.data_item_structures[index]. - type) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'converting data to string', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, message, - self._simulation_data.debug, ex) - try: - storage.set_data(converted_data, key=self._current_key) - index_num += 1 - except Exception as ex: - message = 'Could not set data "{}" with key ' \ - '"{}".'.format(converted_data, self._current_key) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'setting data', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, message, - self._simulation_data.debug, ex) - elif datatype == DataType.scalar_keyword or \ - datatype == DataType.scalar_keyword_transient: - # store as true - try: - storage.set_data(True, key=self._current_key) - except Exception as ex: - message = 'Could not set data "True" with key ' \ - '"{}".'.format(self._current_key) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'setting data', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, message, - self._simulation_data.debug, ex) - else: - data_item_struct = self.structure.data_item_structures[0] - if len(arr_line) < 1 + index_num: - message = 'Error reading variable "{}". 
Expected data ' \ - 'after label "{}" not found at line ' \ - '"{}".'.format(self.structure.name, - data_item_struct.name.lower(), - current_line) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'loading data from file', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, message, - self._simulation_data.debug) - try: - converted_data = convert_data(arr_line[index_num], - self._data_dimensions, - data_type, data_item_struct) - except Exception as ex: - message = 'Could not convert "{}" of type "{}" ' \ - 'to a string.'.format(arr_line[index_num], - data_type) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'converting data to string', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, message, - self._simulation_data.debug, ex) - try: - # read next word as data - storage.set_data(converted_data, key=self._current_key) - except Exception as ex: - message = 'Could not set data "{}" with key ' \ - '"{}".'.format(converted_data, self._current_key) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'setting data', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, message, - self._simulation_data.debug, ex) - index_num += 1 - - if len(arr_line) > index_num: - # save remainder of line as comment - storage.add_data_line_comment(arr_line[index_num:], 0) - return [False, None] +import sys, inspect +from copy import deepcopy +import numpy as np +from ..mfbase import MFDataException, VerbosityLevel +from ...utils.datautil import PyListUtil, find_keyword, DatumUtil, MultiListIter +from .mfdatautil import convert_data, to_string, MFComment +from ...utils.binaryfile import BinaryHeader +from ...utils import datautil +from ..data.mfstructure import DatumType, MFDataStructure, DataType + + +class MFFileAccess(object): + def __init__(self, structure, data_dimensions, simulation_data, path, + current_key): + self.structure = structure + self._data_dimensions = data_dimensions + self._simulation_data = simulation_data + self._path = path + self._current_key = current_key + + @staticmethod + def _get_bintype(modelgrid): + if modelgrid.grid_type == 'vertex': + return 'vardisv' + elif modelgrid.grid_type == 'unstructured': + return 'vardisu' + else: + return 'vardis' + + def _get_next_data_line(self, file_handle): + end_of_file = False + while not end_of_file: + line = file_handle.readline() + if line == '': + message = 'More data expected when reading {} from file ' \ + '{}'.format(self.structure.name, file_handle.name) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.structure.get_model(), + self.structure.get_package(), + self.structure.path, 'reading data from file', + self.structure.name, inspect.stack()[0][3], + type_, value_, traceback_, message, + self._simulation_data.debug) + clean_line = line.strip() + # If comment or empty line + if not MFComment.is_comment(clean_line, True): + return datautil.PyListUtil.split_data_line(clean_line) + + def _read_pre_data_comments(self, line, file_handle, pre_data_comments, + storage): + line_num = 0 + if pre_data_comments: + storage.pre_data_comments = MFComment(pre_data_comments.text, + self._path, + self._simulation_data, + line_num) + else: + storage.pre_data_comments = None + 
+ # read through any fully commented or empty lines + PyListUtil.reset_delimiter_used() + arr_line = PyListUtil.split_data_line(line) + while MFComment.is_comment(arr_line, True) and line != '': + if storage.pre_data_comments: + storage.pre_data_comments.add_text('\n') + storage.pre_data_comments.add_text(' '.join(arr_line)) + else: + storage.pre_data_comments = MFComment(arr_line, self._path, + self._simulation_data, + line_num) + + storage.add_data_line_comment(arr_line, line_num) + + line = file_handle.readline() + arr_line = PyListUtil.split_data_line(line) + return line + + def _get_aux_var_index(self, aux_name): + aux_var_index = None + # confirm whether the keyword found is an auxiliary variable name + aux_var_names = self._data_dimensions.package_dim.get_aux_variables() + if aux_var_names: + for aux_var_name, index in zip(aux_var_names[0], + range(0,len(aux_var_names[0]))): + if aux_name.lower() == aux_var_name.lower(): + aux_var_index = index - 1 + return aux_var_index + + def _load_keyword(self, arr_line, index_num, keyword): + aux_var_index = None + if keyword != '': + # verify keyword + keyword_found = arr_line[index_num].lower() + keyword_match = keyword.lower() == keyword_found + aux_var_names = None + if not keyword_match: + aux_var_index = self._get_aux_var_index(keyword_found) + if not keyword_match and aux_var_index is None: + aux_text = '' + if aux_var_names is not None: + aux_text = ' or auxiliary variables ' \ + '{}'.format(aux_var_names[0]) + message = 'Error reading variable "{}". Expected ' \ + 'variable keyword "{}"{} not found ' \ + 'at line "{}". {}'.format(self.structure.name, + keyword, + aux_text, + ' '.join(arr_line), + self._path) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self.structure.get_model(), + self.structure.get_package(), + self.structure.path, 'loading keyword', + self.structure.name, inspect.stack()[0][3], + type_, value_, traceback_, message, + self._simulation_data.debug) + return (index_num + 1, aux_var_index) + return (index_num, aux_var_index) + + def _open_ext_file(self, fname, binary=False, write=False): + model_dim = self._data_dimensions.package_dim.model_dim[0] + read_file = self._simulation_data.mfpath.resolve_path( + fname, model_dim.model_name) + if write: + options = 'w' + else: + options = 'r' + if binary: + options = '{}b'.format(options) + try: + fd = open(read_file, options) + return fd + except: + message = 'Unable to open file {} in mode {}. 
Make sure the ' \ + 'file is not locked and the folder exists' \ + '.'.format(read_file, options) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self._data_dimensions.structure.get_model(), + self._data_dimensions.structure.get_package(), + self._data_dimensions.structure.path, + 'opening external file for writing', + self._data_dimensions.structure.name, inspect.stack()[0][3], + type_, value_, traceback_, message, + self._simulation_data.debug) + + @staticmethod + def datum_to_numpy_type(datum_type): + if datum_type == DatumType.integer: + return np.int32, 'int' + elif datum_type == DatumType.double_precision: + return np.float64, 'double' + elif datum_type == DatumType.string or \ + datum_type == DatumType.keyword: + return np.str, 'str' + else: + return None, None + + +class MFFileAccessArray(MFFileAccess): + def __init__(self, structure, data_dimensions, simulation_data, path, + current_key): + super(MFFileAccessArray, self).__init__( + structure, data_dimensions, simulation_data, path, current_key) + + def write_binary_file(self, data, fname, text, modelgrid=None, + modeltime=None, stress_period=0, + precision='double', write_multi_layer=False): + data = self._resolve_cellid_numbers_to_file(data) + fd = self._open_ext_file(fname, binary=True, write=True) + if write_multi_layer: + for layer, value in enumerate(data): + self._write_layer(fd, value, modelgrid, modeltime, + stress_period, precision, text, fname, + layer+1) + else: + self._write_layer(fd, data, modelgrid, modeltime, stress_period, + precision, text, fname) + data.tofile(fd) + fd.close() + + def _write_layer(self, fd, data, modelgrid, modeltime, stress_period, + precision, text, fname, ilay=None): + header_data = self._get_header(modelgrid, modeltime, stress_period, + precision, text, fname, ilay) + header_data.tofile(fd) + data.tofile(fd) + + def _get_header(self, modelgrid, modeltime, stress_period, precision, text, + fname, ilay=None): + # handle dis (row, col, lay), disv (ncpl, lay), and disu (nodes) cases + if modelgrid is not None and modeltime is not None: + pertim = modeltime.perlen[stress_period] + totim = modeltime.perlen.sum() + if ilay is None: + ilay = modelgrid.nlay + if modelgrid.grid_type == 'structured': + return BinaryHeader.create( + bintype='vardis', precision=precision, text=text, + nrow=modelgrid.nrow, ncol=modelgrid.ncol, + ilay=ilay, pertim=pertim, + totim=totim, kstp=1, kper=stress_period+1) + elif modelgrid.grid_type == 'vertex': + if ilay is None: + ilay = modelgrid.nlay + return BinaryHeader.create( + bintype='vardisv', precision=precision, text=text, + ncpl=modelgrid.ncpl, ilay=ilay, m3=1, + pertim=pertim, totim=totim, kstp=1, + kper=stress_period) + elif modelgrid.grid_type == 'unstructured': + return BinaryHeader.create( + bintype='vardisu', precision=precision, text=text, + nodes=modelgrid.nnodes, m2=1, m3=1, + pertim=pertim, totim=totim, kstp=1, kper=stress_period) + else: + if ilay is None: + ilay = 1 + header = BinaryHeader.create( + bintype='vardis', precision=precision, text=text, + nrow=1, ncol=1, ilay=ilay, pertim=pertim, + totim=totim, kstp=1, kper=stress_period) + if self._simulation_data.verbosity_level.value >= \ + VerbosityLevel.normal.value: + print('Model grid does not have a valid type. 
Using ' + 'default spatial discretization header values for ' + 'binary file {}.'.format(fname)) + else: + pertim = np.float64(1.0) + header = BinaryHeader.create( + bintype='vardis', precision=precision, text=text, + nrow=1, ncol=1, ilay=1, pertim=pertim, + totim=pertim, kstp=1, kper=stress_period) + if self._simulation_data.verbosity_level.value >= \ + VerbosityLevel.normal.value: + print('Binary file data not part of a model. Using default ' + 'spatial discretization header values for binary file ' + '{}.'.format(fname)) + return header + + def write_text_file(self, data, fp, data_type, data_size): + try: + fd = open(fp, 'w') + except: + message = 'Unable to open file {}. Make sure the file ' \ + 'is not locked and the folder exists' \ + '.'.format(fp) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self._data_dimensions.structure.get_model(), + self._data_dimensions.structure.get_package(), + self._data_dimensions.structure.path, + 'opening external file for writing', + self.structure.name, inspect.stack()[0][3], type_, + value_, traceback_, message, + self._simulation_data.debug) + fd.write(self.get_data_string(data, data_type, '')) + fd.close() + + def read_binary_data_from_file(self, fname, data_shape, data_size, + data_type, modelgrid, + read_multi_layer=False): + import flopy.utils.binaryfile as bf + fd = self._open_ext_file(fname, True) + numpy_type, name = self.datum_to_numpy_type(data_type) + header_dtype = bf.BinaryHeader.set_dtype( + bintype=self._get_bintype(modelgrid), + precision='double') + if read_multi_layer and len(data_shape) > 1: + all_data = np.empty(data_shape, numpy_type) + headers = [] + layer_shape = data_shape[1:] + data_size = int(data_size / data_shape[0]) + for index in range(0, data_shape[0]): + layer_data = self._read_binary_file_layer( + fd, fname, header_dtype, numpy_type, data_size, layer_shape) + all_data[index, :] = layer_data[0] + headers.append(layer_data[1]) + fd.close() + return all_data, headers + else: + bin_data = self._read_binary_file_layer( + fd, fname, header_dtype, numpy_type, data_size, data_shape) + fd.close() + return bin_data + + def get_data_string(self, data, data_type, data_indent=''): + layer_data_string = ['{}'.format(data_indent)] + line_data_count = 0 + indent_str = self._simulation_data.indent_string + data_iter = datautil.PyListUtil.next_item(data) + is_cellid = self.structure.data_item_structures[0].numeric_index or \ + self.structure.data_item_structures[0].is_cellid + + jag_arr = self.structure.data_item_structures[0].jagged_array + jagged_def = None + jagged_def_index = 0 + if jag_arr is not None: + # get jagged array definition + jagged_def_path = self._path[0:-1] + (jag_arr,) + if jagged_def_path in self._simulation_data.mfdata: + jagged_def = self._simulation_data.mfdata[jagged_def_path].array + + for item, last_item, new_list, nesting_change in data_iter: + # increment data/layer counts + line_data_count += 1 + try: + data_lyr = to_string(item, data_type, + self._simulation_data, + self._data_dimensions, is_cellid) + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + comment = 'Could not convert data "{}" of type "{}" to a ' \ + 'string.'.format(item, data_type) + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'converting data', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, comment, + self._simulation_data.debug, ex) + layer_data_string[-1] = '{}{}{}'.format(layer_data_string[-1], + indent_str, 
+ data_lyr) + + if jagged_def is not None: + if line_data_count == jagged_def[jagged_def_index]: + layer_data_string.append('{}'.format(data_indent)) + line_data_count = 0 + jagged_def_index += 1 + else: + if self._simulation_data.wrap_multidim_arrays and \ + (line_data_count == self._simulation_data. + max_columns_of_data or last_item): + layer_data_string.append('{}'.format(data_indent)) + line_data_count = 0 + if len(layer_data_string) > 0: + # clean up the text at the end of the array + layer_data_string[-1] = layer_data_string[-1].strip() + if len(layer_data_string) == 1: + return '{}{}\n'.format(data_indent, layer_data_string[0].rstrip()) + else: + return '\n'.join(layer_data_string) + + def _read_binary_file_layer(self, fd, fname, header_dtype, numpy_type, + data_size, data_shape): + header_data = np.fromfile(fd, dtype=header_dtype, count=1) + data = np.fromfile(fd, dtype=numpy_type, count=data_size) + data = self._resolve_cellid_numbers_from_file(data) + if data.size != data_size: + message = 'Binary file {} does not contain expected data. ' \ + 'Expected array size {} but found size ' \ + '{}.'.format(fname, data_size, data.size) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException( + self._data_dimensions.structure.get_model(), + self._data_dimensions.structure.get_package(), + self._data_dimensions.structure.path, + 'opening external file for writing', + self.structure.name, inspect.stack()[0][3], type_, + value_, traceback_, message, + self._simulation_data.debug) + return data.reshape(data_shape), header_data + + def read_text_data_from_file(self, data_size, data_type, data_dim, layer, + fname=None, fd=None, data_item=None): + # load variable data from file + current_size = 0 + if layer is None: + layer = 0 + close_file = False + if fd is None: + close_file = True + fd = self._open_ext_file(fname) + data_raw = [] + line = ' ' + PyListUtil.reset_delimiter_used() + while line != '' and len(data_raw) < data_size: + line = fd.readline() + arr_line = PyListUtil.split_data_line(line, True) + if not MFComment.is_comment(arr_line, True): + data_raw += arr_line + else: + PyListUtil.reset_delimiter_used() + + if len(data_raw) < data_size: + message = 'Not enough data in file {} for data "{}". 
' \ + 'Expected data size {} but only found ' \ + '{}.'.format(fd.name, + self._data_dimensions.structure.name, + data_size, current_size) + type_, value_, traceback_ = sys.exc_info() + if close_file: + fd.close() + raise MFDataException( + self._data_dimensions.structure.get_model(), + self._data_dimensions.structure.get_package(), + self._data_dimensions.structure.path, + 'reading data file', + self._data_dimensions.structure.name, + inspect.stack()[0][3], type_, value_, + traceback_, message, + self._simulation_data.debug) + + if data_type == DatumType.double_precision: + data_type = np.float64 + elif data_type == DatumType.integer: + data_type = np.int32 + + data_out = np.fromiter(data_raw, dtype=data_type, + count=data_size) + data_out = self._resolve_cellid_numbers_from_file(data_out) + if close_file: + fd.close() + + data_out = np.reshape(data_out, data_dim) + return data_out, current_size + + def load_from_package(self, first_line, file_handle, layer_shape, + storage, keyword, pre_data_comments=None): + # read in any pre data comments + current_line = self._read_pre_data_comments(first_line, file_handle, + pre_data_comments, storage) + datautil.PyListUtil.reset_delimiter_used() + arr_line = datautil.PyListUtil.\ + split_data_line(current_line) + package_dim = self._data_dimensions.package_dim + if len(arr_line) > 2: + # check for time array series + if arr_line[1].upper() == 'TIMEARRAYSERIES': + storage.set_tas(arr_line[2], arr_line[1], self._current_key) + return layer_shape, [False, None] + if not self.structure.data_item_structures[0].just_data: + # verify keyword + index_num, aux_var_index = self._load_keyword(arr_line, 0, keyword) + else: + index_num = 0 + aux_var_index = None + + # TODO: Add species support + # if layered supported, look for layered flag + if self.structure.layered or aux_var_index is not None: + if (len(arr_line) > index_num and + arr_line[index_num].lower() == 'layered'): + storage.layered = True + try: + layers = layer_shape + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'resolving layer dimensions', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, None, + self._simulation_data.debug, ex) + if len(layers) > 0: + storage.init_layers(layers) + elif aux_var_index is not None: + # each layer stores a different aux variable + layers = len(package_dim.get_aux_variables()[0]) - 1 + layer_shape = (layers,) + storage.layered = True + while storage.layer_storage.list_shape[0] < layers: + storage.add_layer() + else: + storage.flatten() + try: + dimensions = storage.get_data_dimensions( + layer_shape) + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + comment = 'Could not get data shape for key "{}".'.format( + self._current_key) + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'getting data shape', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, comment, + self._simulation_data.debug, ex) + layer_size = 1 + for dimension in dimensions: + layer_size *= dimension + + if aux_var_index is None: + # loop through the number of layers + for layer in storage.layer_storage.indexes(): + self._load_layer(layer, layer_size, storage, arr_line, + file_handle, layer_shape) + else: + # write the aux var to it's unique index + self._load_layer((aux_var_index,), layer_size, storage, arr_line, + file_handle, layer_shape) + return 
layer_shape, [False, None] + + def _load_layer(self, layer, layer_size, storage, arr_line, file_handle, + layer_shape): + di_struct = self.structure.data_item_structures[0] + if not di_struct.just_data or datautil.max_tuple_abs_size(layer) > 0: + arr_line = self._get_next_data_line(file_handle) + + layer_storage = storage.layer_storage[layer] + # if constant + if arr_line[0].upper() == 'CONSTANT': + if len(arr_line) < 2: + message = 'MFArray "{}" contains a CONSTANT that is not ' \ + 'followed by a number.'.format(self.structure.name) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'loading data layer from file', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, message, + self._simulation_data.debug) + # store data + layer_storage.set_internal_constant() + try: + storage.store_internal([convert_data( + arr_line[1], self._data_dimensions, self.structure.type, + di_struct)], layer, const=True) + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'storing data', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, None, + self._simulation_data.debug, ex) + # store anything else as a comment + if len(arr_line) > 2: + layer_storage.comments = \ + MFComment(' '.join(arr_line[2:]), self._path, + self._simulation_data, layer) + # if internal + elif arr_line[0].upper() == 'INTERNAL': + if len(arr_line) < 2: + message = 'Data array "{}" contains a INTERNAL that is not ' \ + 'followed by a multiplier' \ + '.'.format(self.structure.name) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'loading data layer from file', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, message, + self._simulation_data.debug) + + try: + multiplier, print_format = \ + storage.process_internal_line(arr_line) + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'processing line of data', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, None, + self._simulation_data.debug, ex) + storage.layer_storage[layer].set_internal_array() + + # store anything else as a comment + if len(arr_line) > 5: + layer_storage.comments = \ + MFComment(' '.join(arr_line[5:]), self._path, + self._simulation_data, layer) + + try: + # load variable data from current file + if multiplier is not None: + storage.layer_storage[layer].factor = multiplier + if print_format is not None: + storage.layer_storage[layer].iprn = print_format + data_type = storage.data_dimensions.structure.\ + get_datum_type(True) + data_from_file = self.read_text_data_from_file( + storage.get_data_size(layer), data_type, + storage.get_data_dimensions(layer), layer, + fd=file_handle) + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'reading data from file ' + '{}'.format(file_handle.name), + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, None, + self._simulation_data.debug, ex) + data_shaped = self._resolve_data_shape(data_from_file[0], + layer_shape, storage) + try: + storage.store_internal(data_shaped, 
layer, const=False, + multiplier=[multiplier], + print_format=print_format) + except Exception as ex: + comment = 'Could not store data: "{}"'.format(data_shaped) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'storing data', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, comment, + self._simulation_data.debug, ex) + elif arr_line[0].upper() == 'OPEN/CLOSE': + try: + storage.process_open_close_line(arr_line, layer) + except Exception as ex: + comment = 'Could not open open/close file specified by' \ + ' "{}".'.format(' '.join(arr_line)) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'storing data', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, comment, + self._simulation_data.debug, ex) + + def _is_cellid_or_numeric_index(self): + if self.structure.data_item_structures[0].numeric_index or \ + self.structure.data_item_structures[0].is_cellid: + return True + return False + + def _resolve_cellid_numbers_to_file(self, data): + if self._is_cellid_or_numeric_index(): + return abs(data) + 1 + else: + return data + + def _resolve_cellid_numbers_from_file(self, data): + if self._is_cellid_or_numeric_index(): + return abs(data) - 1 + else: + return data + + def _resolve_data_shape(self, data, layer_shape, storage): + try: + dimensions = storage.get_data_dimensions(layer_shape) + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + comment = 'Could not get data shape for key "{}".'.format( + self._current_key) + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'getting data shape', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, comment, + self._simulation_data.debug, ex) + if isinstance(data, list) or isinstance(data, np.ndarray): + try: + return np.reshape(data, dimensions).tolist() + except Exception as ex: + type_, value_, traceback_ = sys.exc_info() + comment = 'Could not reshape data to dimensions ' \ + '"{}".'.format(dimensions) + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'reshaping data', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, comment, + self._simulation_data.debug, ex) + else: + return data + + +class MFFileAccessList(MFFileAccess): + def __init__(self, structure, data_dimensions, simulation_data, path, + current_key): + super(MFFileAccessList, self).__init__( + structure, data_dimensions, simulation_data, path, current_key) + + def read_binary_data_from_file(self, read_file, modelgrid, + precision='double'): + # read from file + header, int_cellid_indexes, \ + ext_cellid_indexes = self._get_header(modelgrid, precision) + file_array = np.fromfile(read_file, dtype=header, count=-1) + # build data list for recarray + cellid_size = len(self._get_cell_header(modelgrid)) + data_list = [] + for record in file_array: + data_record = () + current_cellid_size = 0 + current_cellid = () + for index, data_item in enumerate(record): + if index in ext_cellid_indexes: + current_cellid += (data_item - 1,) + current_cellid_size += 1 + if current_cellid_size == cellid_size: + data_record += current_cellid + data_record = (data_record,) + current_cellid = () + current_cellid_size = 0 + else: + data_record += (data_item,) + data_list.append(data_record) + return data_list 
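The list accessor above shifts cellids by one in each direction: _build_data_array adds 1 to every cellid component before writing (MODFLOW 6 binary files use 1-based indices), and read_binary_data_from_file subtracts 1 as it rebuilds the record tuples (flopy keeps 0-based cellids in memory). A minimal round-trip sketch of that convention, assuming a structured grid and a hypothetical one-value record layout (the real dtype is assembled by _get_header and _get_cell_header); the names cellids_to_file and cellids_from_file are illustrative only:

    import numpy as np

    # hypothetical layout: 1-based (layer, row, col) cellid plus one value
    dtype = [('layer', np.int32), ('row', np.int32), ('col', np.int32),
             ('q', np.float64)]

    def cellids_to_file(records):
        # flopy stores 0-based cellid tuples; the binary file wants 1-based
        return np.array([(lay + 1, row + 1, col + 1, q)
                         for (lay, row, col), q in records], dtype=dtype)

    def cellids_from_file(arr):
        # undo the shift when reading the binary records back in
        return [((int(r['layer']) - 1, int(r['row']) - 1, int(r['col']) - 1),
                 float(r['q'])) for r in arr]

    recs = [((0, 4, 2), -150.0)]  # one entry in layer 1, row 5, column 3
    assert cellids_from_file(cellids_to_file(recs)) == recs

The same shift is what _resolve_cellid_numbers_to_file and _resolve_cellid_numbers_from_file apply on the array side when the leading data item is a cellid or numeric index.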
+ + def write_binary_file(self, data, fname, modelgrid=None, + precision='double'): + fd = self._open_ext_file(fname, binary=True, write=True) + data_array = self._build_data_array(data, modelgrid, precision) + data_array.tofile(fd) + fd.close() + + def _build_data_array(self, data, modelgrid, precision): + header, int_cellid_indexes,\ + ext_cellid_indexes = self._get_header(modelgrid, precision) + data_list = [] + for record in data: + new_record = () + for index, column in enumerate(record): + if index in int_cellid_indexes: + if isinstance(column, int): + new_record += (column + 1,) + else: + for item in column: + new_record += (item + 1,) + else: + new_record += (column,) + data_list.append(new_record) + return np.array(data_list, dtype=header) + + def _get_header(self, modelgrid, precision): + np_flt_type = np.float64 + header = [] + int_cellid_indexes = {} + ext_cellid_indexes = {} + ext_index = 0 + for index, di_struct in enumerate(self.structure.data_item_structures): + if di_struct.is_cellid: + cell_header = self._get_cell_header(modelgrid) + header += cell_header + int_cellid_indexes[index] = True + for index in range(ext_index, ext_index + len(cell_header)): + ext_cellid_indexes[index] = True + ext_index += len(cell_header) + elif not di_struct.optional: + header.append((di_struct.name, np_flt_type)) + ext_index += 1 + elif di_struct.name == 'aux': + aux_var_names = self._data_dimensions.package_dim.\ + get_aux_variables() + if aux_var_names is not None: + for aux_var_name in aux_var_names[0]: + if aux_var_name.lower() != 'auxiliary': + header.append((aux_var_name, np_flt_type)) + ext_index += 1 + return header, int_cellid_indexes, ext_cellid_indexes + + def _get_cell_header(self, modelgrid): + if modelgrid.grid_type == 'structured': + return [('layer', np.int32), ('row', np.int32), ('col', np.int32)] + elif modelgrid.grid_type == 'vertex_layered': + return [('layer', np.int32), ('ncpl', np.int32)] + else: + return [('nodes', np.int32)] + + def load_from_package(self, first_line, file_handle, storage, + pre_data_comments=None): + # lock things to maximize performance + self._data_dimensions.lock() + self._last_line_info = [] + self._data_line = None + + # read in any pre data comments + current_line = self._read_pre_data_comments(first_line, file_handle, + pre_data_comments, storage) + # reset data line delimiter so that the next split_data_line will + # automatically determine the delimiter + datautil.PyListUtil.reset_delimiter_used() + arr_line = datautil.PyListUtil.split_data_line(current_line) + if arr_line and (len(arr_line[0]) >= 2 and + arr_line[0][:3].upper() == 'END'): + return [False, arr_line] + if len(arr_line) >= 2 and arr_line[0].upper() == 'OPEN/CLOSE': + try: + storage.process_open_close_line(arr_line, (0,)) + except Exception as ex: + message = 'An error occurred while processing the following ' \ + 'open/close line: {}'.format(current_line) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), self._path, + 'processing open/close line', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, message, + self._simulation_data.debug, ex) + else: + have_newrec_line, newrec_line, self._data_line =\ + self.read_list_data_from_file(file_handle, storage, + self._current_key, + current_line, self._data_line) + return [have_newrec_line, newrec_line] + + # loop until end of block + line = ' ' + while line != '': + arr_line = self._get_next_data_line(file_handle) + if arr_line 
and (len(arr_line[0]) >= 2 and + arr_line[0][:3].upper() == 'END'): + # end of block + self._data_dimensions.unlock() + return [False, line] + self._data_dimensions.unlock() + return [False, None] + + def read_list_data_from_file(self, file_handle, storage, current_key, + current_line=None, data_line=None, + store_internal=True): + data_rec = None + data_loaded = [] + self._temp_dict = {} + self._last_line_info = [] + store_data = False + struct = self.structure + self.simple_line = \ + len(self._data_dimensions.package_dim.get_tsnames()) == 0 and \ + not struct.is_mname + for data_item in struct.data_item_structures: + if data_item.optional and data_item.name != 'boundname' and \ + data_item.name != 'aux': + self.simple_line = False + if current_line is None: + current_line = file_handle.readline() + PyListUtil.reset_delimiter_used() + arr_line = PyListUtil.split_data_line(current_line) + line_num = 0 + # read any pre-data commented lines + while current_line and MFComment.is_comment(arr_line, True): + arr_line.insert(0, '\n') + storage.add_data_line_comment(arr_line, line_num) + PyListUtil.reset_delimiter_used() + current_line = file_handle.readline() + arr_line = PyListUtil.split_data_line(current_line) + + try: + data_line = self._load_list_line( + storage, arr_line, line_num, data_loaded, True, + current_key=current_key, data_line=data_line)[1:] + line_num += 1 + store_data = True + except MFDataException as err: + # this could possibly be a constant line. + line = file_handle.readline() + arr_line = PyListUtil.split_data_line(line) + if len(arr_line) >= 2 and arr_line[0].upper() == 'CONSTANT' \ + and len(struct.data_item_structures) >= 2 and \ + struct.data_item_structures[0].name.upper() \ + == 'CELLID': + # store first line as a comment + if storage.pre_data_comments is None: + storage.pre_data_comments = \ + MFComment(current_line, struct.path, + self._simulation_data, 0) + else: + storage.pre_data_comments.add_text(current_line) + # store constant value for all cellids + storage.layer_storage.first_item().set_internal_constant() + if store_internal: + storage.store_internal( + convert_data(arr_line[1], self._data_dimensions, + struct.data_item_structures[1].type, + struct.data_item_structures[0]), + 0, const=True) + else: + data_rec = storage._build_recarray(arr_line[1], None, + True) + line = ' ' + while line != '': + line = file_handle.readline() + arr_line = PyListUtil.split_data_line(line) + if arr_line and (len(arr_line[0]) >= 2 and + arr_line[0][:3].upper() == 'END'): + return [False, line, data_line] + else: + # not a constant or open/close line, exception is valid + comment = 'Unable to process line 1 of data list: ' \ + '"{}"'.format(current_line) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(struct.get_model(), struct.get_package(), + struct.path, + 'loading data list from ' + 'package file', + struct.name, + inspect.stack()[0][3], type_, + value_, traceback_, comment, + self._simulation_data.debug, err) + + if struct.type == DatumType.record or struct.type == DatumType.string: + # records only contain a single line + storage.append_data(data_loaded) + storage.data_dimensions.unlock() + return [False, None, data_line] + + # get block recarray list for later processing + recarrays = [] + parent_block = struct.parent_block + if parent_block is not None: + recarrays = parent_block.get_all_recarrays() + recarray_len = len(recarrays) + + # loop until end of block + line = ' ' + optional_line_info = [] + line_info_processed = False + data_structs = 
struct.data_item_structures + while line != '': + line = file_handle.readline() + arr_line = PyListUtil.split_data_line(line) + if not line or (arr_line and len(arr_line[0]) >= 2 and + arr_line[0][:3].upper() == 'END'): + # end of block + if store_data: + if store_internal: + # store as rec array + storage.store_internal(data_loaded, None, False, + current_key) + storage.data_dimensions.unlock() + return [False, line, data_line] + else: + data_rec = storage._build_recarray(data_loaded, + current_key, True) + storage.data_dimensions.unlock() + return data_rec + if recarray_len != 1 and \ + not MFComment.is_comment(arr_line, True): + key = find_keyword(arr_line, struct.get_keywords()) + if key is None: + # unexpected text, may be start of another record + if store_data: + if store_internal: + storage.store_internal(data_loaded, None, False, + current_key) + storage.data_dimensions.unlock() + return [True, line, data_line] + else: + data_rec = storage._build_recarray(data_loaded, + current_key, + True) + storage.data_dimensions.unlock() + return data_rec + self.simple_line = self.simple_line \ + and self.structure.package_type != 'sfr' + if self.simple_line: + line_len = len(self._last_line_info) + if struct.num_optional > 0 and not line_info_processed: + line_info_processed = True + for index, data_item in \ + enumerate(struct.data_item_structures): + if index < line_len: + if data_item.optional: + self._last_line_info = \ + self._last_line_info[:index] + line_len = len(self._last_line_info) + optional_line_info.append(data_item) + else: + optional_line_info.append(data_item) + if MFComment.is_comment(arr_line, + True): + arr_line.insert(0, '\n') + storage.add_data_line_comment(arr_line, line_num) + else: + # do higher performance quick load + self._data_line = () + cellid_index = 0 + cellid_tuple = () + data_index = 0 + for index, entry in enumerate(self._last_line_info): + for sub_entry in entry: + if sub_entry[1] is not None: + if sub_entry[2] > 0: + # is a cellid + cellid_tuple += \ + (int(arr_line[sub_entry[0]]) - 1,) + # increment index + cellid_index += 1 + if cellid_index == sub_entry[2]: + # end of current cellid + self._data_line += (cellid_tuple,) + cellid_index = 0 + cellid_tuple = () + else: + # not a cellid + self._data_line += (convert_data( + arr_line[sub_entry[0]], + self._data_dimensions, + sub_entry[1], + data_structs[index]),) + else: + self._data_line += (None,) + data_index = sub_entry[0] + arr_line_len = len(arr_line) + if arr_line_len > data_index + 1: + # more data on the end of the line. 
see if it can + # be loaded as optional data + data_index += 1 + for data_item in struct.data_item_structures[ + len(self._last_line_info):]: + if arr_line_len <= data_index: + break + if len(arr_line[data_index]) > 0 and \ + arr_line[data_index][0] == '#': + break + elif data_item.name == 'aux': + data_index, self._data_line = \ + self._process_aux( + storage, arr_line, arr_line_len, + data_item, data_index, None, + current_key, self._data_line, + False)[0:2] + elif data_item.name == 'boundname' and \ + self._data_dimensions.package_dim.\ + boundnames(): + self._data_line += (convert_data( + arr_line[data_index], + self._data_dimensions, + data_item.type, + data_item),) + if arr_line_len > data_index + 1: + # FEATURE: Keep number of white space characters used + # in comments section + storage.comments[line_num] = MFComment( + ' '.join(arr_line[data_index + 1:]), struct.path, + self._simulation_data, line_num) + + data_loaded.append(self._data_line) + else: + try: + data_line = self._load_list_line( + storage, arr_line, line_num, data_loaded, False, + current_key=current_key, data_line=data_line)[1] + except Exception as ex: + comment = 'Unable to process line {} of data list: ' \ + '"{}"'.format(line_num + 1, line) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(struct.get_model(), + struct.get_package(), + struct.path, + 'loading data list from ' + 'package file', + struct.name, + inspect.stack()[0][3], type_, + value_, traceback_, comment, + self._simulation_data.debug, ex) + line_num += 1 + if store_data: + # store as rec array + storage.store_internal(data_loaded, None, False, current_key) + storage.data_dimensions.unlock() + if not store_internal: + return data_rec + else: + return [False, None, data_line] + + def _load_list_line(self, storage, arr_line, line_num, data_loaded, + build_type_list, current_key, data_index_start=0, + data_set=None, ignore_optional_vars=False, + data_line=None): + data_item_ks = None + struct = self.structure + org_data_line = data_line + # only initialize if we are at the start of a new line + if data_index_start == 0: + data_set = struct + # new line of data + data_line = () + # determine if at end of block + if arr_line and arr_line[0][:3].upper() == 'END': + self.enabled = True + return 0, data_line + data_index = data_index_start + arr_line_len = len(arr_line) + if MFComment.is_comment(arr_line, True) and data_index_start == 0: + arr_line.insert(0, '\n') + storage.add_data_line_comment(arr_line, line_num) + else: + # read variables + var_index = 0 + data = '' + for data_item_index, data_item in \ + enumerate(data_set.data_item_structures): + if not data_item.optional or not ignore_optional_vars: + if data_item.name == 'aux': + data_index, data_line = \ + self._process_aux(storage, arr_line, arr_line_len, + data_item, data_index, var_index, + current_key, data_line)[0:2] + # optional mname data items are only specified if the + # package is part of a model + elif not data_item.optional or \ + data_item.name[0:5] != 'mname' or \ + not storage.in_model: + if data_item.type == DatumType.keyword: + data_index += 1 + self.simple_line = False + elif data_item.type == DatumType.record: + # this is a record within a record, recurse into + # _load_line to load it + data_index, data_line = \ + self._load_list_line( + storage, arr_line, line_num, data_loaded, + build_type_list, current_key, data_index, + data_item, False, data_line=data_line) + self.simple_line = False + elif data_item.name != 'boundname' or \ + 
self._data_dimensions.package_dim.boundnames(): + if data_item.optional and data == '#': + # comment mark found and expecting optional + # data_item, we are done + break + if data_index >= arr_line_len and data_item.optional: + break + more_data_expected = True + unknown_repeats = False + repeat_count = 0 + while more_data_expected or unknown_repeats: + if data_index >= arr_line_len: + if data_item.optional or unknown_repeats: + break + elif struct.num_optional >= \ + len(data_set.data_item_structures)\ + - data_item_index: + # there are enough optional variables + # to account for the lack of data + # reload line with all optional + # variables ignored + data_line = org_data_line + return self._load_list_line( + storage, arr_line, line_num, + data_loaded, build_type_list, + current_key, data_index_start, + data_set, True, data_line=data_line) + else: + comment = 'Not enough data provided ' \ + 'for {}. Data for required ' \ + 'data item "{}" not ' \ + 'found'.format(struct.name, + data_item. + name) + type_, value_, \ + traceback_ = sys.exc_info() + raise MFDataException( + struct.get_model(), + struct.get_package(), + struct.path, + 'loading data list from ' + 'package file', + struct.name, + inspect.stack()[0][3], type_, + value_, traceback_, comment, + self._simulation_data.debug) + + data = arr_line[data_index] + repeat_count += 1 + if data_item.type == DatumType.keystring: + self.simple_line = False + if repeat_count <= 1: # only process the + # keyword on the first repeat find + # data item associated with correct + # keystring + name_data = data.lower() + if name_data not in \ + data_item.keystring_dict: + name_data = '{}record'.format( + name_data) + if name_data not in \ + data_item.keystring_dict: + # data does not match any + # expected keywords + if self._simulation_data.\ + verbosity_level.value >= \ + VerbosityLevel.normal.\ + value: + print('WARNING: Failed to ' + 'process line {}. 
' + 'Line does not match' + ' expected keystring' + ' {}'.format( + ' '.join(arr_line), + data_item.name)) + break + data_item_ks = \ + data_item.keystring_dict[ + name_data] + if data_item_ks == 0: + comment = 'Could not find ' \ + 'keystring ' \ + '{}.'.format(name_data) + type_, value_, \ + traceback_ = sys.exc_info() + raise MFDataException( + struct.get_model(), + struct.get_package(), + struct.path, + 'loading data list from ' + 'package file', + struct.name, + inspect.stack()[0][3], type_, + value_, traceback_, comment, + self._simulation_data.debug) + + # keyword is always implied in a + # keystring and should be stored, + # add a string data_item for the + # keyword + if data_item.name in \ + self._temp_dict: + # used cached data item for + # performance + keyword_data_item = \ + self._temp_dict[data_item.name] + else: + keyword_data_item = \ + deepcopy(data_item) + keyword_data_item.type = \ + DatumType.string + self._temp_dict[data_item.name] \ + = keyword_data_item + data_index, more_data_expected, \ + data_line, unknown_repeats = \ + self._append_data_list( + storage, + keyword_data_item, + arr_line, + arr_line_len, + data_index, + var_index, + repeat_count, + current_key, + data_line) + if isinstance(data_item_ks, + MFDataStructure): + dis = \ + data_item_ks.data_item_structures + for ks_data_item in dis: + if ks_data_item.type != \ + DatumType.keyword \ + and data_index < \ + arr_line_len: + # data item contains additional + # information + data_index, more_data_expected, \ + data_line, unknown_repeats = \ + self._append_data_list( + storage, + ks_data_item, + arr_line, + arr_line_len, + data_index, + var_index, + repeat_count, + current_key, + data_line) + while data_index < arr_line_len: + try: + # append remaining data + # (temporary fix) + data_index, more_data_expected, \ + data_line, unknown_repeats = \ + self._append_data_list( + storage, + ks_data_item, + arr_line, + arr_line_len, + data_index, + var_index, + repeat_count, + current_key, + data_line) + except MFDataException: + break + else: + if data_item_ks.type != \ + DatumType.keyword: + data_index, more_data_expected, \ + data_line, unknown_repeats = \ + self._append_data_list( + storage, data_item_ks, arr_line, + arr_line_len, data_index, + var_index, repeat_count, + current_key, data_line) + else: + # append empty data as a placeholder. + # this is necessarily to keep the + # recarray a consistent shape + data_line = \ + data_line + (None,) + data_index += 1 + else: + if data_item.tagged and repeat_count == 1: + # data item tagged, include data item + # name as a keyword + di_type = data_item.type + data_item.type = DatumType.keyword + data_index, more_data_expected, \ + data_line, unknown_repeats = \ + self._append_data_list( + storage, data_item, arr_line, + arr_line_len, data_index, + var_index, repeat_count, + current_key, data_line) + data_item.type = di_type + data_index, more_data_expected, \ + data_line, unknown_repeats = \ + self._append_data_list( + storage, data_item, arr_line, + arr_line_len, data_index, var_index, + repeat_count, current_key, + data_line) + if more_data_expected is None: + # indeterminate amount of data expected. 
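# --- Editorial aside, not part of the patch: a minimal sketch of the
# --- keystring lookup performed above. A token read from the package file
# --- is matched against the data item's keystring_dict, retrying with the
# --- '<name>record' form before giving up; on failure the caller prints a
# --- WARNING and skips the rest of the line. The function name here is
# --- hypothetical.
def _resolve_keystring(token, keystring_dict):
    name_data = token.lower()
    if name_data not in keystring_dict:
        # record-style keystrings are registered under '<keyword>record'
        name_data = '{}record'.format(name_data)
    if name_data not in keystring_dict:
        return None  # no match; caller warns and breaks out of the loop
    return keystring_dict[name_data]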
+ # keep reading data until eoln + more_data_expected = \ + (data_index < arr_line_len) + self.simple_line = self.simple_line and \ + not unknown_repeats and \ + (len(data_item.shape) == 0 or + data_item.is_cellid) + var_index += 1 + + # populate unused optional variables with None type + for data_item in data_set.data_item_structures[var_index:]: + if data_item.name == 'aux': + data_line = self._process_aux( + storage, arr_line, arr_line_len, data_item, data_index, + var_index, current_key, data_line)[1] + elif data_item.name != 'boundname' or \ + self._data_dimensions.package_dim.boundnames(): + data_index, more_data_expected, data_line, \ + unknown_repeats = self._append_data_list( + storage, data_item, None, 0, data_index, var_index, 1, + current_key, data_line) + + # only do final processing on outer-most record + if data_index_start == 0: + # if more pieces exist + if arr_line_len > data_index + 1: + # FEATURE: Keep number of white space characters used in + # comments section + storage.comments[line_num] = MFComment( + ' '.join(arr_line[data_index+1:]), struct.path, + self._simulation_data, line_num) + data_loaded.append(data_line) + return data_index, data_line + + def _process_aux(self, storage, arr_line, arr_line_len, data_item, + data_index, var_index, current_key, data_line, + add_to_last_line=True): + aux_var_names = self._data_dimensions.package_dim.get_aux_variables() + more_data_expected = False + if aux_var_names is not None: + for var_name in aux_var_names[0]: + if var_name.lower() != 'auxiliary': + if data_index >= arr_line_len: + # store placeholder None + data_index, more_data_expected, data_line = \ + self._append_data_list( + storage, data_item, None, 0, data_index, + var_index, 1, current_key, data_line, + add_to_last_line)[0:3] + else: + # read in aux variables + data_index, more_data_expected, data_line = \ + self._append_data_list( + storage, data_item, arr_line, arr_line_len, + data_index, var_index, 0, current_key, + data_line, add_to_last_line)[0:3] + return data_index, data_line, more_data_expected + + def _append_data_list(self, storage, data_item, arr_line, arr_line_len, + data_index, var_index, repeat_count, current_key, + data_line, add_to_last_line=True): + # append to a 2-D list which will later be converted to a numpy + # rec array + struct = self.structure + if add_to_last_line: + self._last_line_info.append([]) + if data_item.is_cellid or (data_item.possible_cellid and + storage._validate_cellid( + arr_line, data_index)): + if self._data_dimensions is None: + comment = 'CellID field specified in for data ' \ + '"{}" field "{}" which does not contain a model '\ + 'grid. This could be due to a problem with ' \ + 'the flopy definition files. 
Please get the ' \
+ 'latest flopy definition files' \
+ '.'.format(struct.name, data_item.name)
+ type_, value_, traceback_ = sys.exc_info()
+ raise MFDataException(struct.get_model(),
+ struct.get_package(), struct.path,
+ 'loading data list from package file',
+ struct.name,
+ inspect.stack()[0][3], type_, value_,
+ traceback_, comment,
+ self._simulation_data.debug)
+ # read in the entire cellid
+ model_grid = self._data_dimensions.get_model_grid()
+ cellid_size = model_grid.get_num_spatial_coordinates()
+ cellid_tuple = ()
+ if not DatumUtil.is_int(arr_line[data_index]) and \
+ arr_line[data_index].lower() == 'none':
+ # special case where cellid is 'none', store as 'none'
+ cellid_tuple = 'none'
+ if add_to_last_line:
+ self._last_line_info[-1].append([data_index,
+ data_item.type,
+ cellid_size])
+ new_index = data_index + 1
+ else:
+ # handle regular cellid
+ if cellid_size + data_index > arr_line_len:
+ comment = 'Not enough data found when reading cell ID ' \
+ 'in data "{}" field "{}". Expected {} items ' \
+ 'and found {} items'\
+ '.'.format(struct.name,
+ data_item.name, cellid_size,
+ arr_line_len - data_index)
+ type_, value_, traceback_ = sys.exc_info()
+ raise MFDataException(struct.get_model(),
+ struct.get_package(),
+ struct.path,
+ 'loading data list from package '
+ 'file', struct.name,
+ inspect.stack()[0][3], type_, value_,
+ traceback_, comment,
+ self._simulation_data.debug)
+ for index in range(data_index, cellid_size + data_index):
+ if not DatumUtil.is_int(arr_line[index]) or \
+ int(arr_line[index]) < 0:
+ comment = 'Expected an integer or cell ID in ' \
+ 'data "{}" field "{}". Found {} ' \
+ 'in line "{}"' \
+ '. '.format(struct.name,
+ data_item.name, arr_line[index],
+ arr_line)
+ type_, value_, traceback_ = sys.exc_info()
+ raise MFDataException(struct.get_model(),
+ struct.get_package(),
+ struct.path,
+ 'loading data list from package '
+ 'file', struct.name,
+ inspect.stack()[0][3], type_,
+ value_,
+ traceback_, comment,
+ self._simulation_data.debug)
+
+ data_converted = convert_data(arr_line[index],
+ self._data_dimensions,
+ data_item.type)
+ cellid_tuple = cellid_tuple + (int(data_converted) - 1,)
+ if add_to_last_line:
+ self._last_line_info[-1].append([index,
+ data_item.type,
+ cellid_size])
+ new_index = data_index + cellid_size
+ data_line = data_line + (cellid_tuple,)
+ if data_item.shape is not None and len(data_item.shape) > 0 and \
+ data_item.shape[0] == 'ncelldim':
+ # shape is the coordinate shape, which has already been read
+ more_data_expected = False
+ unknown_repeats = False
+ else:
+ more_data_expected, unknown_repeats = \
+ storage.resolve_shape_list(
+ data_item, repeat_count, current_key, data_line)
+ return new_index, more_data_expected, data_line, unknown_repeats
+ else:
+ if arr_line is None:
+ data_converted = None
+ if add_to_last_line:
+ self._last_line_info[-1].append([data_index,
+ data_item.type, 0])
+ else:
+ if arr_line[data_index].lower() in \
+ self._data_dimensions.package_dim.get_tsnames():
+ # references a time series, store as is
+ data_converted = arr_line[data_index].lower()
+ # override recarray data type to support writing
+ # string values
+ storage.override_data_type(var_index, object)
+ if add_to_last_line:
+ self._last_line_info[-1].append([data_index,
+ DatumType.string, 0])
+ else:
+ data_converted = convert_data(arr_line[data_index],
+ self._data_dimensions,
+ data_item.type,
+ data_item)
+ if add_to_last_line:
+ self._last_line_info[-1].append([data_index,
+ data_item.type, 0])
+ data_line = 
data_line + (data_converted,) + more_data_expected, unknown_repeats = \ + storage.resolve_shape_list( + data_item, repeat_count, current_key, data_line) + return data_index + 1, more_data_expected, data_line, \ + unknown_repeats + + +class MFFileAccessScalar(MFFileAccess): + def __init__(self, structure, data_dimensions, simulation_data, path, + current_key): + super(MFFileAccessScalar, self).__init__( + structure, data_dimensions, simulation_data, path, current_key) + + def load_from_package(self, first_line, file_handle, storage, data_type, + keyword, pre_data_comments=None): + # read in any pre data comments + current_line = self._read_pre_data_comments(first_line, file_handle, + pre_data_comments, storage) + + datautil.PyListUtil.reset_delimiter_used() + arr_line = datautil.PyListUtil.\ + split_data_line(current_line) + # verify keyword + index_num = self._load_keyword(arr_line, 0, keyword)[0] + + # store data + datatype = self.structure.get_datatype() + if self.structure.type == DatumType.record: + index = 0 + for data_item_type in self.structure.get_data_item_types(): + optional = self.structure.data_item_structures[index].optional + if len(arr_line) <= index + 1 or \ + data_item_type[0] != DatumType.keyword or (index > 0 + and optional == True): + break + index += 1 + first_type = self.structure.get_data_item_types()[0] + if first_type[0] == DatumType.keyword: + converted_data = [True] + else: + converted_data = [] + if first_type[0] != DatumType.keyword or index == 1: + if self.structure.get_data_item_types()[1] != \ + DatumType.keyword or arr_line[index].lower == \ + self.structure.data_item_structures[index].name: + try: + converted_data.append(convert_data( + arr_line[index], + self._data_dimensions, + self.structure.data_item_structures[index].type, + self.structure.data_item_structures[0])) + except Exception as ex: + message = 'Could not convert "{}" of type "{}" ' \ + 'to a string.'.format( + arr_line[index], + self.structure.data_item_structures[index]. + type) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'converting data to string', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, message, + self._simulation_data.debug, ex) + try: + storage.set_data(converted_data, key=self._current_key) + index_num += 1 + except Exception as ex: + message = 'Could not set data "{}" with key ' \ + '"{}".'.format(converted_data, self._current_key) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'setting data', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, message, + self._simulation_data.debug, ex) + elif datatype == DataType.scalar_keyword or \ + datatype == DataType.scalar_keyword_transient: + # store as true + try: + storage.set_data(True, key=self._current_key) + except Exception as ex: + message = 'Could not set data "True" with key ' \ + '"{}".'.format(self._current_key) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'setting data', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, message, + self._simulation_data.debug, ex) + else: + data_item_struct = self.structure.data_item_structures[0] + if len(arr_line) < 1 + index_num: + message = 'Error reading variable "{}". 
Expected data ' \ + 'after label "{}" not found at line ' \ + '"{}".'.format(self.structure.name, + data_item_struct.name.lower(), + current_line) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'loading data from file', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, message, + self._simulation_data.debug) + try: + converted_data = convert_data(arr_line[index_num], + self._data_dimensions, + data_type, data_item_struct) + except Exception as ex: + message = 'Could not convert "{}" of type "{}" ' \ + 'to a string.'.format(arr_line[index_num], + data_type) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'converting data to string', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, message, + self._simulation_data.debug, ex) + try: + # read next word as data + storage.set_data(converted_data, key=self._current_key) + except Exception as ex: + message = 'Could not set data "{}" with key ' \ + '"{}".'.format(converted_data, self._current_key) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'setting data', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, message, + self._simulation_data.debug, ex) + index_num += 1 + + if len(arr_line) > index_num: + # save remainder of line as comment + storage.add_data_line_comment(arr_line[index_num:], 0) + return [False, None] diff --git a/flopy/mf6/data/mfstructure.py b/flopy/mf6/data/mfstructure.py index 007a94f6e6..4c66c1ddaf 100644 --- a/flopy/mf6/data/mfstructure.py +++ b/flopy/mf6/data/mfstructure.py @@ -1,2236 +1,2236 @@ -""" -mfstructure module. Contains classes related to package structure - - -""" -import os -import traceback -import ast -import keyword -from enum import Enum -from textwrap import TextWrapper -from collections import OrderedDict -import numpy as np -from ..mfbase import PackageContainer, StructException - - -numeric_index_text = 'This argument is an index variable, which means that ' \ - 'it should be treated as zero-based when working with ' \ - 'FloPy and Python. Flopy will automatically subtract ' \ - 'one when loading index variables and add one when ' \ - 'writing index variables.' - - -class DfnType(Enum): - common = 1 - sim_name_file = 2 - sim_tdis_file = 3 - ims_file = 4 - exch_file = 5 - model_name_file = 6 - model_file = 7 - gnc_file = 8 - mvr_file = 9 - utl = 10 - unknown = 999 - - -class Dfn(object): - """ - Base class for package file definitions - - Attributes - ---------- - dfndir : path - folder containing package definition files (dfn) - common : path - file containing common information - multi_package : dict - contains the names of all packages that are allowed to have multiple - instances in a model/simulation - - Methods - ------- - get_file_list : () : list - returns all of the dfn files found in dfndir. 
files are returned in - a specified order defined in the local variable file_order - - See Also - -------- - - Notes - ----- - - Examples - ---- - """ - - def __init__(self): - # directories - self.dfndir = os.path.join('.', 'dfn') - self.common = os.path.join(self.dfndir, 'common.dfn') - # FIX: Transport - multi packages are hard coded - self.multi_package = {'exggwfgwf': 0, 'gwfchd': 0, 'gwfwel': 0, - 'gwfdrn': 0, 'gwfriv': 0, 'gwfghb': 0, - 'gwfrch': 0, 'gwfevt': 0, 'gwfmaw': 0, - 'gwfsfr': 0, 'gwflak': 0, 'gwfuzf': 0, - 'lnfcgeo': 0, 'lnfrgeo': 0, 'lnfngeo': 0, - 'utlobs': 0, 'utlts': 0, 'utltas': 0} - - def get_file_list(self): - file_order = ['sim-nam', # dfn completed tex updated - 'sim-tdis', # dfn completed tex updated - 'exg-gwfgwf', # dfn completed tex updated - 'sln-ims', # dfn completed tex updated - 'gwf-nam', # dfn completed tex updated - 'gwf-dis', # dfn completed tex updated - 'gwf-disv', # dfn completed tex updated - 'gwf-disu', # dfn completed tex updated - 'lnf-disl', # dfn completed tex updated - 'gwf-ic', # dfn completed tex updated - 'gwf-npf', # dfn completed tex updated - 'gwf-sto', # dfn completed tex updated - 'gwf-hfb', # dfn completed tex updated - 'gwf-chd', # dfn completed tex updated - 'gwf-wel', # dfn completed tex updated - 'gwf-drn', # dfn completed tex updated - 'gwf-riv', # dfn completed tex updated - 'gwf-ghb', # dfn completed tex updated - 'gwf-rch', # dfn completed tex updated - 'gwf-rcha', # dfn completed tex updated - 'gwf-evt', # dfn completed tex updated - 'gwf-evta', # dfn completed tex updated - 'gwf-maw', # dfn completed tex updated - 'gwf-sfr', # dfn completed tex updated - 'gwf-lak', # dfn completed tex updated - 'gwf-uzf', # dfn completed tex updated - 'gwf-mvr', # dfn completed tex updated - 'gwf-gnc', # dfn completed tex updated - 'gwf-oc', # dfn completed tex updated - 'utl-obs', - 'utl-ts', - 'utl-tab', - 'utl-tas'] - - dfn_path, tail = os.path.split(os.path.realpath(__file__)) - dfn_path = os.path.join(dfn_path, 'dfn') - # construct list of dfn files to process in the order of file_order - files = os.listdir(dfn_path) - for f in files: - if 'common' in f or 'flopy' in f: - continue - package_abbr = os.path.splitext(f)[0] - if package_abbr not in file_order: - file_order.append(package_abbr) - return [fname + '.dfn' for fname in file_order if - fname + '.dfn' in files] - - def _file_type(self, file_name): - # determine file type - if len(file_name) >= 6 and file_name[0:6] == 'common': - return DfnType.common, None - elif file_name[0:3] == 'sim': - if file_name[3:6] == 'nam': - return DfnType.sim_name_file, None - elif file_name[3:7] == 'tdis': - return DfnType.sim_tdis_file, None - else: - return DfnType.unknown, None - elif file_name[0:3] == 'nam': - return DfnType.sim_name_file, None - elif file_name[0:4] == 'tdis': - return DfnType.sim_tdis_file, None - elif file_name[0:3] == 'sln' or file_name[0:3] == 'ims': - return DfnType.ims_file, None - elif file_name[0:3] == 'exg': - return DfnType.exch_file, file_name[3:6] - elif file_name[0:3] == 'utl': - return DfnType.utl, None - else: - model_type = file_name[0:3] - if file_name[3:6] == 'nam': - return DfnType.model_name_file, model_type - elif file_name[3:6] == 'gnc': - return DfnType.gnc_file, model_type - elif file_name[3:6] == 'mvr': - return DfnType.mvr_file, model_type - else: - return DfnType.model_file, model_type - - -class DfnPackage(Dfn): - """ - Dfn child class that loads dfn information from a list structure stored - in the auto-built package classes - - Attributes - 
---------- - package : MFPackage - MFPackage subclass that contains dfn information - - Methods - ------- - multi_package_support : () : bool - returns flag for multi-package support - get_block_structure_dict : (path : tuple, common : bool, model_file : - bool) : dict - returns a dictionary of block structure information for the package - - See Also - -------- - - Notes - ----- - - Examples - ---- - """ - - def __init__(self, package): - super(DfnPackage, self).__init__() - self.package = package - self.package_type = package._package_type - self.dfn_file_name = package.dfn_file_name - # the package type is always the text after the last - - package_name = self.package_type.split('-') - self.package_type = package_name[-1] - if not isinstance(package_name, str) and \ - len(package_name) > 1: - self.package_prefix = ''.join(package_name[:-1]) - else: - self.package_prefix = '' - self.dfn_type, \ - self.model_type = self._file_type(self.dfn_file_name.replace('-', '')) - self.dfn_list = package.dfn - - def multi_package_support(self): - return self.package.package_abbr in self.multi_package - - def get_block_structure_dict(self, path, common, model_file): - block_dict = OrderedDict() - dataset_items_in_block = {} - self.dataset_items_needed_dict = {} - keystring_items_needed_dict = {} - current_block = None - - for dfn_entry in self.dfn_list: - # load next data item - new_data_item_struct = MFDataItemStructure() - for next_line in dfn_entry: - new_data_item_struct.set_value(next_line, common) - # if block does not exist - if current_block is None or \ - current_block.name != new_data_item_struct.block_name: - # create block - current_block = MFBlockStructure( - new_data_item_struct.block_name, path, model_file) - # put block in block_dict - block_dict[current_block.name] = current_block - # init dataset item lookup - self.dataset_items_needed_dict = {} - dataset_items_in_block = {} - - # resolve block type - if len(current_block.block_header_structure) > 0: - if len(current_block.block_header_structure[ - 0].data_item_structures) > 0 and \ - current_block.block_header_structure[ - 0].data_item_structures[ - 0].type == DatumType.integer: - block_type = BlockType.transient - else: - block_type = BlockType.multiple - else: - block_type = BlockType.single - - if new_data_item_struct.block_variable: - block_dataset_struct = MFDataStructure( - new_data_item_struct, model_file, self.package_type, - self.dfn_list) - block_dataset_struct.parent_block = current_block - self._process_needed_data_items(block_dataset_struct, - dataset_items_in_block) - block_dataset_struct.set_path( - path + (new_data_item_struct.block_name,)) - block_dataset_struct.add_item(new_data_item_struct) - current_block.add_dataset(block_dataset_struct) - else: - new_data_item_struct.block_type = block_type - dataset_items_in_block[ - new_data_item_struct.name] = new_data_item_struct - - # if data item belongs to existing dataset(s) - item_location_found = False - if new_data_item_struct.name in \ - self.dataset_items_needed_dict: - if new_data_item_struct.type == DatumType.record: - # record within a record - create a data set in - # place of the data item - new_data_item_struct = self._new_dataset( - new_data_item_struct, current_block, - dataset_items_in_block, path, - model_file, False) - new_data_item_struct.record_within_record = True - - for dataset in self.dataset_items_needed_dict[ - new_data_item_struct.name]: - item_added = dataset.add_item(new_data_item_struct, - record=True) - item_location_found = 
item_location_found or \ - item_added - # if data item belongs to an existing keystring - if new_data_item_struct.name in \ - keystring_items_needed_dict: - new_data_item_struct.set_path( - keystring_items_needed_dict[ - new_data_item_struct.name].path) - if new_data_item_struct.type == DatumType.record: - # record within a keystring - create a data set in - # place of the data item - new_data_item_struct = self._new_dataset( - new_data_item_struct, current_block, - dataset_items_in_block, path, - model_file, False) - keystring_items_needed_dict[ - new_data_item_struct.name].keystring_dict[ - new_data_item_struct.name] \ - = new_data_item_struct - item_location_found = True - - if new_data_item_struct.type == DatumType.keystring: - # add keystrings to search list - for key, val in \ - new_data_item_struct.keystring_dict.items(): - keystring_items_needed_dict[ - key] = new_data_item_struct - - # if data set does not exist - if not item_location_found: - self._new_dataset(new_data_item_struct, current_block, - dataset_items_in_block, - path, model_file, True) - if current_block.name.upper() == 'SOLUTIONGROUP' and \ - len(current_block.block_header_structure) == 0: - # solution_group a special case for now - block_data_item_struct = MFDataItemStructure() - block_data_item_struct.name = 'order_num' - block_data_item_struct.data_items = ['order_num'] - block_data_item_struct.type = DatumType.integer - block_data_item_struct.longname = 'order_num' - block_data_item_struct.description = \ - 'internal variable to keep track of ' \ - 'solution group number' - block_dataset_struct = MFDataStructure( - block_data_item_struct, model_file, - self.package_type, self.dfn_list) - block_dataset_struct.parent_block = current_block - block_dataset_struct.set_path( - path + (new_data_item_struct.block_name,)) - block_dataset_struct.add_item( - block_data_item_struct) - current_block.add_dataset(block_dataset_struct) - return block_dict - - def _new_dataset(self, new_data_item_struct, current_block, - dataset_items_in_block, - path, model_file, add_to_block=True): - current_dataset_struct = MFDataStructure(new_data_item_struct, - model_file, self.package_type, - self.dfn_list) - current_dataset_struct.set_path( - path + (new_data_item_struct.block_name,)) - self._process_needed_data_items(current_dataset_struct, - dataset_items_in_block) - if add_to_block: - # add dataset - current_block.add_dataset(current_dataset_struct) - current_dataset_struct.parent_block = current_block - current_dataset_struct.add_item(new_data_item_struct) - return current_dataset_struct - - def _process_needed_data_items(self, current_dataset_struct, - dataset_items_in_block): - # add data items needed to dictionary - for item_name, val in \ - current_dataset_struct.expected_data_items.items(): - if item_name in dataset_items_in_block: - current_dataset_struct.add_item( - dataset_items_in_block[item_name]) - else: - if item_name in self.dataset_items_needed_dict: - self.dataset_items_needed_dict[item_name].append( - current_dataset_struct) - else: - self.dataset_items_needed_dict[item_name] = [ - current_dataset_struct] - - -class DfnFile(Dfn): - """ - Dfn child class that loads dfn information from a package definition (dfn) - file - - Attributes - ---------- - file : str - name of the file to be loaded - - Methods - ------- - multi_package_support : () : bool - returns flag for multi-package support - dict_by_name : {} : dict - returns a dictionary of data item descriptions from the dfn file with - the data item name as the 
dictionary key - get_block_structure_dict : (path : tuple, common : bool, model_file : - bool) : dict - returns a dictionary of block structure information for the package - - See Also - -------- - - Notes - ----- - - Examples - ---- - """ - - def __init__(self, file): - super(DfnFile, self).__init__() - - dfn_path, tail = os.path.split(os.path.realpath(__file__)) - dfn_path = os.path.join(dfn_path, 'dfn') - self._file_path = os.path.join(dfn_path, file) - self.dfn_file_name = file - self.dfn_type, \ - self.model_type = self._file_type(self.dfn_file_name.replace('-', '')) - self.package_type = os.path.splitext(file[4:])[0] - # the package type is always the text after the last - - package_name = self.package_type.split('-') - self.package_type = package_name[-1] - if not isinstance(package_name, str) and \ - len(package_name) > 1: - self.package_prefix = ''.join(package_name[:-1]) - else: - self.package_prefix = '' - self.file = file - self.dataset_items_needed_dict = {} - self.dfn_list = [] - - def multi_package_support(self): - base_file = os.path.splitext(self.file)[0] - base_file = base_file.replace('-', '') - return base_file in self.multi_package - - def dict_by_name(self): - name_dict = OrderedDict() - name = None - dfn_fp = open(self._file_path, 'r') - for line in dfn_fp: - if self._valid_line(line): - arr_line = line.strip().split() - if arr_line[0] == 'name': - name = arr_line[1] - elif arr_line[0] == 'description' and name is not None: - name_dict[name] = ' '.join(arr_line[1:]) - dfn_fp.close() - return name_dict - - def get_block_structure_dict(self, path, common, model_file): - self.dfn_list = [] - block_dict = OrderedDict() - dataset_items_in_block = {} - self.dataset_items_needed_dict = {} - keystring_items_needed_dict = {} - current_block = None - dfn_fp = open(self._file_path, 'r') - - for line in dfn_fp: - if self._valid_line(line): - # load next data item - new_data_item_struct = MFDataItemStructure() - new_data_item_struct.set_value(line, common) - self.dfn_list.append([line]) - for next_line in dfn_fp: - if self._empty_line(next_line): - break - if self._valid_line(next_line): - new_data_item_struct.set_value(next_line, common) - self.dfn_list[-1].append(next_line) - - # if block does not exist - if current_block is None or \ - current_block.name != new_data_item_struct.block_name: - # create block - current_block = MFBlockStructure( - new_data_item_struct.block_name, path, model_file) - # put block in block_dict - block_dict[current_block.name] = current_block - # init dataset item lookup - self.dataset_items_needed_dict = {} - dataset_items_in_block = {} - - # resolve block type - if len(current_block.block_header_structure) > 0: - if len(current_block.block_header_structure[ - 0].data_item_structures) > 0 and \ - current_block.block_header_structure[ - 0].data_item_structures[0].type == \ - DatumType.integer: - block_type = BlockType.transient - else: - block_type = BlockType.multiple - else: - block_type = BlockType.single - - if new_data_item_struct.block_variable: - block_dataset_struct = MFDataStructure( - new_data_item_struct, model_file, self.package_type, - self.dfn_list) - block_dataset_struct.parent_block = current_block - self._process_needed_data_items(block_dataset_struct, - dataset_items_in_block) - block_dataset_struct.set_path( - path + (new_data_item_struct.block_name,)) - block_dataset_struct.add_item(new_data_item_struct, False, - self.dfn_list) - current_block.add_dataset(block_dataset_struct) - else: - new_data_item_struct.block_type = block_type 
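# --- Editorial aside, not part of the patch: the block-type rule that
# --- get_block_structure_dict applies above, restated as a sketch. A block
# --- with no header variables occurs once; a block whose first header
# --- variable is an integer (e.g. the stress period number of a PERIOD
# --- block) is transient; any other header makes the block repeatable.
# --- The function name is hypothetical.
def _resolve_block_type(current_block):
    headers = current_block.block_header_structure
    if len(headers) == 0:
        return BlockType.single
    items = headers[0].data_item_structures
    if len(items) > 0 and items[0].type == DatumType.integer:
        return BlockType.transient
    return BlockType.multiple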
- dataset_items_in_block[ - new_data_item_struct.name] = new_data_item_struct - - # if data item belongs to existing dataset(s) - item_location_found = False - if new_data_item_struct.name in \ - self.dataset_items_needed_dict: - if new_data_item_struct.type == DatumType.record: - # record within a record - create a data set in - # place of the data item - new_data_item_struct = self._new_dataset( - new_data_item_struct, current_block, - dataset_items_in_block, path, - model_file, False) - new_data_item_struct.record_within_record = True - - for dataset in self.dataset_items_needed_dict[ - new_data_item_struct.name]: - item_added = dataset.add_item(new_data_item_struct, - True, self.dfn_list) - item_location_found = item_location_found or \ - item_added - # if data item belongs to an existing keystring - if new_data_item_struct.name in \ - keystring_items_needed_dict: - new_data_item_struct.set_path( - keystring_items_needed_dict[ - new_data_item_struct.name].path) - if new_data_item_struct.type == DatumType.record: - # record within a keystring - create a data set in - # place of the data item - new_data_item_struct = self._new_dataset( - new_data_item_struct, current_block, - dataset_items_in_block, path, - model_file, False) - keystring_items_needed_dict[ - new_data_item_struct.name].keystring_dict[ - new_data_item_struct.name] \ - = new_data_item_struct - item_location_found = True - - if new_data_item_struct.type == DatumType.keystring: - # add keystrings to search list - for key, val in \ - new_data_item_struct.keystring_dict.items(): - keystring_items_needed_dict[ - key] = new_data_item_struct - - # if data set does not exist - if not item_location_found: - self._new_dataset(new_data_item_struct, current_block, - dataset_items_in_block, - path, model_file, True) - if current_block.name.upper() == 'SOLUTIONGROUP' and \ - len(current_block.block_header_structure) == 0: - # solution_group a special case for now - block_data_item_struct = MFDataItemStructure() - block_data_item_struct.name = 'order_num' - block_data_item_struct.data_items = ['order_num'] - block_data_item_struct.type = DatumType.integer - block_data_item_struct.longname = 'order_num' - block_data_item_struct.description = \ - 'internal variable to keep track of ' \ - 'solution group number' - block_dataset_struct = MFDataStructure( - block_data_item_struct, model_file, - self.package_type, self.dfn_list) - block_dataset_struct.parent_block = current_block - block_dataset_struct.set_path( - path + (new_data_item_struct.block_name,)) - block_dataset_struct.add_item( - block_data_item_struct, False, self.dfn_list) - current_block.add_dataset(block_dataset_struct) - dfn_fp.close() - return block_dict - - def _new_dataset(self, new_data_item_struct, current_block, - dataset_items_in_block, - path, model_file, add_to_block=True): - current_dataset_struct = MFDataStructure(new_data_item_struct, - model_file, self.package_type, - self.dfn_list) - current_dataset_struct.set_path( - path + (new_data_item_struct.block_name,)) - self._process_needed_data_items(current_dataset_struct, - dataset_items_in_block) - if add_to_block: - # add dataset - current_block.add_dataset(current_dataset_struct) - current_dataset_struct.parent_block = current_block - current_dataset_struct.add_item(new_data_item_struct, False, - self.dfn_list) - return current_dataset_struct - - def _process_needed_data_items(self, current_dataset_struct, - dataset_items_in_block): - # add data items needed to dictionary - for item_name, val in \ - 
current_dataset_struct.expected_data_items.items(): - if item_name in dataset_items_in_block: - current_dataset_struct.add_item( - dataset_items_in_block[item_name], False, self.dfn_list) - else: - if item_name in self.dataset_items_needed_dict: - self.dataset_items_needed_dict[item_name].append( - current_dataset_struct) - else: - self.dataset_items_needed_dict[item_name] = [ - current_dataset_struct] - - def _valid_line(self, line): - if len(line.strip()) > 1 and line[0] != '#': - return True - return False - - def _empty_line(self, line): - if len(line.strip()) <= 1: - return True - return False - - -class DataType(Enum): - """ - Types of data that can be found in a package file - """ - scalar_keyword = 1 - scalar = 2 - array = 3 - array_transient = 4 - list = 5 - list_transient = 6 - list_multiple = 7 - scalar_transient = 8 - scalar_keyword_transient = 9 - - -class DatumType(Enum): - """ - Types of individual pieces of data - """ - keyword = 1 - integer = 2 - double_precision = 3 - string = 4 - constant = 5 - list_defined = 6 - keystring = 7 - record = 8 - repeating_record = 9 - recarray = 10 - - -class BlockType(Enum): - """ - Types of blocks that can be found in a package file - """ - single = 1 - multiple = 2 - transient = 3 - - -class MFDataItemStructure(object): - """ - Defines the structure of a single MF6 data item in a dfn file - - Attributes - ---------- - block_name : str - name of block that data item is in - name : str - name of data item - name_list : list - list of alternate names for the data item, includes data item's main - name "name" - python_name : str - name of data item referenced in python, with illegal python characters - removed - type : str - type of the data item as it appears in the dfn file - type_obj : python type - type of the data item as a python type - valid_values : list - list of valid values for the data item. if empty, this constraint does - not apply - data_items : list - list of data items contained in this data_item, including itself - in_record : bool - in_record attribute as appears in dfn file - tagged : bool - whether data item is tagged. if the data item is tagged its name is - included in the MF6 input file - just_data : bool - when just_data is true only data appears in the MF6 input file. 
- otherwise, name information appears - shape : list - describes the shape of the data - layer_dims : list - which dimensions in the shape function as layers, if None defaults to - "layer" - reader : basestring - reader that MF6 uses to read the data - optional : bool - whether data item is optional or required as part of the MFData in the - MF6 input file - longname : str - long name of the data item - description : str - description of the data item - path : tuple - a tuple describing the data item's location within the simulation - (,,,) - repeating : bool - whether or not the data item can repeat in the MF6 input file - block_variable : bool - if true, this data item is part of the block header - block_type : BlockType - whether the block containing this item is a single non-repeating block, - a multiple repeating block, or a transient repeating block - keystring_dict : dict - dictionary containing acceptable keystrings if this data item is of - type keystring - is_cellid : bool - true if this data item is definitely of type cellid - possible_cellid : bool - true if this data item may be of type cellid - ucase : bool - this data item must be displayed in upper case in the MF6 input file - - Methods - ------- - remove_cellid : (resolved_shape : list, cellid_size : int) - removes the cellid size from the shape of a data item - set_path : (path : tuple) - sets the path to this data item to path - get_rec_type : () : object type - gets the type of object of this data item to be used in a numpy - recarray - - See Also - -------- - - Notes - ----- - - Examples - -------- - """ - - def __init__(self): - self.file_name_keywords = {'filein':False, 'fileout':False} - self.contained_keywords = {'file_name':True} - self.block_name = None - self.name = None - self.display_name = None - self.name_length = None - self.is_aux = False - self.is_boundname = False - self.is_mname = False - self.name_list = [] - self.python_name = None - self.type = None - self.type_string = None - self.type_obj = None - self.valid_values = [] - self.data_items = None - self.in_record = False - self.tagged = True - self.just_data = False - self.shape = [] - self.layer_dims = ['nlay'] - self.reader = None - self.optional = False - self.longname = None - self.description = '' - self.path = None - self.repeating = False - self.block_variable = False - self.block_type = BlockType.single - self.keystring_dict = {} - self.is_cellid = False - self.possible_cellid = False - self.ucase = False - self.preserve_case = False - self.default_value = None - self.numeric_index = False - self.support_negative_index = False - self.construct_package = None - self.construct_data = None - self.parameter_name = None - self.one_per_pkg = False - self.jagged_array = None - - def set_value(self, line, common): - arr_line = line.strip().split() - if len(arr_line) > 1: - if arr_line[0] == 'block': - self.block_name = ' '.join(arr_line[1:]) - elif arr_line[0] == 'name': - if self.type == DatumType.keyword: - # display keyword names in upper case - self.display_name = ' '.join(arr_line[1:]).upper() - else: - self.display_name = ' '.join(arr_line[1:]).lower() - self.name = ' '.join(arr_line[1:]).lower() - self.name_list.append(self.name) - if len(self.name) >= 6 and self.name[0:6] == 'cellid': - self.is_cellid = True - if self.name and self.name[0:2] == 'id': - self.possible_cellid = True - self.python_name = self.name.replace('-', '_').lower() - # don't allow name to be a python keyword - if keyword.iskeyword(self.name): - self.python_name = 
'{}_'.format(self.python_name) - # performance optimizations - if self.name == 'aux': - self.is_aux = True - if self.name == 'boundname': - self.is_boundname = True - if self.name[0:5] == 'mname': - self.is_mname = True - self.name_length = len(self.name) - elif arr_line[0] == 'other_names': - arr_names = ' '.join(arr_line[1:]).lower().split(',') - for name in arr_names: - self.name_list.append(name) - elif arr_line[0] == 'type': - if self.support_negative_index: - # type already automatically set when - # support_negative_index flag is set - return - type_line = arr_line[1:] - if len(type_line) <= 0: - raise StructException('Data structure "{}" does not have ' - 'a type specified' - '.'.format(self.name), self.path) - self.type_string = type_line[0].lower() - self.type = self._str_to_enum_type(type_line[0]) - if self.type == DatumType.recarray or \ - self.type == DatumType.record or \ - self.type == DatumType.repeating_record or \ - self.type == DatumType.keystring: - self.data_items = type_line[1:] - if self.type == DatumType.keystring: - for item in self.data_items: - self.keystring_dict[item.lower()] = 0 - else: - self.data_items = [self.name] - self.type_obj = self._get_type() - if self.type == DatumType.keyword: - # display keyword names in upper case - if self.display_name is not None: - self.display_name = self.display_name.upper() - elif arr_line[0] == 'valid': - for value in arr_line[1:]: - self.valid_values.append(value) - elif arr_line[0] == 'in_record': - self.in_record = self._get_boolean_val(arr_line) - elif arr_line[0] == 'tagged': - self.tagged = self._get_boolean_val(arr_line) - elif arr_line[0] == 'just_data': - self.just_data = self._get_boolean_val(arr_line) - elif arr_line[0] == 'shape': - if len(arr_line) > 1: - self.shape = [] - for dimension in arr_line[1:]: - if dimension[-1] != ';': - dimension = dimension.replace('(', '') - dimension = dimension.replace(')', '') - dimension = dimension.replace(',', '') - if dimension[0] == '*': - dimension = dimension.replace('*', '') - # set as a "layer" dimension - self.layer_dims.insert(0, dimension) - self.shape.append(dimension) - else: - # only process what is after the last ; which by - # convention is the most generalized form of the - # shape - self.shape = [] - if len(self.shape) > 0: - self.repeating = True - elif arr_line[0] == 'reader': - self.reader = ' '.join(arr_line[1:]) - elif arr_line[0] == 'optional': - self.optional = self._get_boolean_val(arr_line) - elif arr_line[0] == 'longname': - self.longname = ' '.join(arr_line[1:]) - elif arr_line[0] == 'description': - if arr_line[1] == 'REPLACE': - self.description = self._resolve_common(arr_line, common) - elif len(arr_line) > 1 and arr_line[1].strip(): - self.description = ' '.join(arr_line[1:]) - - # clean self.description - self.description = self.description.replace('``', '"') - self.description = self.description.replace("''", '"') - - # massage latex equations - if '$' in self.description: - descsplit = self.description.split('$') - mylist = [i.replace('\\', '') + ':math:`' + - j.replace('\\', '\\\\') + '`' for i, j in - zip(descsplit[::2], descsplit[1::2])] - mylist.append(descsplit[-1]) - self.description = ''.join(mylist) - else: - self.description = self.description.replace('\\', '') - elif arr_line[0] == 'block_variable': - if len(arr_line) > 1: - self.block_variable = bool(arr_line[1]) - elif arr_line[0] == 'ucase': - if len(arr_line) > 1: - self.ucase = bool(arr_line[1]) - elif arr_line[0] == 'preserve_case': - self.preserve_case = 
self._get_boolean_val(arr_line)
- elif arr_line[0] == 'default_value':
- self.default_value = ' '.join(arr_line[1:])
- elif arr_line[0] == 'numeric_index':
- self.numeric_index = self._get_boolean_val(arr_line)
- elif arr_line[0] == 'support_negative_index':
- self.support_negative_index = self._get_boolean_val(arr_line)
- # must be double precision to support 0 and -0
- self.type_string = 'double_precision'
- self.type = self._str_to_enum_type(self.type_string)
- self.type_obj = self._get_type()
- elif arr_line[0] == 'construct_package':
- self.construct_package = arr_line[1]
- elif arr_line[0] == 'construct_data':
- self.construct_data = arr_line[1]
- elif arr_line[0] == 'parameter_name':
- self.parameter_name = arr_line[1]
- elif arr_line[0] == 'one_per_pkg':
- self.one_per_pkg = bool(arr_line[1])
- elif arr_line[0] == 'jagged_array':
- self.jagged_array = arr_line[1]
-
- def get_type_string(self):
- return '[{}]'.format(self.type_string)
-
- def get_description(self, line_size, initial_indent, level_indent):
- item_desc = '* {} ({}) {}'.format(self.name, self.type_string,
- self.description)
- if self.numeric_index or self.is_cellid:
- # append zero-based index text
- item_desc = '{} {}'.format(item_desc,
- numeric_index_text)
- twr = TextWrapper(width=line_size, initial_indent=initial_indent,
- drop_whitespace = True,
- subsequent_indent=' {}'.format(initial_indent))
- item_desc = '\n'.join(twr.wrap(item_desc))
- return item_desc
-
- def get_doc_string(self, line_size, initial_indent, level_indent):
- description = self.get_description(line_size,
- initial_indent + level_indent,
- level_indent)
- param_doc_string = '{} : {}'.format(self.python_name,
- self.get_type_string())
- twr = TextWrapper(width=line_size, initial_indent=initial_indent,
- subsequent_indent=' {}'.format(initial_indent),
- drop_whitespace=True)
- param_doc_string = '\n'.join(twr.wrap(param_doc_string))
- param_doc_string = '{}\n{}'.format(param_doc_string, description)
- return param_doc_string
-
- def get_keystring_desc(self, line_size, initial_indent, level_indent):
- if self.type != DatumType.keystring:
- raise StructException('Cannot get keystring description for "{}" '
- 'because it is not a keystring'
- '.'.format(self.name), self.path)
-
- # get description of keystring elements
- description = ''
- for key, item in self.keystring_dict.items():
- if description:
- description = '{}\n'.format(description)
- description = '{}{}'.format(description,
- item.get_doc_string(line_size,
- initial_indent,
- level_indent))
- return description
-
- def indicates_file_name(self):
- if self.name.lower() in self.file_name_keywords:
- return True
- for key, item in self.contained_keywords.items():
- if self.name.lower().find(key) != -1:
- return True
- return False
-
- def is_file_name(self):
- if self.name.lower() in self.file_name_keywords and \
- self.file_name_keywords[self.name.lower()] == True:
- return True
- for key, item in self.contained_keywords.items():
- if self.name.lower().find(key) != -1 and item == True:
- return True
- return False
-
- @staticmethod
- def remove_cellid(resolved_shape, cellid_size):
- # remove the cellid size from the shape
- for dimension, index in zip(resolved_shape,
- range(0, len(resolved_shape))):
- if dimension == cellid_size:
- resolved_shape[index] = 1
- break
-
- @staticmethod
- def _get_boolean_val(bool_option_line):
- if len(bool_option_line) <= 1:
- return False
- if bool_option_line[1].lower() == 'true':
- return True
- return False
-
- @staticmethod
- def 
_find_close_bracket(arr_line): - for index, word in enumerate(arr_line): - word = word.strip() - if len(word) > 0 and word[-1] == '}': - return index - return None - - @staticmethod - def _resolve_common(arr_line, common): - if common is None: - return arr_line - if not (arr_line[2] in common and len(arr_line) >= 4): - raise StructException('Could not find line "{}" in common dfn' - '.'.format(arr_line)) - close_bracket_loc = MFDataItemStructure._find_close_bracket( - arr_line[2:]) - resolved_str = common[arr_line[2]] - if close_bracket_loc is None: - find_replace_str = ' '.join(arr_line[3:]) - else: - close_bracket_loc += 3 - find_replace_str = ' '.join(arr_line[3:close_bracket_loc]) - find_replace_dict = ast.literal_eval(find_replace_str) - for find_str, replace_str in find_replace_dict.items(): - resolved_str = resolved_str.replace(find_str, replace_str) - # clean up formatting - resolved_str = resolved_str.replace('\\texttt', '') - resolved_str = resolved_str.replace('{', '') - resolved_str = resolved_str.replace('}', '') - - return resolved_str - - def set_path(self, path): - self.path = path + (self.name,) - mfstruct = MFStructure(True) - for dimension in self.shape: - dim_path = path + (dimension,) - if dim_path in mfstruct.dimension_dict: - mfstruct.dimension_dict[dim_path].append(self) - else: - mfstruct.dimension_dict[dim_path] = [self] - - def _get_type(self): - if self.type == DatumType.double_precision: - return float - elif self.type == DatumType.integer: - return int - elif self.type == DatumType.constant: - return bool - elif self.type == DatumType.string: - return str - elif self.type == DatumType.list_defined: - return str - return str - - def _str_to_enum_type(self, type_string): - if type_string.lower() == 'keyword': - return DatumType.keyword - elif type_string.lower() == 'integer': - return DatumType.integer - elif type_string.lower() == 'double_precision' or \ - type_string.lower() == 'double': - return DatumType.double_precision - elif type_string.lower() == 'string': - return DatumType.string - elif type_string.lower() == 'constant': - return DatumType.constant - elif type_string.lower() == 'list-defined': - return DatumType.list_defined - elif type_string.lower() == 'keystring': - return DatumType.keystring - elif type_string.lower() == 'record': - return DatumType.record - elif type_string.lower() == 'recarray': - return DatumType.recarray - elif type_string.lower() == 'repeating_record': - return DatumType.repeating_record - else: - exc_text = 'Data item type "{}" not supported.'.format(type_string) - raise StructException(exc_text, self.path) - - def get_rec_type(self): - item_type = self.type_obj - if item_type == str or self.is_cellid: - return object - return item_type - - -class MFDataStructure(object): - """ - Defines the structure of a single MF6 data item in a dfn file - - Parameters - ---------- - data_item : MFDataItemStructure - base data item associated with this data structure - model_data : bool - whether or not this is part of a model - package_type : str - abbreviated package type - - Attributes - ---------- - type : str - type of the data as it appears in the dfn file - path : tuple - a tuple describing the data's location within the simulation - (,,,) - optional : bool - whether data is optional or required as part of the MFBlock in the MF6 - input file - name : str - name of data item - name_list : list - list of alternate names for the data, includes data item's main name - "name" - python_name : str - name of data referenced in python, with 
illegal python characters
- removed
- longname : str
- long name of the data
- repeating : bool
- whether or not the data can repeat in the MF6 input file
- layered : bool
- whether this data can appear by layer
- num_data_items : int
- number of data item structures contained in this MFDataStructure,
- including itself
- record_within_record : bool
- true if this MFDataStructure is a record within a container
- MFDataStructure
- file_data : bool
- true if data points to a file
- block_type : BlockType
- whether the block containing this data is a single non-repeating block,
- a multiple repeating block, or a transient repeating block
- block_variable : bool
- if true, this data is part of the block header
- model_data : bool
- if true, data is part of a model
- num_optional : int
- number of optional data items
- parent_block : MFBlockStructure
- parent block structure object
- data_item_structures : list
- list of data item structures contained in this MFDataStructure
- expected_data_items : dict
- dictionary of expected data item names for quick lookup
- shape : tuple
- shape of first data item
-
- Methods
- -------
- get_keywords : () : list
- returns a list of all keywords associated with this data
- supports_aux : () : bool
- returns true if this data supports aux variables
- add_item : (item : MFDataItemStructure, record : bool)
- adds a data item to this MFDataStructure
- set_path : (path : tuple)
- sets the path describing the data's location within the simulation
- (,,,)
- get_datatype : () : DataType
- returns the DataType of this data (array, list, scalar, ...)
- get_min_record_entries : () : int
- gets the minimum number of entries, as entered in a package file,
- for a single record. excludes optional data items
- get_record_size : () : int
- gets the number of data items, excluding keyword data items, in this
- MFDataStructure
- all_keywords : () : bool
- returns true if all data items are keywords
- get_type_string : () : str
- returns descriptive string of the data types in this MFDataStructure
- get_description : () : str
- returns a description of the data
- get_type_array : (type_array : list):
- builds an array of data type information in type_array
- get_datum_type : (numpy_type : bool):
- returns the object type of the first data item in this MFDataStructure
- with a standard type. 
if numpy_type is true returns the type as a - numpy type - get_data_item_types: () : list - returns a list of object type for every data item in this - MFDataStructure - first_non_keyword_index : () : int - return the index of the first data item in this MFDataStructure that is - not a keyword - - See Also - -------- - - Notes - ----- - - Examples - -------- - """ - - def __init__(self, data_item, model_data, package_type, dfn_list): - self.type = data_item.type - self.package_type = package_type - self.path = None - self.optional = data_item.optional - self.name = data_item.name - self.block_name = data_item.block_name - self.name_length = len(self.name) - self.is_aux = data_item.is_aux - self.is_boundname = data_item.is_boundname - self.name_list = data_item.name_list - self.python_name = data_item.python_name - self.longname = data_item.longname - self.default_value = data_item.default_value - self.repeating = False - self.layered = ('nlay' in data_item.shape or - 'nodes' in data_item.shape or - len(data_item.layer_dims) > 1) - self.num_data_items = len(data_item.data_items) - self.record_within_record = False - self.file_data = False - self.block_type = data_item.block_type - self.block_variable = data_item.block_variable - self.model_data = model_data - self.num_optional = 0 - self.parent_block = None - self._fpmerge_data_item(data_item, dfn_list) - self.construct_package = data_item.construct_package - self.construct_data = data_item.construct_data - self.parameter_name = data_item.parameter_name - self.one_per_pkg = data_item.one_per_pkg - - # self.data_item_structures_dict = OrderedDict() - self.data_item_structures = [] - self.expected_data_items = OrderedDict() - self.shape = data_item.shape - if self.type == DatumType.recarray or self.type == DatumType.record \ - or self.type == DatumType.repeating_record: - # record expected data for later error checking - for data_item_name in data_item.data_items: - self.expected_data_items[data_item_name] = len( - self.expected_data_items) - else: - self.expected_data_items[data_item.name] = len( - self.expected_data_items) - - @property - def is_mname(self): - for item in self.data_item_structures: - if item.is_mname: - return True - return False - - def get_item(self, item_name): - for item in self.data_item_structures: - if item.name.lower() == item_name.lower(): - return item - return None - - def get_keywords(self): - keywords = [] - if self.type == DatumType.recarray or self.type == DatumType.record \ - or self.type == DatumType.repeating_record: - for data_item_struct in self.data_item_structures: - if data_item_struct.type == DatumType.keyword: - if len(keywords) == 0: - # create first keyword tuple - for name in data_item_struct.name_list: - keywords.append((name,)) - else: - # update all keyword tuples with latest keyword found - new_keywords = [] - for keyword_tuple in keywords: - for name in data_item_struct.name_list: - new_keywords.append(keyword_tuple + (name,)) - if data_item_struct.optional: - keywords = keywords + new_keywords - else: - keywords = new_keywords - elif data_item_struct.type == DatumType.keystring: - for keyword_item in data_item_struct.data_items: - keywords.append((keyword_item,)) - elif len(keywords) == 0: - if len(data_item_struct.valid_values) > 0: - new_keywords = [] - # loop through all valid values and append to the end - # of each keyword tuple - for valid_value in data_item_struct.valid_values: - if len(keywords) == 0: - new_keywords.append((valid_value,)) - else: - for keyword_tuple in keywords: - 
new_keywords.append( - keyword_tuple + (valid_value,)) - keywords = new_keywords - else: - for name in data_item_struct.name_list: - keywords.append((name,)) - else: - for name in self.name_list: - keywords.append((name,)) - return keywords - - def supports_aux(self): - for data_item_struct in self.data_item_structures: - if data_item_struct.name.lower() == 'aux': - return True - return False - - def add_item(self, item, record=False, dfn_list=None): - item_added = False - if item.type != DatumType.recarray and \ - ((item.type != DatumType.record and - item.type != DatumType.repeating_record) or - record == True): - if item.name not in self.expected_data_items: - raise StructException('Could not find data item "{}" in ' - 'expected data items of data structure ' - '{}.'.format(item.name, self.name), - self.path) - item.set_path(self.path) - if len(self.data_item_structures) == 0: - self.keyword = item.name - # insert data item into correct location in array - location = self.expected_data_items[item.name] - if len(self.data_item_structures) > location: - # TODO: ask about this condition and remove - if self.data_item_structures[location] is None: - # verify that this is not a placeholder value - if self.data_item_structures[location] is not None: - raise StructException('Data structure "{}" already ' - 'has the item named "{}"' - '.'.format(self.name, - item.name), - self.path) - if isinstance(item, MFDataItemStructure): - self.file_data = self.file_data or \ - item.indicates_file_name() - # replace placeholder value - self.data_item_structures[location] = item - item_added = True - else: - for index in range(0, - location - len(self.data_item_structures)): - # insert placeholder in array - self.data_item_structures.append(None) - if isinstance(item, MFDataItemStructure): - self.file_data = self.file_data or \ - item.indicates_file_name() - self.data_item_structures.append(item) - item_added = True - self.optional = self.optional and item.optional - if item.optional: - self.num_optional += 1 - if item_added: - self._fpmerge_data_item(item, dfn_list) - return item_added - - def _fpmerge_data_item(self, item, dfn_list): - mfstruct = MFStructure() - # check for flopy-specific dfn data - if item.name.lower() in mfstruct.flopy_dict: - # read flopy-specific dfn data - for name, value in mfstruct.flopy_dict[item.name.lower()].items(): - line = '{} {}'.format(name, value) - item.set_value(line, None) - if dfn_list is not None: - dfn_list[-1].append(line) - - def set_path(self, path): - self.path = path + (self.name,) - - def get_datatype(self): - if self.type == DatumType.recarray: - if self.block_type != BlockType.single and not self.block_variable: - if self.block_type == BlockType.transient: - return DataType.list_transient - else: - return DataType.list_multiple - else: - return DataType.list - if self.type == DatumType.record or self.type == \ - DatumType.repeating_record: - record_size, repeating_data_item = self.get_record_size() - if (record_size >= 1 and not self.all_keywords()) or \ - repeating_data_item: - if self.block_type != BlockType.single and \ - not self.block_variable: - if self.block_type == BlockType.transient: - return DataType.list_transient - else: - return DataType.list_multiple - else: - return DataType.list - else: - if self.block_type != BlockType.single and \ - not self.block_variable: - return DataType.scalar_transient - else: - return DataType.scalar - elif len(self.data_item_structures) > 0 and \ - self.data_item_structures[0].repeating: - if 
self.data_item_structures[0].type == DatumType.string: - return DataType.list - else: - if self.block_type == BlockType.single: - return DataType.array - else: - return DataType.array_transient - elif len(self.data_item_structures) > 0 and \ - self.data_item_structures[0].type == DatumType.keyword: - if self.block_type != BlockType.single and not self.block_variable: - return DataType.scalar_keyword_transient - else: - return DataType.scalar_keyword - else: - if self.block_type != BlockType.single and not self.block_variable: - return DataType.scalar_transient - else: - return DataType.scalar - - def is_mult_or_trans(self): - data_type = self.get_datatype() - if data_type == DataType.scalar_keyword_transient or \ - data_type == DataType.array_transient or \ - data_type == DataType.list_transient or \ - data_type == DataType.list_multiple: - return True - return False - - def get_min_record_entries(self): - count = 0 - for data_item_structure in self.data_item_structures: - if not data_item_structure.optional: - if data_item_structure.type == DatumType.record: - count += data_item_structure.get_record_size()[0] - else: - if data_item_structure.type != DatumType.keyword: - count += 1 - return count - - def get_record_size(self): - count = 0 - repeating = False - for data_item_structure in self.data_item_structures: - if data_item_structure.type == DatumType.record: - count += data_item_structure.get_record_size()[0] - else: - if data_item_structure.type != DatumType.keyword or \ - count > 0: - if data_item_structure.repeating: - # count repeats as one extra record - repeating = True - count += 1 - return count, repeating - - def all_keywords(self): - for data_item_structure in self.data_item_structures: - if data_item_structure.type == DatumType.record: - if not data_item_structure.all_keywords(): - return False - else: - if data_item_structure.type != DatumType.keyword: - return False - return True - - def get_type_string(self): - type_array = [] - self.get_docstring_type_array(type_array) - type_string = ', '.join(type_array) - type_header = '' - type_footer = '' - if len(self.data_item_structures) > 1 or \ - self.data_item_structures[ - 0].repeating: - type_header = '[' - type_footer = ']' - if self.repeating: - type_footer = '] ... 
[{}]'.format(type_string) - - return '{}{}{}'.format(type_header, type_string, type_footer) - - def get_docstring_type_array(self, type_array): - for index, item in enumerate(self.data_item_structures): - if item.type == DatumType.record: - item.get_docstring_type_array(type_array) - else: - if self.display_item(index): - if self.type == DatumType.recarray or self.type == \ - DatumType.record or \ - self.type == DatumType.repeating_record: - type_array.append('{}'.format(item.name)) - else: - type_array.append('{}'.format( - self._resolve_item_type(item))) - - def get_description(self, line_size=79, initial_indent=' ', - level_indent=' '): - type_array = [] - self.get_type_array(type_array) - description = '' - for datastr, index, itype in type_array: - item = datastr.data_item_structures[index] - if item is None: - continue - if item.type == DatumType.record: - item_desc = item.get_description(line_size, - initial_indent + level_indent, - level_indent) - description = '{}\n{}'.format(description, item_desc) - elif datastr.display_item(index): - if len(description.strip()) > 0: - description = '{}\n'.format(description) - item_desc = item.description - if item.numeric_index or item.is_cellid: - # append zero-based index text - item_desc = '{} {}'.format(item_desc, - numeric_index_text) - - item_desc = '* {} ({}) {}'.format(item.name, itype, - item_desc) - twr = TextWrapper(width=line_size, - initial_indent=initial_indent, - subsequent_indent=' {}'.format( - initial_indent)) - item_desc = '\n'.join(twr.wrap(item_desc)) - description = '{}{}'.format(description, item_desc) - if item.type == DatumType.keystring: - keystr_desc = item.get_keystring_desc(line_size, - initial_indent + - level_indent, - level_indent) - description = '{}\n{}'.format(description, - keystr_desc) - return description - - def get_subpackage_description(self, line_size=79, - initial_indent=' ', - level_indent=' '): - item_desc = '* Contains data for the {} package. Data can be ' \ - 'stored in a dictionary containing data for the {} ' \ - 'package with variable names as keys and package data as ' \ - 'values. Data just for the {} variable is also ' \ - 'acceptable. 
See {} package documentation for more ' \ - 'information' \ - '.'.format(self.construct_package, - self.construct_package, - self.parameter_name, - self.construct_package) - twr = TextWrapper(width=line_size, - initial_indent=initial_indent, - subsequent_indent=' {}'.format( - initial_indent)) - return '\n'.join(twr.wrap(item_desc)) - - def get_doc_string(self, line_size=79, initial_indent=' ', - level_indent=' '): - if self.parameter_name is not None: - description = self.get_subpackage_description( - line_size, initial_indent + level_indent, level_indent) - var_name = self.parameter_name - type_name = '{}varname:data{} or {} data'.format( - '{', '}', self.construct_data) - else: - description = self.get_description(line_size, - initial_indent + level_indent, - level_indent) - var_name = self.python_name - type_name = self.get_type_string() - - param_doc_string = '{} : {}'.format(var_name, type_name) - twr = TextWrapper(width=line_size, initial_indent=initial_indent, - subsequent_indent=' {}'.format(initial_indent)) - param_doc_string = '\n'.join(twr.wrap(param_doc_string)) - param_doc_string = '{}\n{}'.format(param_doc_string, description) - return param_doc_string - - def get_type_array(self, type_array): - for index, item in enumerate(self.data_item_structures): - if item.type == DatumType.record: - item.get_type_array(type_array) - else: - if self.display_item(index): - type_array.append((self, index,'{}'.format( - self._resolve_item_type(item)))) - - def _resolve_item_type(self, item): - item_type = item.type_string - first_nk_idx = self.first_non_keyword_index() - # single keyword is type boolean - if item_type == 'keyword' and \ - len(self.data_item_structures) == 1: - item_type = 'boolean' - if item.is_cellid: - item_type = '(integer, ...)' - # two keywords - if len(self.data_item_structures) == 2 and \ - first_nk_idx is None: - # keyword type is string - item_type = 'string' - return item_type - - def display_item(self, item_num): - item = self.data_item_structures[item_num] - first_nk_idx = self.first_non_keyword_index() - # all keywords excluded if there is a non-keyword - if not (item.type == DatumType.keyword and first_nk_idx is not None): - # ignore first keyword if there are two keywords - if len(self.data_item_structures) == 2 and first_nk_idx is None \ - and item_num == 0: - return False - return True - return False - - def get_datum_type(self, numpy_type=False, return_enum_type=False): - data_item_types = self.get_data_item_types() - for var_type in data_item_types: - if var_type[0] == DatumType.double_precision or var_type[0] == \ - DatumType.integer or var_type[0] == DatumType.string: - if return_enum_type: - return var_type[0] - else: - if numpy_type: - if var_type[0] == DatumType.double_precision: - return np.float64 - elif var_type[0] == DatumType.integer: - return np.int32 - else: - return np.object - else: - return var_type[2] - return None - - def get_data_item_types(self): - data_item_types = [] - for data_item in self.data_item_structures: - if data_item.type == DatumType.record: - # record within a record - data_item_types += data_item.get_data_item_types() - else: - data_item_types.append([data_item.type, - data_item.type_string, - data_item.type_obj]) - return data_item_types - - def first_non_keyword_index(self): - for data_item, index in zip(self.data_item_structures, - range(0, len(self.data_item_structures))): - if data_item.type != DatumType.keyword: - return index - return None - - def get_model(self): - if self.model_data: - if len(self.path) >= 1: - 
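# Editor's note (illustrative): for model data the path tuple is laid out as
# (model, package, block, data), so get_model returns the first entry and
# get_package, just below, returns the second (or the first entry for
# simulation-level data).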
return self.path[0] - return None - - def get_package(self): - if self.model_data: - if len(self.path) >= 2: - return self.path[1] - else: - if len(self.path) >= 1: - return self.path[0] - return '' - - -class MFBlockStructure(object): - """ - Defines the structure of a MF6 block. - - - Parameters - ---------- - name : string - block name - path : tuple - tuple that describes location of block within simulation - (, , ) - model_block : bool - true if this block is part of a model - - Attributes - ---------- - name : string - block name - path : tuple - tuple that describes location of block within simulation - (, , ) - model_block : bool - true if this block is part of a model - data_structures : OrderedDict - dictionary of data items in this block, with the data item name as - the key - block_header_structure : list - list of data items that are part of this block's "header" - - Methods - ------- - repeating() : bool - Returns true if more than one instance of this block can appear in a - MF6 package file - add_dataset(dataset : MFDataStructure, block_header_dataset : bool) - Adds dataset to this block, as a header dataset if block_header_dataset - is true - number_non_optional_data() : int - Returns the number of non-optional non-header data structures in - this block - number_non_optional_block_header_data() : int - Returns the number of non-optional block header data structures in - this block - get_data_structure(path : tuple) : MFDataStructure - Returns the data structure in this block with name defined by path[0]. - If name does not exist, returns None. - get_all_recarrays() : list - Returns all non-header data structures in this block that are of - type recarray - - See Also - -------- - - Notes - ----- - - Examples - -------- - - - """ - - def __init__(self, name, path, model_block): - # initialize - self.data_structures = OrderedDict() - self.block_header_structure = [] - self.name = name - self.path = path + (self.name,) - self.model_block = model_block - - def repeating(self): - if len(self.block_header_structure) > 0: - return True - return False - - def add_dataset(self, dataset): - dataset.set_path(self.path) - if dataset.block_variable: - self.block_header_structure.append(dataset) - else: - self.data_structures[dataset.name] = dataset - - def number_non_optional_data(self): - num = 0 - for key, data_structure in self.data_structures.items(): - if not data_structure.optional: - num += 1 - return num - - def number_non_optional_block_header_data(self): - if len(self.block_header_structure) > 0 and not \ - self.block_header_structure[0].optional: - return 1 - else: - return 0 - - def get_data_structure(self, path): - if path[0] in self.data_structures: - return self.data_structures[path[0]] - else: - return None - - def get_all_recarrays(self): - recarray_list = [] - for ds_key, item in self.data_structures.items(): - if item.type == DatumType.recarray: - recarray_list.append(item) - return recarray_list - - -class MFInputFileStructure(object): - """ - MODFLOW Input File Structure class. Loads file - structure information for individual input file - types.
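Before the MFInputFileStructure details, a quick sketch of the MFBlockStructure routing shown just above. This is a self-contained illustration with stand-in names (MiniBlock, MiniDataset, the 'gwf6'/'wel' path are assumptions, not FloPy API): datasets flagged as block variables land in the block header, which in turn makes the block repeating.

    from collections import OrderedDict

    class MiniDataset:
        def __init__(self, name, block_variable=False):
            self.name = name
            self.block_variable = block_variable

    class MiniBlock:
        # condensed stand-in for MFBlockStructure.add_dataset/repeating
        def __init__(self, name, path):
            self.name = name
            self.path = path + (name,)
            self.data_structures = OrderedDict()
            self.block_header_structure = []

        def add_dataset(self, dataset):
            dataset.path = self.path + (dataset.name,)
            if dataset.block_variable:
                # header datasets mark the block as repeating
                self.block_header_structure.append(dataset)
            else:
                self.data_structures[dataset.name] = dataset

        def repeating(self):
            return len(self.block_header_structure) > 0

    period = MiniBlock('period', ('gwf6', 'wel'))
    period.add_dataset(MiniDataset('iper', block_variable=True))
    assert period.repeating() and period.path == ('gwf6', 'wel', 'period')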
- - Parameters - ---------- - dfn_file : string - the definition file used to define the structure of this input file - path : tuple - path defining the location of the container of this input file - structure within the overall simulation structure - common : bool - is this the common dfn file - model_file : bool - this file belongs to a specific model type - - Attributes - ---------- - valid : boolean - simulation structure validity - path : tuple - path defining the location of this input file structure within the - overall simulation structure - read_as_arrays : bool - if this input file structure is the READASARRAYS version of a package - - Methods - ------- - is_valid() : bool - Checks all structure objects within the file for validity - get_data_structure(path : string) - Returns a data structure if it exists, otherwise returns None. Data - structure type returned is based on the tuple/list "path" - - See Also - -------- - - Notes - ----- - - Examples - -------- - - """ - - def __init__(self, dfn_file, path, common, model_file): - # initialize - self.valid = True - self.file_type = dfn_file.package_type - self.file_prefix = dfn_file.package_prefix - self.dfn_type = dfn_file.dfn_type - self.dfn_file_name = dfn_file.dfn_file_name - self.description = '' - self.path = path + (self.file_type,) - self.model_file = model_file # file belongs to a specific model - self.read_as_arrays = False - - self.multi_package_support = dfn_file.multi_package_support() - self.blocks = dfn_file.get_block_structure_dict(self.path, common, - model_file) - self.dfn_list = dfn_file.dfn_list - - def is_valid(self): - valid = True - for block in self.blocks: - valid = valid and block.is_valid() - return valid - - def get_data_structure(self, path): - if isinstance(path, tuple) or isinstance(path, list): - if path[0] in self.blocks: - return self.blocks[path[0]].get_data_structure(path[1:]) - else: - return None - else: - for block in self.blocks: - if path in block.data_structures: - return block.data_structures[path] - return None - - -class MFModelStructure(object): - """ - Defines the structure of a MF6 model and its packages - - Parameters - ---------- - model_type : string - abbreviation of model type - - Attributes - ---------- - valid : boolean - simulation structure validity - name_file_struct_obj : MFInputFileStructure - describes the structure of the model name file - package_struct_objs : OrderedDict - describes the structure of the model packages - model_type : string - abbreviation of model type - - Methods - ------- - add_namefile : (dfn_file : DfnFile, model_file=True : bool) - Adds a namefile structure object to the model - add_package(dfn_file : DfnFile, model_file=True : bool) - Adds a package structure object to the model - is_valid() : bool - Checks all structure objects within the model for validity - get_data_structure(path : string) - Returns a data structure if it exists, otherwise returns None. Data - structure type returned is based on the tuple/list "path"
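The get_data_structure methods in these structure classes all share one dispatch pattern: the first path entry selects a container, and the remainder is forwarded. A condensed sketch under assumed names (dispatch, containers, _Leaf are illustrative, not FloPy API):

    def dispatch(containers, path):
        # path[0] picks a container; remaining entries are forwarded to the
        # container's own get_data_structure, as in the methods above/below
        head, rest = path[0], path[1:]
        if head not in containers:
            return None
        return containers[head].get_data_structure(rest) if rest \
            else containers[head]

    class _Leaf:
        def get_data_structure(self, path):
            return ('leaf', path)

    assert dispatch({'dis': _Leaf()}, ('dis', 'griddata')) == \
        ('leaf', ('griddata',))
    assert dispatch({'dis': _Leaf()}, ('npf',)) is None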
- - See Also - -------- - - Notes - ----- - - Examples - -------- - """ - - def __init__(self, model_type, utl_struct_objs): - # add name file structure - self.model_type = model_type - self.name_file_struct_obj = None - self.package_struct_objs = OrderedDict() - self.utl_struct_objs = utl_struct_objs - - def add_namefile(self, dfn_file, common): - self.name_file_struct_obj = MFInputFileStructure(dfn_file, - (self.model_type,), - common, True) - - def add_package(self, dfn_file, common): - self.package_struct_objs[dfn_file.package_type] = MFInputFileStructure( - dfn_file, (self.model_type,), common, True) - - def get_package_struct(self, package_type): - if package_type in self.package_struct_objs: - return self.package_struct_objs[package_type] - elif package_type in self.utl_struct_objs: - return self.utl_struct_objs[package_type] - else: - return None - - def is_valid(self): - valid = True - for package_struct in self.package_struct_objs: - valid = valid and package_struct.is_valid() - return valid - - def get_data_structure(self, path): - if path[0] in self.package_struct_objs: - if len(path) > 1: - return self.package_struct_objs[path[0]].get_data_structure( - path[1:]) - else: - return self.package_struct_objs[path[0]] - elif path[0] == 'nam': - if len(path) > 1: - return self.name_file_struct_obj.get_data_structure(path[1:]) - else: - return self.name_file_struct_obj - else: - return None - - -class MFSimulationStructure(object): - """ - Defines the structure of a MF6 simulation and its packages - and models. - - Parameters - ---------- - - Attributes - ---------- - name_file_struct_obj : MFInputFileStructure - describes the structure of the simulation name file - package_struct_objs : OrderedDict - describes the structure of the simulation packages - model_struct_objs : OrderedDict - describes the structure of the supported model types - utl_struct_objs : OrderedDict - describes the structure of the supported utility packages - common : OrderedDict - common file information - model_type : string - placeholder - - Methods - ------- - process_dfn : (dfn_file : DfnFile) - reads in the contents of a dfn file, storing the contents in the - appropriate object - add_namefile : (dfn_file : DfnFile, model_file=True : bool) - Adds a namefile structure object to the simulation - add_util : (dfn_file : DfnFile) - Adds a utility package structure object to the simulation - add_package(dfn_file : DfnFile, model_file=True : bool) - Adds a package structure object to the simulation - store_common(dfn_file : DfnFile) - Stores the contents of the common dfn file - add_model(model_type : string) - Adds a model structure object to the simulation - is_valid() : bool - Checks all structure objects within the simulation for validity - get_data_structure(path : string) - Returns a data structure if it exists, otherwise returns None. Data - structure type returned is based on the tuple/list "path" - tag_read_as_arrays - Searches through all packages and tags any packages with a name that - indicates they are the READASARRAYS version of a package.
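To make the READASARRAYS tagging concrete: a minimal sketch of the suffix rule tag_read_as_arrays applies below (_is_read_as_arrays is an illustrative name; 'rcha' is the array form of 'rch' in the dfn set):

    def _is_read_as_arrays(key, known_packages):
        # a package key equal to another package's key plus a trailing 'a'
        # (e.g. 'rch' -> 'rcha') marks the READASARRAYS variant
        return key[:-1] in known_packages and key[-1] == 'a'

    assert _is_read_as_arrays('rcha', {'rch', 'evt'})
    assert not _is_read_as_arrays('evt', {'rch', 'evt'})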
- - See Also - -------- - - Notes - ----- - - Examples - -------- - """ - - def __init__(self): - # initialize - self.name_file_struct_obj = None - self.package_struct_objs = OrderedDict() - self.utl_struct_objs = OrderedDict() - self.model_struct_objs = OrderedDict() - self.common = None - self.model_type = '' - - @property - def model_types(self): - model_type_list = [] - for model in self.model_struct_objs.values(): - model_type_list.append(model.model_type[:-1]) - return model_type_list - - def process_dfn(self, dfn_file): - if dfn_file.dfn_type == DfnType.common: - self.store_common(dfn_file) - elif dfn_file.dfn_type == DfnType.sim_name_file: - self.add_namefile(dfn_file, False) - elif dfn_file.dfn_type == DfnType.sim_tdis_file or \ - dfn_file.dfn_type == DfnType.exch_file or \ - dfn_file.dfn_type == DfnType.ims_file: - self.add_package(dfn_file, False) - elif dfn_file.dfn_type == DfnType.utl: - self.add_util(dfn_file) - elif dfn_file.dfn_type == DfnType.model_file or \ - dfn_file.dfn_type == DfnType.model_name_file or \ - dfn_file.dfn_type == DfnType.gnc_file or \ - dfn_file.dfn_type == DfnType.mvr_file: - model_ver = '{}{}'.format(dfn_file.model_type, - MFStructure(True).get_version_string()) - if model_ver not in self.model_struct_objs: - self.add_model(model_ver) - if dfn_file.dfn_type == DfnType.model_file: - self.model_struct_objs[model_ver].add_package(dfn_file, - self.common) - elif dfn_file.dfn_type == DfnType.gnc_file or \ - dfn_file.dfn_type == DfnType.mvr_file: - # gnc and mvr files belong both on the simulation and model - # level - self.model_struct_objs[model_ver].add_package(dfn_file, - self.common) - self.add_package(dfn_file, False) - else: - self.model_struct_objs[model_ver].add_namefile(dfn_file, - self.common) - - def add_namefile(self, dfn_file, model_file=True): - self.name_file_struct_obj = MFInputFileStructure(dfn_file, (), - self.common, - model_file) - - def add_util(self, dfn_file): - self.utl_struct_objs[dfn_file.package_type] = MFInputFileStructure( - dfn_file, (), self.common, True) - - def add_package(self, dfn_file, model_file=True): - self.package_struct_objs[dfn_file.package_type] = MFInputFileStructure( - dfn_file, (), self.common, model_file) - - def store_common(self, dfn_file): - # store common stuff - self.common = dfn_file.dict_by_name() - - def add_model(self, model_type): - self.model_struct_objs[model_type] = MFModelStructure( - model_type, self.utl_struct_objs) - - def is_valid(self): - valid = True - for package_struct in self.package_struct_objs: - valid = valid and package_struct.is_valid() - for model_struct in self.model_struct_objs: - valid = valid and model_struct.is_valid() - return valid - - def get_data_structure(self, path): - if path[0] in self.package_struct_objs: - if len(path) > 1: - return self.package_struct_objs[path[0]].get_data_structure( - path[1:]) - else: - return self.package_struct_objs[path[0]] - elif path[0] in self.model_struct_objs: - if len(path) > 1: - return self.model_struct_objs[path[0]].get_data_structure( - path[1:]) - else: - return self.model_struct_objs[path[0]] - elif path[0] in self.utl_struct_objs: - if len(path) > 1: - return self.utl_struct_objs[path[0]].get_data_structure( - path[1:]) - else: - return self.utl_struct_objs[path[0]] - elif path[0] == 'nam': - if len(path) > 1: - return self.name_file_struct_obj.get_data_structure(path[1:]) - else: - return self.name_file_struct_obj - else: - return None - - def tag_read_as_arrays(self): - for key, package_struct in self.package_struct_objs.items(): 
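# Editor's note (illustrative): this first loop tags simulation-level
# packages; the second loop below repeats the same suffix check for each
# model's package_struct_objs.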
- if key[0:-1] in self.package_struct_objs and key[-1] == 'a': - package_struct.read_as_arrays = True - for model_key, model_struct in self.model_struct_objs.items(): - for key, package_struct in \ - model_struct.package_struct_objs.items(): - if key[0:-1] in model_struct.package_struct_objs and \ - key[-1] == 'a': - package_struct.read_as_arrays = True - - -class MFStructure(object): - """ - Singleton class for accessing the contents of the json structure file - (only one instance of this class can exist, which loads the json file on - initialization) - - Parameters - ---------- - mf_version : int - version of MODFLOW - valid : bool - whether the structure information loaded from the dfn files is valid - sim_struct : MFSimulationStructure - Object containing file structure for all simulation files - dimension_dict : dict - Dictionary mapping paths to dimension information to the dataitem whose - dimension information is being described - """ - _instance = None - - def __new__(cls, internal_request=False, load_from_dfn_files=False): - if cls._instance is None: - cls._instance = super(MFStructure, cls).__new__(cls) - - # Initialize variables - cls._instance.mf_version = 6 - cls._instance.valid = True - cls._instance.sim_struct = None - cls._instance.dimension_dict = {} - cls._instance.load_from_dfn_files = load_from_dfn_files - cls._instance.flopy_dict = {} - - # Read metadata from file - cls._instance.valid = cls._instance.__load_structure() - elif not cls._instance.valid and not internal_request: - if cls._instance.__load_structure(): - cls._instance.valid = True - - return cls._instance - - def get_version_string(self): - return format(str(self.mf_version)) - - def __load_structure(self): - # set up structure classes - self.sim_struct = MFSimulationStructure() - - if self.load_from_dfn_files: - mf_dfn = Dfn() - dfn_files = mf_dfn.get_file_list() - - # load flopy-specific settings - self.__load_flopy() - - # get common - common_dfn = DfnFile('common.dfn') - self.sim_struct.process_dfn(common_dfn) - - # process each file - for file in dfn_files: - self.sim_struct.process_dfn(DfnFile(file)) - self.sim_struct.tag_read_as_arrays() - else: - package_list = PackageContainer.package_factory(None, None) - for package in package_list: - self.sim_struct.process_dfn(DfnPackage(package)) - self.sim_struct.tag_read_as_arrays() - - return True - - def __load_flopy(self): - current_variable = None - var_info = {} - dfn_path, tail = os.path.split(os.path.realpath(__file__)) - flopy_path = os.path.join(dfn_path, 'dfn', 'flopy.dfn') - dfn_fp = open(flopy_path, 'r') - for line in dfn_fp: - if self.__valid_line(line): - lst_line = line.strip().split() - if lst_line[0].lower() == 'name': - # store current variable - self.flopy_dict[current_variable] = var_info - # reset var_info dict - var_info = {} - current_variable = lst_line[1].lower() - else: - var_info[lst_line[0].lower()] = lst_line[1].lower() - # store last variable - self.flopy_dict[current_variable] = var_info - - @staticmethod - def __valid_line(line): - if len(line.strip()) > 1 and line[0] != '#': - return True +""" +mfstructure module. 
Contains classes related to package structure + + +""" +import os +import traceback +import ast +import keyword +from enum import Enum +from textwrap import TextWrapper +from collections import OrderedDict +import numpy as np +from ..mfbase import PackageContainer, StructException + + +numeric_index_text = 'This argument is an index variable, which means that ' \ + 'it should be treated as zero-based when working with ' \ + 'FloPy and Python. Flopy will automatically subtract ' \ + 'one when loading index variables and add one when ' \ + 'writing index variables.' + + +class DfnType(Enum): + common = 1 + sim_name_file = 2 + sim_tdis_file = 3 + ims_file = 4 + exch_file = 5 + model_name_file = 6 + model_file = 7 + gnc_file = 8 + mvr_file = 9 + utl = 10 + unknown = 999 + + +class Dfn(object): + """ + Base class for package file definitions + + Attributes + ---------- + dfndir : path + folder containing package definition files (dfn) + common : path + file containing common information + multi_package : dict + contains the names of all packages that are allowed to have multiple + instances in a model/simulation + + Methods + ------- + get_file_list : () : list + returns all of the dfn files found in dfndir. files are returned in + a specified order defined in the local variable file_order + + See Also + -------- + + Notes + ----- + + Examples + ---- + """ + + def __init__(self): + # directories + self.dfndir = os.path.join('.', 'dfn') + self.common = os.path.join(self.dfndir, 'common.dfn') + # FIX: Transport - multi packages are hard coded + self.multi_package = {'exggwfgwf': 0, 'gwfchd': 0, 'gwfwel': 0, + 'gwfdrn': 0, 'gwfriv': 0, 'gwfghb': 0, + 'gwfrch': 0, 'gwfevt': 0, 'gwfmaw': 0, + 'gwfsfr': 0, 'gwflak': 0, 'gwfuzf': 0, + 'lnfcgeo': 0, 'lnfrgeo': 0, 'lnfngeo': 0, + 'utlobs': 0, 'utlts': 0, 'utltas': 0} + + def get_file_list(self): + file_order = ['sim-nam', # dfn completed tex updated + 'sim-tdis', # dfn completed tex updated + 'exg-gwfgwf', # dfn completed tex updated + 'sln-ims', # dfn completed tex updated + 'gwf-nam', # dfn completed tex updated + 'gwf-dis', # dfn completed tex updated + 'gwf-disv', # dfn completed tex updated + 'gwf-disu', # dfn completed tex updated + 'lnf-disl', # dfn completed tex updated + 'gwf-ic', # dfn completed tex updated + 'gwf-npf', # dfn completed tex updated + 'gwf-sto', # dfn completed tex updated + 'gwf-hfb', # dfn completed tex updated + 'gwf-chd', # dfn completed tex updated + 'gwf-wel', # dfn completed tex updated + 'gwf-drn', # dfn completed tex updated + 'gwf-riv', # dfn completed tex updated + 'gwf-ghb', # dfn completed tex updated + 'gwf-rch', # dfn completed tex updated + 'gwf-rcha', # dfn completed tex updated + 'gwf-evt', # dfn completed tex updated + 'gwf-evta', # dfn completed tex updated + 'gwf-maw', # dfn completed tex updated + 'gwf-sfr', # dfn completed tex updated + 'gwf-lak', # dfn completed tex updated + 'gwf-uzf', # dfn completed tex updated + 'gwf-mvr', # dfn completed tex updated + 'gwf-gnc', # dfn completed tex updated + 'gwf-oc', # dfn completed tex updated + 'utl-obs', + 'utl-ts', + 'utl-tab', + 'utl-tas'] + + dfn_path, tail = os.path.split(os.path.realpath(__file__)) + dfn_path = os.path.join(dfn_path, 'dfn') + # construct list of dfn files to process in the order of file_order + files = os.listdir(dfn_path) + for f in files: + if 'common' in f or 'flopy' in f: + continue + package_abbr = os.path.splitext(f)[0] + if package_abbr not in file_order: + file_order.append(package_abbr) + return [fname + '.dfn' for fname in 
file_order if + fname + '.dfn' in files] + + def _file_type(self, file_name): + # determine file type + if len(file_name) >= 6 and file_name[0:6] == 'common': + return DfnType.common, None + elif file_name[0:3] == 'sim': + if file_name[3:6] == 'nam': + return DfnType.sim_name_file, None + elif file_name[3:7] == 'tdis': + return DfnType.sim_tdis_file, None + else: + return DfnType.unknown, None + elif file_name[0:3] == 'nam': + return DfnType.sim_name_file, None + elif file_name[0:4] == 'tdis': + return DfnType.sim_tdis_file, None + elif file_name[0:3] == 'sln' or file_name[0:3] == 'ims': + return DfnType.ims_file, None + elif file_name[0:3] == 'exg': + return DfnType.exch_file, file_name[3:6] + elif file_name[0:3] == 'utl': + return DfnType.utl, None + else: + model_type = file_name[0:3] + if file_name[3:6] == 'nam': + return DfnType.model_name_file, model_type + elif file_name[3:6] == 'gnc': + return DfnType.gnc_file, model_type + elif file_name[3:6] == 'mvr': + return DfnType.mvr_file, model_type + else: + return DfnType.model_file, model_type + + +class DfnPackage(Dfn): + """ + Dfn child class that loads dfn information from a list structure stored + in the auto-built package classes + + Attributes + ---------- + package : MFPackage + MFPackage subclass that contains dfn information + + Methods + ------- + multi_package_support : () : bool + returns flag for multi-package support + get_block_structure_dict : (path : tuple, common : bool, model_file : + bool) : dict + returns a dictionary of block structure information for the package + + See Also + -------- + + Notes + ----- + + Examples + ---- + """ + + def __init__(self, package): + super(DfnPackage, self).__init__() + self.package = package + self.package_type = package._package_type + self.dfn_file_name = package.dfn_file_name + # the package type is always the text after the last - + package_name = self.package_type.split('-') + self.package_type = package_name[-1] + if not isinstance(package_name, str) and \ + len(package_name) > 1: + self.package_prefix = ''.join(package_name[:-1]) + else: + self.package_prefix = '' + self.dfn_type, \ + self.model_type = self._file_type(self.dfn_file_name.replace('-', '')) + self.dfn_list = package.dfn + + def multi_package_support(self): + return self.package.package_abbr in self.multi_package + + def get_block_structure_dict(self, path, common, model_file): + block_dict = OrderedDict() + dataset_items_in_block = {} + self.dataset_items_needed_dict = {} + keystring_items_needed_dict = {} + current_block = None + + for dfn_entry in self.dfn_list: + # load next data item + new_data_item_struct = MFDataItemStructure() + for next_line in dfn_entry: + new_data_item_struct.set_value(next_line, common) + # if block does not exist + if current_block is None or \ + current_block.name != new_data_item_struct.block_name: + # create block + current_block = MFBlockStructure( + new_data_item_struct.block_name, path, model_file) + # put block in block_dict + block_dict[current_block.name] = current_block + # init dataset item lookup + self.dataset_items_needed_dict = {} + dataset_items_in_block = {} + + # resolve block type + if len(current_block.block_header_structure) > 0: + if len(current_block.block_header_structure[ + 0].data_item_structures) > 0 and \ + current_block.block_header_structure[ + 0].data_item_structures[ + 0].type == DatumType.integer: + block_type = BlockType.transient + else: + block_type = BlockType.multiple + else: + block_type = BlockType.single + + if 
new_data_item_struct.block_variable: + block_dataset_struct = MFDataStructure( + new_data_item_struct, model_file, self.package_type, + self.dfn_list) + block_dataset_struct.parent_block = current_block + self._process_needed_data_items(block_dataset_struct, + dataset_items_in_block) + block_dataset_struct.set_path( + path + (new_data_item_struct.block_name,)) + block_dataset_struct.add_item(new_data_item_struct) + current_block.add_dataset(block_dataset_struct) + else: + new_data_item_struct.block_type = block_type + dataset_items_in_block[ + new_data_item_struct.name] = new_data_item_struct + + # if data item belongs to existing dataset(s) + item_location_found = False + if new_data_item_struct.name in \ + self.dataset_items_needed_dict: + if new_data_item_struct.type == DatumType.record: + # record within a record - create a data set in + # place of the data item + new_data_item_struct = self._new_dataset( + new_data_item_struct, current_block, + dataset_items_in_block, path, + model_file, False) + new_data_item_struct.record_within_record = True + + for dataset in self.dataset_items_needed_dict[ + new_data_item_struct.name]: + item_added = dataset.add_item(new_data_item_struct, + record=True) + item_location_found = item_location_found or \ + item_added + # if data item belongs to an existing keystring + if new_data_item_struct.name in \ + keystring_items_needed_dict: + new_data_item_struct.set_path( + keystring_items_needed_dict[ + new_data_item_struct.name].path) + if new_data_item_struct.type == DatumType.record: + # record within a keystring - create a data set in + # place of the data item + new_data_item_struct = self._new_dataset( + new_data_item_struct, current_block, + dataset_items_in_block, path, + model_file, False) + keystring_items_needed_dict[ + new_data_item_struct.name].keystring_dict[ + new_data_item_struct.name] \ + = new_data_item_struct + item_location_found = True + + if new_data_item_struct.type == DatumType.keystring: + # add keystrings to search list + for key, val in \ + new_data_item_struct.keystring_dict.items(): + keystring_items_needed_dict[ + key] = new_data_item_struct + + # if data set does not exist + if not item_location_found: + self._new_dataset(new_data_item_struct, current_block, + dataset_items_in_block, + path, model_file, True) + if current_block.name.upper() == 'SOLUTIONGROUP' and \ + len(current_block.block_header_structure) == 0: + # solution_group a special case for now + block_data_item_struct = MFDataItemStructure() + block_data_item_struct.name = 'order_num' + block_data_item_struct.data_items = ['order_num'] + block_data_item_struct.type = DatumType.integer + block_data_item_struct.longname = 'order_num' + block_data_item_struct.description = \ + 'internal variable to keep track of ' \ + 'solution group number' + block_dataset_struct = MFDataStructure( + block_data_item_struct, model_file, + self.package_type, self.dfn_list) + block_dataset_struct.parent_block = current_block + block_dataset_struct.set_path( + path + (new_data_item_struct.block_name,)) + block_dataset_struct.add_item( + block_data_item_struct) + current_block.add_dataset(block_dataset_struct) + return block_dict + + def _new_dataset(self, new_data_item_struct, current_block, + dataset_items_in_block, + path, model_file, add_to_block=True): + current_dataset_struct = MFDataStructure(new_data_item_struct, + model_file, self.package_type, + self.dfn_list) + current_dataset_struct.set_path( + path + (new_data_item_struct.block_name,)) + 
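# Editor's note (illustrative, not part of the normalized file): the
# _process_needed_data_items call that follows resolves expected items --
# items already seen in this block are attached to the new dataset right
# away, while names not yet seen are queued in dataset_items_needed_dict so
# later dfn entries can be routed to this dataset when they arrive.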
self._process_needed_data_items(current_dataset_struct, + dataset_items_in_block) + if add_to_block: + # add dataset + current_block.add_dataset(current_dataset_struct) + current_dataset_struct.parent_block = current_block + current_dataset_struct.add_item(new_data_item_struct) + return current_dataset_struct + + def _process_needed_data_items(self, current_dataset_struct, + dataset_items_in_block): + # add data items needed to dictionary + for item_name, val in \ + current_dataset_struct.expected_data_items.items(): + if item_name in dataset_items_in_block: + current_dataset_struct.add_item( + dataset_items_in_block[item_name]) + else: + if item_name in self.dataset_items_needed_dict: + self.dataset_items_needed_dict[item_name].append( + current_dataset_struct) + else: + self.dataset_items_needed_dict[item_name] = [ + current_dataset_struct] + + +class DfnFile(Dfn): + """ + Dfn child class that loads dfn information from a package definition (dfn) + file + + Attributes + ---------- + file : str + name of the file to be loaded + + Methods + ------- + multi_package_support : () : bool + returns flag for multi-package support + dict_by_name : {} : dict + returns a dictionary of data item descriptions from the dfn file with + the data item name as the dictionary key + get_block_structure_dict : (path : tuple, common : bool, model_file : + bool) : dict + returns a dictionary of block structure information for the package + + See Also + -------- + + Notes + ----- + + Examples + ---- + """ + + def __init__(self, file): + super(DfnFile, self).__init__() + + dfn_path, tail = os.path.split(os.path.realpath(__file__)) + dfn_path = os.path.join(dfn_path, 'dfn') + self._file_path = os.path.join(dfn_path, file) + self.dfn_file_name = file + self.dfn_type, \ + self.model_type = self._file_type(self.dfn_file_name.replace('-', '')) + self.package_type = os.path.splitext(file[4:])[0] + # the package type is always the text after the last - + package_name = self.package_type.split('-') + self.package_type = package_name[-1] + if not isinstance(package_name, str) and \ + len(package_name) > 1: + self.package_prefix = ''.join(package_name[:-1]) + else: + self.package_prefix = '' + self.file = file + self.dataset_items_needed_dict = {} + self.dfn_list = [] + + def multi_package_support(self): + base_file = os.path.splitext(self.file)[0] + base_file = base_file.replace('-', '') + return base_file in self.multi_package + + def dict_by_name(self): + name_dict = OrderedDict() + name = None + dfn_fp = open(self._file_path, 'r') + for line in dfn_fp: + if self._valid_line(line): + arr_line = line.strip().split() + if arr_line[0] == 'name': + name = arr_line[1] + elif arr_line[0] == 'description' and name is not None: + name_dict[name] = ' '.join(arr_line[1:]) + dfn_fp.close() + return name_dict + + def get_block_structure_dict(self, path, common, model_file): + self.dfn_list = [] + block_dict = OrderedDict() + dataset_items_in_block = {} + self.dataset_items_needed_dict = {} + keystring_items_needed_dict = {} + current_block = None + dfn_fp = open(self._file_path, 'r') + + for line in dfn_fp: + if self._valid_line(line): + # load next data item + new_data_item_struct = MFDataItemStructure() + new_data_item_struct.set_value(line, common) + self.dfn_list.append([line]) + for next_line in dfn_fp: + if self._empty_line(next_line): + break + if self._valid_line(next_line): + new_data_item_struct.set_value(next_line, common) + self.dfn_list[-1].append(next_line) + + # if block does not exist + if current_block is None or 
\ + current_block.name != new_data_item_struct.block_name: + # create block + current_block = MFBlockStructure( + new_data_item_struct.block_name, path, model_file) + # put block in block_dict + block_dict[current_block.name] = current_block + # init dataset item lookup + self.dataset_items_needed_dict = {} + dataset_items_in_block = {} + + # resolve block type + if len(current_block.block_header_structure) > 0: + if len(current_block.block_header_structure[ + 0].data_item_structures) > 0 and \ + current_block.block_header_structure[ + 0].data_item_structures[0].type == \ + DatumType.integer: + block_type = BlockType.transient + else: + block_type = BlockType.multiple + else: + block_type = BlockType.single + + if new_data_item_struct.block_variable: + block_dataset_struct = MFDataStructure( + new_data_item_struct, model_file, self.package_type, + self.dfn_list) + block_dataset_struct.parent_block = current_block + self._process_needed_data_items(block_dataset_struct, + dataset_items_in_block) + block_dataset_struct.set_path( + path + (new_data_item_struct.block_name,)) + block_dataset_struct.add_item(new_data_item_struct, False, + self.dfn_list) + current_block.add_dataset(block_dataset_struct) + else: + new_data_item_struct.block_type = block_type + dataset_items_in_block[ + new_data_item_struct.name] = new_data_item_struct + + # if data item belongs to existing dataset(s) + item_location_found = False + if new_data_item_struct.name in \ + self.dataset_items_needed_dict: + if new_data_item_struct.type == DatumType.record: + # record within a record - create a data set in + # place of the data item + new_data_item_struct = self._new_dataset( + new_data_item_struct, current_block, + dataset_items_in_block, path, + model_file, False) + new_data_item_struct.record_within_record = True + + for dataset in self.dataset_items_needed_dict[ + new_data_item_struct.name]: + item_added = dataset.add_item(new_data_item_struct, + True, self.dfn_list) + item_location_found = item_location_found or \ + item_added + # if data item belongs to an existing keystring + if new_data_item_struct.name in \ + keystring_items_needed_dict: + new_data_item_struct.set_path( + keystring_items_needed_dict[ + new_data_item_struct.name].path) + if new_data_item_struct.type == DatumType.record: + # record within a keystring - create a data set in + # place of the data item + new_data_item_struct = self._new_dataset( + new_data_item_struct, current_block, + dataset_items_in_block, path, + model_file, False) + keystring_items_needed_dict[ + new_data_item_struct.name].keystring_dict[ + new_data_item_struct.name] \ + = new_data_item_struct + item_location_found = True + + if new_data_item_struct.type == DatumType.keystring: + # add keystrings to search list + for key, val in \ + new_data_item_struct.keystring_dict.items(): + keystring_items_needed_dict[ + key] = new_data_item_struct + + # if data set does not exist + if not item_location_found: + self._new_dataset(new_data_item_struct, current_block, + dataset_items_in_block, + path, model_file, True) + if current_block.name.upper() == 'SOLUTIONGROUP' and \ + len(current_block.block_header_structure) == 0: + # solution_group a special case for now + block_data_item_struct = MFDataItemStructure() + block_data_item_struct.name = 'order_num' + block_data_item_struct.data_items = ['order_num'] + block_data_item_struct.type = DatumType.integer + block_data_item_struct.longname = 'order_num' + block_data_item_struct.description = \ + 'internal variable to keep track of ' \ + 
'solution group number' + block_dataset_struct = MFDataStructure( + block_data_item_struct, model_file, + self.package_type, self.dfn_list) + block_dataset_struct.parent_block = current_block + block_dataset_struct.set_path( + path + (new_data_item_struct.block_name,)) + block_dataset_struct.add_item( + block_data_item_struct, False, self.dfn_list) + current_block.add_dataset(block_dataset_struct) + dfn_fp.close() + return block_dict + + def _new_dataset(self, new_data_item_struct, current_block, + dataset_items_in_block, + path, model_file, add_to_block=True): + current_dataset_struct = MFDataStructure(new_data_item_struct, + model_file, self.package_type, + self.dfn_list) + current_dataset_struct.set_path( + path + (new_data_item_struct.block_name,)) + self._process_needed_data_items(current_dataset_struct, + dataset_items_in_block) + if add_to_block: + # add dataset + current_block.add_dataset(current_dataset_struct) + current_dataset_struct.parent_block = current_block + current_dataset_struct.add_item(new_data_item_struct, False, + self.dfn_list) + return current_dataset_struct + + def _process_needed_data_items(self, current_dataset_struct, + dataset_items_in_block): + # add data items needed to dictionary + for item_name, val in \ + current_dataset_struct.expected_data_items.items(): + if item_name in dataset_items_in_block: + current_dataset_struct.add_item( + dataset_items_in_block[item_name], False, self.dfn_list) + else: + if item_name in self.dataset_items_needed_dict: + self.dataset_items_needed_dict[item_name].append( + current_dataset_struct) + else: + self.dataset_items_needed_dict[item_name] = [ + current_dataset_struct] + + def _valid_line(self, line): + if len(line.strip()) > 1 and line[0] != '#': + return True + return False + + def _empty_line(self, line): + if len(line.strip()) <= 1: + return True + return False + + +class DataType(Enum): + """ + Types of data that can be found in a package file + """ + scalar_keyword = 1 + scalar = 2 + array = 3 + array_transient = 4 + list = 5 + list_transient = 6 + list_multiple = 7 + scalar_transient = 8 + scalar_keyword_transient = 9 + + +class DatumType(Enum): + """ + Types of individual pieces of data + """ + keyword = 1 + integer = 2 + double_precision = 3 + string = 4 + constant = 5 + list_defined = 6 + keystring = 7 + record = 8 + repeating_record = 9 + recarray = 10 + + +class BlockType(Enum): + """ + Types of blocks that can be found in a package file + """ + single = 1 + multiple = 2 + transient = 3 + + +class MFDataItemStructure(object): + """ + Defines the structure of a single MF6 data item in a dfn file + + Attributes + ---------- + block_name : str + name of block that data item is in + name : str + name of data item + name_list : list + list of alternate names for the data item, includes data item's main + name "name" + python_name : str + name of data item referenced in python, with illegal python characters + removed + type : str + type of the data item as it appears in the dfn file + type_obj : python type + type of the data item as a python type + valid_values : list + list of valid values for the data item. if empty, this constraint does + not apply + data_items : list + list of data items contained in this data_item, including itself + in_record : bool + in_record attribute as appears in dfn file + tagged : bool + whether data item is tagged. if the data item is tagged its name is + included in the MF6 input file + just_data : bool + when just_data is true only data appears in the MF6 input file. 
+ otherwise, name information appears + shape : list + describes the shape of the data + layer_dims : list + which dimensions in the shape function as layers, if None defaults to + "layer" + reader : basestring + reader that MF6 uses to read the data + optional : bool + whether data item is optional or required as part of the MFData in the + MF6 input file + longname : str + long name of the data item + description : str + description of the data item + path : tuple + a tuple describing the data item's location within the simulation + (,,,) + repeating : bool + whether or not the data item can repeat in the MF6 input file + block_variable : bool + if true, this data item is part of the block header + block_type : BlockType + whether the block containing this item is a single non-repeating block, + a multiple repeating block, or a transient repeating block + keystring_dict : dict + dictionary containing acceptable keystrings if this data item is of + type keystring + is_cellid : bool + true if this data item is definitely of type cellid + possible_cellid : bool + true if this data item may be of type cellid + ucase : bool + this data item must be displayed in upper case in the MF6 input file + + Methods + ------- + remove_cellid : (resolved_shape : list, cellid_size : int) + removes the cellid size from the shape of a data item + set_path : (path : tuple) + sets the path to this data item to path + get_rec_type : () : object type + gets the type of object of this data item to be used in a numpy + recarray + + See Also + -------- + + Notes + ----- + + Examples + -------- + """ + + def __init__(self): + self.file_name_keywords = {'filein':False, 'fileout':False} + self.contained_keywords = {'file_name':True} + self.block_name = None + self.name = None + self.display_name = None + self.name_length = None + self.is_aux = False + self.is_boundname = False + self.is_mname = False + self.name_list = [] + self.python_name = None + self.type = None + self.type_string = None + self.type_obj = None + self.valid_values = [] + self.data_items = None + self.in_record = False + self.tagged = True + self.just_data = False + self.shape = [] + self.layer_dims = ['nlay'] + self.reader = None + self.optional = False + self.longname = None + self.description = '' + self.path = None + self.repeating = False + self.block_variable = False + self.block_type = BlockType.single + self.keystring_dict = {} + self.is_cellid = False + self.possible_cellid = False + self.ucase = False + self.preserve_case = False + self.default_value = None + self.numeric_index = False + self.support_negative_index = False + self.construct_package = None + self.construct_data = None + self.parameter_name = None + self.one_per_pkg = False + self.jagged_array = None + + def set_value(self, line, common): + arr_line = line.strip().split() + if len(arr_line) > 1: + if arr_line[0] == 'block': + self.block_name = ' '.join(arr_line[1:]) + elif arr_line[0] == 'name': + if self.type == DatumType.keyword: + # display keyword names in upper case + self.display_name = ' '.join(arr_line[1:]).upper() + else: + self.display_name = ' '.join(arr_line[1:]).lower() + self.name = ' '.join(arr_line[1:]).lower() + self.name_list.append(self.name) + if len(self.name) >= 6 and self.name[0:6] == 'cellid': + self.is_cellid = True + if self.name and self.name[0:2] == 'id': + self.possible_cellid = True + self.python_name = self.name.replace('-', '_').lower() + # don't allow name to be a python keyword + if keyword.iskeyword(self.name): + self.python_name = 
'{}_'.format(self.python_name) + # performance optimizations + if self.name == 'aux': + self.is_aux = True + if self.name == 'boundname': + self.is_boundname = True + if self.name[0:5] == 'mname': + self.is_mname = True + self.name_length = len(self.name) + elif arr_line[0] == 'other_names': + arr_names = ' '.join(arr_line[1:]).lower().split(',') + for name in arr_names: + self.name_list.append(name) + elif arr_line[0] == 'type': + if self.support_negative_index: + # type already automatically set when + # support_negative_index flag is set + return + type_line = arr_line[1:] + if len(type_line) <= 0: + raise StructException('Data structure "{}" does not have ' + 'a type specified' + '.'.format(self.name), self.path) + self.type_string = type_line[0].lower() + self.type = self._str_to_enum_type(type_line[0]) + if self.type == DatumType.recarray or \ + self.type == DatumType.record or \ + self.type == DatumType.repeating_record or \ + self.type == DatumType.keystring: + self.data_items = type_line[1:] + if self.type == DatumType.keystring: + for item in self.data_items: + self.keystring_dict[item.lower()] = 0 + else: + self.data_items = [self.name] + self.type_obj = self._get_type() + if self.type == DatumType.keyword: + # display keyword names in upper case + if self.display_name is not None: + self.display_name = self.display_name.upper() + elif arr_line[0] == 'valid': + for value in arr_line[1:]: + self.valid_values.append(value) + elif arr_line[0] == 'in_record': + self.in_record = self._get_boolean_val(arr_line) + elif arr_line[0] == 'tagged': + self.tagged = self._get_boolean_val(arr_line) + elif arr_line[0] == 'just_data': + self.just_data = self._get_boolean_val(arr_line) + elif arr_line[0] == 'shape': + if len(arr_line) > 1: + self.shape = [] + for dimension in arr_line[1:]: + if dimension[-1] != ';': + dimension = dimension.replace('(', '') + dimension = dimension.replace(')', '') + dimension = dimension.replace(',', '') + if dimension[0] == '*': + dimension = dimension.replace('*', '') + # set as a "layer" dimension + self.layer_dims.insert(0, dimension) + self.shape.append(dimension) + else: + # only process what is after the last ; which by + # convention is the most generalized form of the + # shape + self.shape = [] + if len(self.shape) > 0: + self.repeating = True + elif arr_line[0] == 'reader': + self.reader = ' '.join(arr_line[1:]) + elif arr_line[0] == 'optional': + self.optional = self._get_boolean_val(arr_line) + elif arr_line[0] == 'longname': + self.longname = ' '.join(arr_line[1:]) + elif arr_line[0] == 'description': + if arr_line[1] == 'REPLACE': + self.description = self._resolve_common(arr_line, common) + elif len(arr_line) > 1 and arr_line[1].strip(): + self.description = ' '.join(arr_line[1:]) + + # clean self.description + self.description = self.description.replace('``', '"') + self.description = self.description.replace("''", '"') + + # massage latex equations + if '$' in self.description: + descsplit = self.description.split('$') + mylist = [i.replace('\\', '') + ':math:`' + + j.replace('\\', '\\\\') + '`' for i, j in + zip(descsplit[::2], descsplit[1::2])] + mylist.append(descsplit[-1]) + self.description = ''.join(mylist) + else: + self.description = self.description.replace('\\', '') + elif arr_line[0] == 'block_variable': + if len(arr_line) > 1: + self.block_variable = bool(arr_line[1]) + elif arr_line[0] == 'ucase': + if len(arr_line) > 1: + self.ucase = bool(arr_line[1]) + elif arr_line[0] == 'preserve_case': + self.preserve_case = 
self._get_boolean_val(arr_line) + elif arr_line[0] == 'default_value': + self.default_value = ' '.join(arr_line[1:]) + elif arr_line[0] == 'numeric_index': + self.numeric_index = self._get_boolean_val(arr_line) + elif arr_line[0] == 'support_negative_index': + self.support_negative_index = self._get_boolean_val(arr_line) + # must be double precision to support 0 and -0 + self.type_string = 'double_precision' + self.type = self._str_to_enum_type(self.type_string) + self.type_obj = self._get_type() + elif arr_line[0] == 'construct_package': + self.construct_package = arr_line[1] + elif arr_line[0] == 'construct_data': + self.construct_data = arr_line[1] + elif arr_line[0] == 'parameter_name': + self.parameter_name = arr_line[1] + elif arr_line[0] == 'one_per_pkg': + self.one_per_pkg = bool(arr_line[1]) + elif arr_line[0] == 'jagged_array': + self.jagged_array = arr_line[1] + + def get_type_string(self): + return '[{}]'.format(self.type_string) + + def get_description(self, line_size, initial_indent, level_indent): + item_desc = '* {} ({}) {}'.format(self.name, self.type_string, + self.description) + if self.numeric_index or self.is_cellid: + # append zero-based index text + item_desc = '{} {}'.format(item_desc, + numeric_index_text) + twr = TextWrapper(width=line_size, initial_indent=initial_indent, + drop_whitespace = True, + subsequent_indent=' {}'.format(initial_indent)) + item_desc = '\n'.join(twr.wrap(item_desc)) + return item_desc + + def get_doc_string(self, line_size, initial_indent, level_indent): + description = self.get_description(line_size, + initial_indent + level_indent, + level_indent) + param_doc_string = '{} : {}'.format(self.python_name, + self.get_type_string()) + twr = TextWrapper(width=line_size, initial_indent=initial_indent, + subsequent_indent=' {}'.format(initial_indent), + drop_whitespace=True) + param_doc_string = '\n'.join(twr.wrap(param_doc_string)) + param_doc_string = '{}\n{}'.format(param_doc_string, description) + return param_doc_string + + def get_keystring_desc(self, line_size, initial_indent, level_indent): + if self.type != DatumType.keystring: + raise StructException('Can not get keystring description for "{}" ' + 'because it is not a keystring' + '.'.format(self.name), self.path) + + # get description of keystring elements + description = '' + for key, item in self.keystring_dict.items(): + if description: + description = '{}\n'.format(description) + description = '{}{}'.format(description, + item.get_doc_string(line_size, + initial_indent, + level_indent)) + return description + + def indicates_file_name(self): + if self.name.lower() in self.file_name_keywords: + return True + for key, item in self.contained_keywords.items(): + if self.name.lower().find(key) != -1: + return True + return False + + def is_file_name(self): + if self.name.lower() in self.file_name_keywords and \ + self.file_name_keywords[self.name.lower()] == True: + return True + for key, item in self.contained_keywords.items(): + if self.name.lower().find(key) != -1 and item == True: + return True + return False + + @staticmethod + def remove_cellid(resolved_shape, cellid_size): + # remove the cellid size from the shape + for dimension, index in zip(resolved_shape, + range(0, len(resolved_shape))): + if dimension == cellid_size: + resolved_shape[index] = 1 + break + + @staticmethod + def _get_boolean_val(bool_option_line): + if len(bool_option_line) <= 1: + return False + if bool_option_line[1].lower() == 'true': + return True + return False + + @staticmethod + def 
_find_close_bracket(arr_line): + for index, word in enumerate(arr_line): + word = word.strip() + if len(word) > 0 and word[-1] == '}': + return index + return None + + @staticmethod + def _resolve_common(arr_line, common): + if common is None: + return arr_line + if not (arr_line[2] in common and len(arr_line) >= 4): + raise StructException('Could not find line "{}" in common dfn' + '.'.format(arr_line)) + close_bracket_loc = MFDataItemStructure._find_close_bracket( + arr_line[2:]) + resolved_str = common[arr_line[2]] + if close_bracket_loc is None: + find_replace_str = ' '.join(arr_line[3:]) + else: + close_bracket_loc += 3 + find_replace_str = ' '.join(arr_line[3:close_bracket_loc]) + find_replace_dict = ast.literal_eval(find_replace_str) + for find_str, replace_str in find_replace_dict.items(): + resolved_str = resolved_str.replace(find_str, replace_str) + # clean up formatting + resolved_str = resolved_str.replace('\\texttt', '') + resolved_str = resolved_str.replace('{', '') + resolved_str = resolved_str.replace('}', '') + + return resolved_str + + def set_path(self, path): + self.path = path + (self.name,) + mfstruct = MFStructure(True) + for dimension in self.shape: + dim_path = path + (dimension,) + if dim_path in mfstruct.dimension_dict: + mfstruct.dimension_dict[dim_path].append(self) + else: + mfstruct.dimension_dict[dim_path] = [self] + + def _get_type(self): + if self.type == DatumType.double_precision: + return float + elif self.type == DatumType.integer: + return int + elif self.type == DatumType.constant: + return bool + elif self.type == DatumType.string: + return str + elif self.type == DatumType.list_defined: + return str + return str + + def _str_to_enum_type(self, type_string): + if type_string.lower() == 'keyword': + return DatumType.keyword + elif type_string.lower() == 'integer': + return DatumType.integer + elif type_string.lower() == 'double_precision' or \ + type_string.lower() == 'double': + return DatumType.double_precision + elif type_string.lower() == 'string': + return DatumType.string + elif type_string.lower() == 'constant': + return DatumType.constant + elif type_string.lower() == 'list-defined': + return DatumType.list_defined + elif type_string.lower() == 'keystring': + return DatumType.keystring + elif type_string.lower() == 'record': + return DatumType.record + elif type_string.lower() == 'recarray': + return DatumType.recarray + elif type_string.lower() == 'repeating_record': + return DatumType.repeating_record + else: + exc_text = 'Data item type "{}" not supported.'.format(type_string) + raise StructException(exc_text, self.path) + + def get_rec_type(self): + item_type = self.type_obj + if item_type == str or self.is_cellid: + return object + return item_type + + +class MFDataStructure(object): + """ + Defines the structure of a single MF6 data item in a dfn file + + Parameters + ---------- + data_item : MFDataItemStructure + base data item associated with this data structure + model_data : bool + whether or not this is part of a model + package_type : str + abbreviated package type + + Attributes + ---------- + type : str + type of the data as it appears in the dfn file + path : tuple + a tuple describing the data's location within the simulation + (,,,) + optional : bool + whether data is optional or required as part of the MFBlock in the MF6 + input file + name : str + name of data item + name_list : list + list of alternate names for the data, includes data item's main name + "name" + python_name : str + name of data referenced in python, with 
illegal python characters removed
+    longname : str
+        long name of the data
+    repeating : bool
+        whether or not the data can repeat in the MF6 input file
+    layered : bool
+        whether this data can appear by layer
+    num_data_items : int
+        number of data item structures contained in this MFDataStructure,
+        including itself
+    record_within_record : bool
+        true if this MFDataStructure is a record within a container
+        MFDataStructure
+    file_data : bool
+        true if data points to a file
+    block_type : BlockType
+        whether the block containing this data is a single non-repeating
+        block, a multiple repeating block, or a transient repeating block
+    block_variable : bool
+        if true, this data is part of the block header
+    model_data : bool
+        if true, data is part of a model
+    num_optional : int
+        number of optional data items
+    parent_block : MFBlockStructure
+        parent block structure object
+    data_item_structures : list
+        list of data item structures contained in this MFDataStructure
+    expected_data_items : dict
+        dictionary of expected data item names for quick lookup
+    shape : tuple
+        shape of first data item
+
+    Methods
+    -------
+    get_keywords : () : list
+        returns a list of all keywords associated with this data
+    supports_aux : () : bool
+        returns true if this data supports aux variables
+    add_item : (item : MFDataItemStructure, record : bool)
+        adds a data item to this MFDataStructure
+    set_path : (path : tuple)
+        sets the path describing the data's location within the simulation
+        (<model>, <package>, <block>, <data>)
+    get_datatype : () : DataType
+        returns the DataType of this data (array, list, scalar, ...)
+    get_min_record_entries : () : int
+        gets the minimum number of entries, as entered in a package file,
+        for a single record. Excludes optional data items
+    get_record_size : () : int
+        gets the number of data items, excluding keyword data items, in
+        this MFDataStructure
+    all_keywords : () : bool
+        returns true if all data items are keywords
+    get_type_string : () : str
+        returns descriptive string of the data types in this
+        MFDataStructure
+    get_description : () : str
+        returns a description of the data
+    get_type_array : (type_array : list):
+        builds an array of data type information in type_array
+    get_datum_type : (numpy_type : bool):
+        returns the object type of the first data item in this
+        MFDataStructure with a standard type.
if numpy_type is true returns the type as a + numpy type + get_data_item_types: () : list + returns a list of object type for every data item in this + MFDataStructure + first_non_keyword_index : () : int + return the index of the first data item in this MFDataStructure that is + not a keyword + + See Also + -------- + + Notes + ----- + + Examples + -------- + """ + + def __init__(self, data_item, model_data, package_type, dfn_list): + self.type = data_item.type + self.package_type = package_type + self.path = None + self.optional = data_item.optional + self.name = data_item.name + self.block_name = data_item.block_name + self.name_length = len(self.name) + self.is_aux = data_item.is_aux + self.is_boundname = data_item.is_boundname + self.name_list = data_item.name_list + self.python_name = data_item.python_name + self.longname = data_item.longname + self.default_value = data_item.default_value + self.repeating = False + self.layered = ('nlay' in data_item.shape or + 'nodes' in data_item.shape or + len(data_item.layer_dims) > 1) + self.num_data_items = len(data_item.data_items) + self.record_within_record = False + self.file_data = False + self.block_type = data_item.block_type + self.block_variable = data_item.block_variable + self.model_data = model_data + self.num_optional = 0 + self.parent_block = None + self._fpmerge_data_item(data_item, dfn_list) + self.construct_package = data_item.construct_package + self.construct_data = data_item.construct_data + self.parameter_name = data_item.parameter_name + self.one_per_pkg = data_item.one_per_pkg + + # self.data_item_structures_dict = OrderedDict() + self.data_item_structures = [] + self.expected_data_items = OrderedDict() + self.shape = data_item.shape + if self.type == DatumType.recarray or self.type == DatumType.record \ + or self.type == DatumType.repeating_record: + # record expected data for later error checking + for data_item_name in data_item.data_items: + self.expected_data_items[data_item_name] = len( + self.expected_data_items) + else: + self.expected_data_items[data_item.name] = len( + self.expected_data_items) + + @property + def is_mname(self): + for item in self.data_item_structures: + if item.is_mname: + return True + return False + + def get_item(self, item_name): + for item in self.data_item_structures: + if item.name.lower() == item_name.lower(): + return item + return None + + def get_keywords(self): + keywords = [] + if self.type == DatumType.recarray or self.type == DatumType.record \ + or self.type == DatumType.repeating_record: + for data_item_struct in self.data_item_structures: + if data_item_struct.type == DatumType.keyword: + if len(keywords) == 0: + # create first keyword tuple + for name in data_item_struct.name_list: + keywords.append((name,)) + else: + # update all keyword tuples with latest keyword found + new_keywords = [] + for keyword_tuple in keywords: + for name in data_item_struct.name_list: + new_keywords.append(keyword_tuple + (name,)) + if data_item_struct.optional: + keywords = keywords + new_keywords + else: + keywords = new_keywords + elif data_item_struct.type == DatumType.keystring: + for keyword_item in data_item_struct.data_items: + keywords.append((keyword_item,)) + elif len(keywords) == 0: + if len(data_item_struct.valid_values) > 0: + new_keywords = [] + # loop through all valid values and append to the end + # of each keyword tuple + for valid_value in data_item_struct.valid_values: + if len(keywords) == 0: + new_keywords.append((valid_value,)) + else: + for keyword_tuple in keywords: + 
new_keywords.append( + keyword_tuple + (valid_value,)) + keywords = new_keywords + else: + for name in data_item_struct.name_list: + keywords.append((name,)) + else: + for name in self.name_list: + keywords.append((name,)) + return keywords + + def supports_aux(self): + for data_item_struct in self.data_item_structures: + if data_item_struct.name.lower() == 'aux': + return True + return False + + def add_item(self, item, record=False, dfn_list=None): + item_added = False + if item.type != DatumType.recarray and \ + ((item.type != DatumType.record and + item.type != DatumType.repeating_record) or + record == True): + if item.name not in self.expected_data_items: + raise StructException('Could not find data item "{}" in ' + 'expected data items of data structure ' + '{}.'.format(item.name, self.name), + self.path) + item.set_path(self.path) + if len(self.data_item_structures) == 0: + self.keyword = item.name + # insert data item into correct location in array + location = self.expected_data_items[item.name] + if len(self.data_item_structures) > location: + # TODO: ask about this condition and remove + if self.data_item_structures[location] is None: + # verify that this is not a placeholder value + if self.data_item_structures[location] is not None: + raise StructException('Data structure "{}" already ' + 'has the item named "{}"' + '.'.format(self.name, + item.name), + self.path) + if isinstance(item, MFDataItemStructure): + self.file_data = self.file_data or \ + item.indicates_file_name() + # replace placeholder value + self.data_item_structures[location] = item + item_added = True + else: + for index in range(0, + location - len(self.data_item_structures)): + # insert placeholder in array + self.data_item_structures.append(None) + if isinstance(item, MFDataItemStructure): + self.file_data = self.file_data or \ + item.indicates_file_name() + self.data_item_structures.append(item) + item_added = True + self.optional = self.optional and item.optional + if item.optional: + self.num_optional += 1 + if item_added: + self._fpmerge_data_item(item, dfn_list) + return item_added + + def _fpmerge_data_item(self, item, dfn_list): + mfstruct = MFStructure() + # check for flopy-specific dfn data + if item.name.lower() in mfstruct.flopy_dict: + # read flopy-specific dfn data + for name, value in mfstruct.flopy_dict[item.name.lower()].items(): + line = '{} {}'.format(name, value) + item.set_value(line, None) + if dfn_list is not None: + dfn_list[-1].append(line) + + def set_path(self, path): + self.path = path + (self.name,) + + def get_datatype(self): + if self.type == DatumType.recarray: + if self.block_type != BlockType.single and not self.block_variable: + if self.block_type == BlockType.transient: + return DataType.list_transient + else: + return DataType.list_multiple + else: + return DataType.list + if self.type == DatumType.record or self.type == \ + DatumType.repeating_record: + record_size, repeating_data_item = self.get_record_size() + if (record_size >= 1 and not self.all_keywords()) or \ + repeating_data_item: + if self.block_type != BlockType.single and \ + not self.block_variable: + if self.block_type == BlockType.transient: + return DataType.list_transient + else: + return DataType.list_multiple + else: + return DataType.list + else: + if self.block_type != BlockType.single and \ + not self.block_variable: + return DataType.scalar_transient + else: + return DataType.scalar + elif len(self.data_item_structures) > 0 and \ + self.data_item_structures[0].repeating: + if 
self.data_item_structures[0].type == DatumType.string: + return DataType.list + else: + if self.block_type == BlockType.single: + return DataType.array + else: + return DataType.array_transient + elif len(self.data_item_structures) > 0 and \ + self.data_item_structures[0].type == DatumType.keyword: + if self.block_type != BlockType.single and not self.block_variable: + return DataType.scalar_keyword_transient + else: + return DataType.scalar_keyword + else: + if self.block_type != BlockType.single and not self.block_variable: + return DataType.scalar_transient + else: + return DataType.scalar + + def is_mult_or_trans(self): + data_type = self.get_datatype() + if data_type == DataType.scalar_keyword_transient or \ + data_type == DataType.array_transient or \ + data_type == DataType.list_transient or \ + data_type == DataType.list_multiple: + return True + return False + + def get_min_record_entries(self): + count = 0 + for data_item_structure in self.data_item_structures: + if not data_item_structure.optional: + if data_item_structure.type == DatumType.record: + count += data_item_structure.get_record_size()[0] + else: + if data_item_structure.type != DatumType.keyword: + count += 1 + return count + + def get_record_size(self): + count = 0 + repeating = False + for data_item_structure in self.data_item_structures: + if data_item_structure.type == DatumType.record: + count += data_item_structure.get_record_size()[0] + else: + if data_item_structure.type != DatumType.keyword or \ + count > 0: + if data_item_structure.repeating: + # count repeats as one extra record + repeating = True + count += 1 + return count, repeating + + def all_keywords(self): + for data_item_structure in self.data_item_structures: + if data_item_structure.type == DatumType.record: + if not data_item_structure.all_keywords(): + return False + else: + if data_item_structure.type != DatumType.keyword: + return False + return True + + def get_type_string(self): + type_array = [] + self.get_docstring_type_array(type_array) + type_string = ', '.join(type_array) + type_header = '' + type_footer = '' + if len(self.data_item_structures) > 1 or \ + self.data_item_structures[ + 0].repeating: + type_header = '[' + type_footer = ']' + if self.repeating: + type_footer = '] ... 
[{}]'.format(type_string) + + return '{}{}{}'.format(type_header, type_string, type_footer) + + def get_docstring_type_array(self, type_array): + for index, item in enumerate(self.data_item_structures): + if item.type == DatumType.record: + item.get_docstring_type_array(type_array) + else: + if self.display_item(index): + if self.type == DatumType.recarray or self.type == \ + DatumType.record or \ + self.type == DatumType.repeating_record: + type_array.append('{}'.format(item.name)) + else: + type_array.append('{}'.format( + self._resolve_item_type(item))) + + def get_description(self, line_size=79, initial_indent=' ', + level_indent=' '): + type_array = [] + self.get_type_array(type_array) + description = '' + for datastr, index, itype in type_array: + item = datastr.data_item_structures[index] + if item is None: + continue + if item.type == DatumType.record: + item_desc = item.get_description(line_size, + initial_indent + level_indent, + level_indent) + description = '{}\n{}'.format(description, item_desc) + elif datastr.display_item(index): + if len(description.strip()) > 0: + description = '{}\n'.format(description) + item_desc = item.description + if item.numeric_index or item.is_cellid: + # append zero-based index text + item_desc = '{} {}'.format(item_desc, + numeric_index_text) + + item_desc = '* {} ({}) {}'.format(item.name, itype, + item_desc) + twr = TextWrapper(width=line_size, + initial_indent=initial_indent, + subsequent_indent=' {}'.format( + initial_indent)) + item_desc = '\n'.join(twr.wrap(item_desc)) + description = '{}{}'.format(description, item_desc) + if item.type == DatumType.keystring: + keystr_desc = item.get_keystring_desc(line_size, + initial_indent + + level_indent, + level_indent) + description = '{}\n{}'.format(description, + keystr_desc) + return description + + def get_subpackage_description(self, line_size=79, + initial_indent=' ', + level_indent=' '): + item_desc = '* Contains data for the {} package. Data can be ' \ + 'stored in a dictionary containing data for the {} ' \ + 'package with variable names as keys and package data as ' \ + 'values. Data just for the {} variable is also ' \ + 'acceptable. 
See {} package documentation for more ' \ + 'information' \ + '.'.format(self.construct_package, + self.construct_package, + self.parameter_name, + self.construct_package) + twr = TextWrapper(width=line_size, + initial_indent=initial_indent, + subsequent_indent=' {}'.format( + initial_indent)) + return '\n'.join(twr.wrap(item_desc)) + + def get_doc_string(self, line_size=79, initial_indent=' ', + level_indent=' '): + if self.parameter_name is not None: + description = self.get_subpackage_description( + line_size, initial_indent + level_indent, level_indent) + var_name = self.parameter_name + type_name = '{}varname:data{} or {} data'.format( + '{', '}', self.construct_data) + else: + description = self.get_description(line_size, + initial_indent + level_indent, + level_indent) + var_name = self.python_name + type_name = self.get_type_string() + + param_doc_string = '{} : {}'.format(var_name, type_name) + twr = TextWrapper(width=line_size, initial_indent=initial_indent, + subsequent_indent=' {}'.format(initial_indent)) + param_doc_string = '\n'.join(twr.wrap(param_doc_string)) + param_doc_string = '{}\n{}'.format(param_doc_string, description) + return param_doc_string + + def get_type_array(self, type_array): + for index, item in enumerate(self.data_item_structures): + if item.type == DatumType.record: + item.get_type_array(type_array) + else: + if self.display_item(index): + type_array.append((self, index,'{}'.format( + self._resolve_item_type(item)))) + + def _resolve_item_type(self, item): + item_type = item.type_string + first_nk_idx = self.first_non_keyword_index() + # single keyword is type boolean + if item_type == 'keyword' and \ + len(self.data_item_structures) == 1: + item_type = 'boolean' + if item.is_cellid: + item_type = '(integer, ...)' + # two keywords + if len(self.data_item_structures) == 2 and \ + first_nk_idx is None: + # keyword type is string + item_type = 'string' + return item_type + + def display_item(self, item_num): + item = self.data_item_structures[item_num] + first_nk_idx = self.first_non_keyword_index() + # all keywords excluded if there is a non-keyword + if not (item.type == DatumType.keyword and first_nk_idx is not None): + # ignore first keyword if there are two keywords + if len(self.data_item_structures) == 2 and first_nk_idx is None \ + and item_num == 0: + return False + return True + return False + + def get_datum_type(self, numpy_type=False, return_enum_type=False): + data_item_types = self.get_data_item_types() + for var_type in data_item_types: + if var_type[0] == DatumType.double_precision or var_type[0] == \ + DatumType.integer or var_type[0] == DatumType.string: + if return_enum_type: + return var_type[0] + else: + if numpy_type: + if var_type[0] == DatumType.double_precision: + return np.float64 + elif var_type[0] == DatumType.integer: + return np.int32 + else: + return np.object + else: + return var_type[2] + return None + + def get_data_item_types(self): + data_item_types = [] + for data_item in self.data_item_structures: + if data_item.type == DatumType.record: + # record within a record + data_item_types += data_item.get_data_item_types() + else: + data_item_types.append([data_item.type, + data_item.type_string, + data_item.type_obj]) + return data_item_types + + def first_non_keyword_index(self): + for data_item, index in zip(self.data_item_structures, + range(0, len(self.data_item_structures))): + if data_item.type != DatumType.keyword: + return index + return None + + def get_model(self): + if self.model_data: + if len(self.path) >= 1: + 
+                return self.path[0]
+        return None
+
+    def get_package(self):
+        if self.model_data:
+            if len(self.path) >= 2:
+                return self.path[1]
+        else:
+            if len(self.path) >= 1:
+                return self.path[0]
+        return ''
+
+
+class MFBlockStructure(object):
+    """
+    Defines the structure of a MF6 block.
+
+
+    Parameters
+    ----------
+    name : string
+        block name
+    path : tuple
+        tuple that describes location of block within simulation
+        (<model>, <package>, <block>)
+    model_block : bool
+        true if this block is part of a model
+
+    Attributes
+    ----------
+    name : string
+        block name
+    path : tuple
+        tuple that describes location of block within simulation
+        (<model>, <package>, <block>)
+    model_block : bool
+        true if this block is part of a model
+    data_structures : OrderedDict
+        dictionary of data items in this block, with the data item name as
+        the key
+    block_header_structure : list
+        list of data items that are part of this block's "header"
+
+    Methods
+    -------
+    repeating() : bool
+        Returns true if more than one instance of this block can appear in a
+        MF6 package file
+    add_dataset(dataset : MFDataStructure, block_header_dataset : bool)
+        Adds dataset to this block, as a header dataset if
+        block_header_dataset is true
+    number_non_optional_data() : int
+        Returns the number of non-optional non-header data structures in
+        this block
+    number_non_optional_block_header_data() : int
+        Returns the number of non-optional block header data structures in
+        this block
+    get_data_structure(path : tuple) : MFDataStructure
+        Returns the data structure in this block with name defined by
+        path[0]. If name does not exist, returns None.
+    get_all_recarrays() : list
+        Returns all non-header data structures in this block that are of
+        type recarray
+
+    See Also
+    --------
+
+    Notes
+    -----
+
+    Examples
+    --------
+
+
+    """
+
+    def __init__(self, name, path, model_block):
+        # initialize
+        self.data_structures = OrderedDict()
+        self.block_header_structure = []
+        self.name = name
+        self.path = path + (self.name,)
+        self.model_block = model_block
+
+    def repeating(self):
+        if len(self.block_header_structure) > 0:
+            return True
+        return False
+
+    def add_dataset(self, dataset):
+        dataset.set_path(self.path)
+        if dataset.block_variable:
+            self.block_header_structure.append(dataset)
+        else:
+            self.data_structures[dataset.name] = dataset
+
+    def number_non_optional_data(self):
+        num = 0
+        for key, data_structure in self.data_structures.items():
+            if not data_structure.optional:
+                num += 1
+        return num
+
+    def number_non_optional_block_header_data(self):
+        if len(self.block_header_structure) > 0 and not \
+                self.block_header_structure[0].optional:
+            return 1
+        else:
+            return 0
+
+    def get_data_structure(self, path):
+        if path[0] in self.data_structures:
+            return self.data_structures[path[0]]
+        else:
+            return None
+
+    def get_all_recarrays(self):
+        recarray_list = []
+        for ds_key, item in self.data_structures.items():
+            if item.type == DatumType.recarray:
+                recarray_list.append(item)
+        return recarray_list
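For orientation, the bookkeeping above can be exercised in isolation: datasets
flagged as block variables become the block "header" (which makes the block
repeating), everything else lands in data_structures. A minimal self-contained
sketch; the StubDataset class and the 'gwf6'/'wel'/'period' names are
illustrative stand-ins, not FloPy API:

    from collections import OrderedDict

    class StubDataset(object):
        # stand-in exposing only what add_dataset touches
        def __init__(self, name, block_variable=False):
            self.name = name
            self.block_variable = block_variable

        def set_path(self, path):
            self.path = path + (self.name,)

    data_structures = OrderedDict()
    block_header_structure = []
    path = ('gwf6', 'wel', 'period')
    for ds in (StubDataset('iper', block_variable=True),
               StubDataset('stress_period_data')):
        ds.set_path(path)
        if ds.block_variable:
            # header variable -> block can repeat per stress period
            block_header_structure.append(ds)
        else:
            data_structures[ds.name] = ds

    print(len(block_header_structure) > 0)  # True, cf. repeating()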
+
+
+class MFInputFileStructure(object):
+    """
+    MODFLOW Input File Structure class. Loads file structure information
+    for individual input file types.
+
+
+    Parameters
+    ----------
+    dfn_file : string
+        the definition file used to define the structure of this input file
+    path : tuple
+        path defining the location of the container of this input file
+        structure within the overall simulation structure
+    common : bool
+        is this the common dfn file
+    model_file : bool
+        this file belongs to a specific model type
+
+    Attributes
+    ----------
+    valid : boolean
+        simulation structure validity
+    path : tuple
+        path defining the location of this input file structure within the
+        overall simulation structure
+    read_as_arrays : bool
+        if this input file structure is the READASARRAYS version of a
+        package
+
+    Methods
+    -------
+    is_valid() : bool
+        Checks all structure objects within the file for validity
+    get_data_structure(path : string)
+        Returns a data structure if it exists, otherwise returns None. Data
+        structure type returned is based on the tuple/list "path"
+
+    See Also
+    --------
+
+    Notes
+    -----
+
+    Examples
+    --------
+
+    """
+
+    def __init__(self, dfn_file, path, common, model_file):
+        # initialize
+        self.valid = True
+        self.file_type = dfn_file.package_type
+        self.file_prefix = dfn_file.package_prefix
+        self.dfn_type = dfn_file.dfn_type
+        self.dfn_file_name = dfn_file.dfn_file_name
+        self.description = ''
+        self.path = path + (self.file_type,)
+        self.model_file = model_file  # file belongs to a specific model
+        self.read_as_arrays = False
+
+        self.multi_package_support = dfn_file.multi_package_support()
+        self.blocks = dfn_file.get_block_structure_dict(self.path, common,
+                                                        model_file)
+        self.dfn_list = dfn_file.dfn_list
+
+    def is_valid(self):
+        valid = True
+        for block in self.blocks:
+            valid = valid and block.is_valid()
+        return valid
+
+    def get_data_structure(self, path):
+        if isinstance(path, tuple) or isinstance(path, list):
+            if path[0] in self.blocks:
+                return self.blocks[path[0]].get_data_structure(path[1:])
+            else:
+                return None
+        else:
+            for block in self.blocks:
+                if path in block.data_structures:
+                    return block.data_structures[path]
+            return None
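The tuple-versus-string dispatch in get_data_structure above is easier to see
with plain dicts. This sketch mirrors the two lookup modes; the block and
variable names are assumed for illustration:

    blocks = {
        'options': {'save_flows': '<save_flows structure>'},
        'period': {'stress_period_data': '<spd structure>'},
    }

    def get_data_structure(path):
        if isinstance(path, (tuple, list)):
            # tuple/list path: descend block -> variable
            block = blocks.get(path[0])
            return None if block is None else block.get(path[1])
        # bare string: scan every block for the variable name
        for block in blocks.values():
            if path in block:
                return block[path]
        return None

    print(get_data_structure(('period', 'stress_period_data')))  # spd
    print(get_data_structure('save_flows'))                      # save_flows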
+
+
+class MFModelStructure(object):
+    """
+    Defines the structure of a MF6 model and its packages
+
+    Parameters
+    ----------
+    model_type : string
+        abbreviation of model type
+
+    Attributes
+    ----------
+    valid : boolean
+        simulation structure validity
+    name_file_struct_obj : MFInputFileStructure
+        describes the structure of the model name file
+    package_struct_objs : OrderedDict
+        describes the structure of the model's packages
+    model_type : string
+        abbreviation of model type
+
+    Methods
+    -------
+    add_namefile : (dfn_file : DfnFile, model_file=True : bool)
+        Adds a namefile structure object to the model
+    add_package(dfn_file : DfnFile, model_file=True : bool)
+        Adds a package structure object to the model
+    is_valid() : bool
+        Checks all structure objects within the model for validity
+    get_data_structure(path : string)
+        Returns a data structure if it exists, otherwise returns None. Data
+        structure type returned is based on the tuple/list "path"
+
+    See Also
+    --------
+
+    Notes
+    -----
+
+    Examples
+    --------
+    """
+
+    def __init__(self, model_type, utl_struct_objs):
+        # add name file structure
+        self.model_type = model_type
+        self.name_file_struct_obj = None
+        self.package_struct_objs = OrderedDict()
+        self.utl_struct_objs = utl_struct_objs
+
+    def add_namefile(self, dfn_file, common):
+        self.name_file_struct_obj = MFInputFileStructure(dfn_file,
+                                                         (self.model_type,),
+                                                         common, True)
+
+    def add_package(self, dfn_file, common):
+        self.package_struct_objs[dfn_file.package_type] = MFInputFileStructure(
+            dfn_file, (self.model_type,), common, True)
+
+    def get_package_struct(self, package_type):
+        if package_type in self.package_struct_objs:
+            return self.package_struct_objs[package_type]
+        elif package_type in self.utl_struct_objs:
+            return self.utl_struct_objs[package_type]
+        else:
+            return None
+
+    def is_valid(self):
+        valid = True
+        for package_struct in self.package_struct_objs:
+            valid = valid and package_struct.is_valid()
+        return valid
+
+    def get_data_structure(self, path):
+        if path[0] in self.package_struct_objs:
+            if len(path) > 1:
+                return self.package_struct_objs[path[0]].get_data_structure(
+                    path[1:])
+            else:
+                return self.package_struct_objs[path[0]]
+        elif path[0] == 'nam':
+            if len(path) > 1:
+                return self.name_file_struct_obj.get_data_structure(path[1:])
+            else:
+                return self.name_file_struct_obj
+        else:
+            return None
+
+
+class MFSimulationStructure(object):
+    """
+    Defines the structure of a MF6 simulation and its packages
+    and models.
+
+    Parameters
+    ----------
+
+    Attributes
+    ----------
+    name_file_struct_obj : MFInputFileStructure
+        describes the structure of the simulation name file
+    package_struct_objs : OrderedDict
+        describes the structure of the simulation packages
+    model_struct_objs : OrderedDict
+        describes the structure of the supported model types
+    utl_struct_objs : OrderedDict
+        describes the structure of the supported utility packages
+    common : OrderedDict
+        common file information
+    model_type : string
+        placeholder
+
+    Methods
+    -------
+    process_dfn : (dfn_file : DfnFile)
+        reads in the contents of a dfn file, storing its contents in the
+        appropriate object
+    add_namefile : (dfn_file : DfnFile, model_file=True : bool)
+        Adds a namefile structure object to the simulation
+    add_util : (dfn_file : DfnFile)
+        Adds a utility package structure object to the simulation
+    add_package(dfn_file : DfnFile, model_file=True : bool)
+        Adds a package structure object to the simulation
+    store_common(dfn_file : DfnFile)
+        Stores the contents of the common dfn file
+    add_model(model_type : string)
+        Adds a model structure object to the simulation
+    is_valid() : bool
+        Checks all structure objects within the simulation for validity
+    get_data_structure(path : string)
+        Returns a data structure if it exists, otherwise returns None. Data
+        structure type returned is based on the tuple/list "path"
+    tag_read_as_arrays
+        Searches through all packages and tags any packages with a name that
+        indicates they are the READASARRAYS version of a package.
+
+    See Also
+    --------
+
+    Notes
+    -----
+
+    Examples
+    --------
+    """
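Note the versioned registration scheme used below: a model is keyed by its
type plus the MODFLOW version string (e.g. 'gwf' + '6' -> 'gwf6'), and the
model_types property strips that suffix back off. A short sketch of the round
trip, with the dict standing in for the real attribute:

    mf_version = 6
    model_struct_objs = {}                           # keyed by versioned type
    model_ver = '{}{}'.format('gwf', mf_version)     # cf. process_dfn
    model_struct_objs[model_ver] = object()
    print([key[:-1] for key in model_struct_objs])   # ['gwf'], cf. model_types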
+
+    def __init__(self):
+        # initialize
+        self.name_file_struct_obj = None
+        self.package_struct_objs = OrderedDict()
+        self.utl_struct_objs = OrderedDict()
+        self.model_struct_objs = OrderedDict()
+        self.common = None
+        self.model_type = ''
+
+    @property
+    def model_types(self):
+        model_type_list = []
+        for model in self.model_struct_objs.values():
+            model_type_list.append(model.model_type[:-1])
+        return model_type_list
+
+    def process_dfn(self, dfn_file):
+        if dfn_file.dfn_type == DfnType.common:
+            self.store_common(dfn_file)
+        elif dfn_file.dfn_type == DfnType.sim_name_file:
+            self.add_namefile(dfn_file, False)
+        elif dfn_file.dfn_type == DfnType.sim_tdis_file or \
+                dfn_file.dfn_type == DfnType.exch_file or \
+                dfn_file.dfn_type == DfnType.ims_file:
+            self.add_package(dfn_file, False)
+        elif dfn_file.dfn_type == DfnType.utl:
+            self.add_util(dfn_file)
+        elif dfn_file.dfn_type == DfnType.model_file or \
+                dfn_file.dfn_type == DfnType.model_name_file or \
+                dfn_file.dfn_type == DfnType.gnc_file or \
+                dfn_file.dfn_type == DfnType.mvr_file:
+            model_ver = '{}{}'.format(dfn_file.model_type,
+                                      MFStructure(True).get_version_string())
+            if model_ver not in self.model_struct_objs:
+                self.add_model(model_ver)
+            if dfn_file.dfn_type == DfnType.model_file:
+                self.model_struct_objs[model_ver].add_package(dfn_file,
+                                                              self.common)
+            elif dfn_file.dfn_type == DfnType.gnc_file or \
+                    dfn_file.dfn_type == DfnType.mvr_file:
+                # gnc and mvr files belong both on the simulation and model
+                # level
+                self.model_struct_objs[model_ver].add_package(dfn_file,
+                                                              self.common)
+                self.add_package(dfn_file, False)
+            else:
+                self.model_struct_objs[model_ver].add_namefile(dfn_file,
+                                                               self.common)
+
+    def add_namefile(self, dfn_file, model_file=True):
+        self.name_file_struct_obj = MFInputFileStructure(dfn_file, (),
+                                                         self.common,
+                                                         model_file)
+
+    def add_util(self, dfn_file):
+        self.utl_struct_objs[dfn_file.package_type] = MFInputFileStructure(
+            dfn_file, (), self.common, True)
+
+    def add_package(self, dfn_file, model_file=True):
+        self.package_struct_objs[dfn_file.package_type] = MFInputFileStructure(
+            dfn_file, (), self.common, model_file)
+
+    def store_common(self, dfn_file):
+        # store common stuff
+        self.common = dfn_file.dict_by_name()
+
+    def add_model(self, model_type):
+        self.model_struct_objs[model_type] = MFModelStructure(
+            model_type, self.utl_struct_objs)
+
+    def is_valid(self):
+        valid = True
+        for package_struct in self.package_struct_objs:
+            valid = valid and package_struct.is_valid()
+        for model_struct in self.model_struct_objs:
+            valid = valid and model_struct.is_valid()
+        return valid
+
+    def get_data_structure(self, path):
+        if path[0] in self.package_struct_objs:
+            if len(path) > 1:
+                return self.package_struct_objs[path[0]].get_data_structure(
+                    path[1:])
+            else:
+                return self.package_struct_objs[path[0]]
+        elif path[0] in self.model_struct_objs:
+            if len(path) > 1:
+                return self.model_struct_objs[path[0]].get_data_structure(
+                    path[1:])
+            else:
+                return self.model_struct_objs[path[0]]
+        elif path[0] in self.utl_struct_objs:
+            if len(path) > 1:
+                return self.utl_struct_objs[path[0]].get_data_structure(
+                    path[1:])
+            else:
+                return self.utl_struct_objs[path[0]]
+        elif path[0] == 'nam':
+            if len(path) > 1:
+                return self.name_file_struct_obj.get_data_structure(path[1:])
+            else:
+                return self.name_file_struct_obj
+        else:
+            return None
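The tagging pass that follows relies purely on a naming convention: a package
type ending in 'a' whose stem is also registered (e.g. 'rcha' alongside 'rch')
is taken to be the READASARRAYS variant. In miniature, with placeholder values
standing in for the structure objects:

    package_struct_objs = {'rch': '<list-based recharge>',
                           'rcha': '<array-based recharge>'}
    read_as_arrays = [key for key in package_struct_objs
                      if key[0:-1] in package_struct_objs and key[-1] == 'a']
    print(read_as_arrays)  # ['rcha']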
+
+    def tag_read_as_arrays(self):
+        for key, package_struct in self.package_struct_objs.items():
+            if key[0:-1] in self.package_struct_objs and key[-1] == 'a':
+                package_struct.read_as_arrays = True
+        for model_key, model_struct in self.model_struct_objs.items():
+            for key, package_struct in \
+                    model_struct.package_struct_objs.items():
+                if key[0:-1] in model_struct.package_struct_objs and \
+                        key[-1] == 'a':
+                    package_struct.read_as_arrays = True
+
+
+class MFStructure(object):
+    """
+    Singleton class for accessing the contents of the json structure file
+    (only one instance of this class can exist, which loads the json file on
+    initialization)
+
+    Parameters
+    ----------
+    mf_version : int
+        version of MODFLOW
+    valid : bool
+        whether the structure information loaded from the dfn files is valid
+    sim_struct : MFSimulationStructure
+        Object containing file structure for all simulation files
+    dimension_dict : dict
+        Dictionary mapping paths to dimension information to the data item
+        whose dimension information is being described
+    """
+    _instance = None
+
+    def __new__(cls, internal_request=False, load_from_dfn_files=False):
+        if cls._instance is None:
+            cls._instance = super(MFStructure, cls).__new__(cls)
+
+            # Initialize variables
+            cls._instance.mf_version = 6
+            cls._instance.valid = True
+            cls._instance.sim_struct = None
+            cls._instance.dimension_dict = {}
+            cls._instance.load_from_dfn_files = load_from_dfn_files
+            cls._instance.flopy_dict = {}
+
+            # Read metadata from file
+            cls._instance.valid = cls._instance.__load_structure()
+        elif not cls._instance.valid and not internal_request:
+            if cls._instance.__load_structure():
+                cls._instance.valid = True
+
+        return cls._instance
+
+    def get_version_string(self):
+        return format(str(self.mf_version))
+
+    def __load_structure(self):
+        # set up structure classes
+        self.sim_struct = MFSimulationStructure()
+
+        if self.load_from_dfn_files:
+            mf_dfn = Dfn()
+            dfn_files = mf_dfn.get_file_list()
+
+            # load flopy-specific settings
+            self.__load_flopy()
+
+            # get common
+            common_dfn = DfnFile('common.dfn')
+            self.sim_struct.process_dfn(common_dfn)
+
+            # process each file
+            for file in dfn_files:
+                self.sim_struct.process_dfn(DfnFile(file))
+            self.sim_struct.tag_read_as_arrays()
+        else:
+            package_list = PackageContainer.package_factory(None, None)
+            for package in package_list:
+                self.sim_struct.process_dfn(DfnPackage(package))
+            self.sim_struct.tag_read_as_arrays()
+
+        return True
+
+    def __load_flopy(self):
+        current_variable = None
+        var_info = {}
+        dfn_path, tail = os.path.split(os.path.realpath(__file__))
+        flopy_path = os.path.join(dfn_path, 'dfn', 'flopy.dfn')
+        dfn_fp = open(flopy_path, 'r')
+        for line in dfn_fp:
+            if self.__valid_line(line):
+                lst_line = line.strip().split()
+                if lst_line[0].lower() == 'name':
+                    # store current variable
+                    self.flopy_dict[current_variable] = var_info
+                    # reset var_info dict
+                    var_info = {}
+                    current_variable = lst_line[1].lower()
+                else:
+                    var_info[lst_line[0].lower()] = lst_line[1].lower()
+        # store last variable
+        self.flopy_dict[current_variable] = var_info
+
+    @staticmethod
+    def __valid_line(line):
+        if len(line.strip()) > 1 and line[0] != '#':
+            return True
+        return False
\ No newline at end of file
diff --git a/flopy/mf6/mfbase.py b/flopy/mf6/mfbase.py
index c4f142180b..6d03397022 100644
--- a/flopy/mf6/mfbase.py
+++ b/flopy/mf6/mfbase.py
@@ -1,670 +1,670 @@
-import glob
-import importlib
-import inspect, sys, traceback
-import os, collections, copy
-from shutil import copyfile
-from enum import Enum
-
-
-# internal handled exceptions
-class MFInvalidTransientBlockHeaderException(Exception):
-
""" - Exception related to parsing a transient block header - """ - - def __init__(self, error): - Exception.__init__(self, - "MFInvalidTransientBlockHeaderException: {}".format( - error)) - - -class ReadAsArraysException(Exception): - """ - Attempted to load ReadAsArrays package as non-ReadAsArraysPackage - """ - - def __init__(self, error): - Exception.__init__(self, "ReadAsArraysException: {}".format(error)) - - -# external exceptions for users -class FlopyException(Exception): - """ - General Flopy Exception - """ - - def __init__(self, error, location=''): - self.message = error - Exception.__init__(self, - "FlopyException: {} ({})".format(error, location)) - - -class StructException(Exception): - """ - Exception related to the package file structure - """ - - def __init__(self, error, location): - self.message = error - Exception.__init__(self, - "StructException: {} ({})".format(error, location)) - - -class MFDataException(Exception): - """ - Exception related to MODFLOW input/output data - """ - def __init__(self, model=None, package=None, path=None, - current_process=None, data_element=None, - method_caught_in=None, org_type=None, org_value=None, - org_traceback=None, message=None, debug=None, - mfdata_except=None): - if mfdata_except is not None and \ - isinstance(mfdata_except, MFDataException): - # copy constructor - copying values from original exception - self.model = mfdata_except.model - self.package = mfdata_except.package - self.current_process = mfdata_except.current_process - self.data_element = mfdata_except.data_element - self.path = mfdata_except.path - self.messages = mfdata_except.messages - self.debug = mfdata_except.debug - self.method_caught_in = mfdata_except.method_caught_in - self.org_type = mfdata_except.org_type - self.org_value = mfdata_except.org_value - self.org_traceback = mfdata_except.org_traceback - self.org_tb_string = mfdata_except.org_tb_string - else: - self.messages = [] - if mfdata_except is not None and \ - (isinstance(mfdata_except, StructException) or - isinstance(mfdata_except, FlopyException)): - self.messages.append(mfdata_except.message) - self.model = None - self.package = None - self.current_process = None - self.data_element = None - self.path = None - self.debug = False - self.method_caught_in = None - self.org_type = None - self.org_value = None - self.org_traceback = None - self.org_tb_string = None - # override/assign any values that are not none - if model is not None: - self.model = model - if package is not None: - self.package = package - if current_process is not None: - self.current_process = current_process - if data_element is not None: - self.data_element = data_element - if path is not None: - self.path = path - if message is not None: - self.messages.append(message) - if debug is not None: - self.debug = debug - if method_caught_in is not None: - self.method_caught_in = method_caught_in - if org_type is not None: - self.org_type = org_type - if org_value is not None: - self.org_value = org_value - if org_traceback is not None: - self.org_traceback = org_traceback - self.org_tb_string = traceback.format_exception(self.org_type, - self.org_value, - self.org_traceback) - # build error string - error_message_0 = 'An error occurred in ' - if self.data_element is not None and self.data_element != '': - error_message_1 = 'data element "{}"' \ - ' '.format(self.data_element) - else: - error_message_1 = '' - if self.model is not None and self.model != '': - error_message_2 = 'model "{}" '.format(self.model) - else: - 
error_message_2 = '' - error_message_3 = 'package "{}".'.format(self.package) - error_message_4 = ' The error occurred while {} in the "{}" method' \ - '.'.format(self.current_process, - self.method_caught_in) - if len(self.messages) > 0: - error_message_5 = '\nAdditional Information:\n' - for index, message in enumerate(self.messages): - error_message_5 = '{}({}) {}\n'.format(error_message_5, - index + 1, message) - else: - error_message_5 = '' - error_message = '{}{}{}{}{}{}'.format(error_message_0, error_message_1, - error_message_2, error_message_3, - error_message_4, error_message_5) - #if self.debug: - # tb_string = ''.join(self.org_tb_string) - # error_message = '{}\nCall Stack\n{}'.format(error_message, - # tb_string) - Exception.__init__(self, error_message) - - -class VerbosityLevel(Enum): - quiet = 1 - normal = 2 - verbose = 3 - - -class PackageContainerType(Enum): - simulation = 1 - model = 2 - package = 3 - - -class ExtFileAction(Enum): - copy_all = 1 - copy_none = 2 - copy_relative_paths = 3 - - -class MFFilePath(object): - def __init__(self, file_path, model_name): - self.file_path = file_path - self.model_name = {model_name:0} - - def isabs(self): - return os.path.isabs(self.file_path) - - -class MFFileMgmt(object): - """ - Class containing MODFLOW path data - - Parameters - ---------- - - path : string - path on disk to the simulation - - Attributes - ---------- - - sim_path : string - path to the simulation - model_relative_path : OrderedDict - dictionary of relative paths to each model folder - - Methods - ------- - - get_model_path : (key : string) : string - returns the model working path for the model key - set_sim_path : string - sets the simulation working path - - """ - def __init__(self, path): - self._sim_path = '' - self.set_sim_path(path) - - # keys:fully pathed filenames, vals:FilePath instances - self.existing_file_dict = {} - # keys:filenames,vals:instance name - - self.model_relative_path = collections.OrderedDict() - - self._last_loaded_sim_path = None - self._last_loaded_model_relative_path = collections.OrderedDict() - - def copy_files(self, copy_relative_only=True): - num_files_copied = 0 - if self._last_loaded_sim_path is not None: - for mffile_path in self.existing_file_dict.values(): - # resolve previous simulation path. 
if mf6 changes - # so that paths are relative to the model folder, then - # this call should have "model_name" instead of "None" - path_old = self.resolve_path(mffile_path, None, - True) - if os.path.isfile(path_old) and \ - (not mffile_path.isabs() or not copy_relative_only): - # change "None" to "model_name" as above if mf6 - # supports model relative paths - path_new = self.resolve_path(mffile_path, - None) - if path_old != path_new: - new_folders = os.path.split(path_new)[0] - if not os.path.exists(new_folders): - os.makedirs(new_folders) - try: - copyfile(path_old, - path_new) - except: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'appending data', - self.structure.name, - inspect.stack()[0][3], type_, - value_, - traceback_, None, - self._simulation_data.debug) - - num_files_copied += 1 - return num_files_copied - - def get_updated_path(self, external_file_path, model_name, - ext_file_action): - external_file_path = self.string_to_file_path(external_file_path) - if ext_file_action == ExtFileAction.copy_all: - if os.path.isabs(external_file_path): - # move file path to local model or simulation path - file_name = os.path.split(external_file_path)[1] - if model_name: - return os.path.join(self.get_model_path(model_name), - file_name) - else: - return os.path.join(self.get_sim_path(), file_name) - else: - return external_file_path - elif ext_file_action == ExtFileAction.copy_relative_paths: - return external_file_path - elif ext_file_action == ExtFileAction.copy_none: - if os.path.isabs(external_file_path): - return external_file_path - else: - return os.path.join(self._build_relative_path(model_name), - external_file_path) - else: - return None - - def _build_relative_path(self, model_name): - old_abs_path = self.resolve_path('', model_name, True) - current_abs_path = self.resolve_path('', model_name, False) - return os.path.relpath(old_abs_path, current_abs_path) - - def strip_model_relative_path(self, model_name, path): - if model_name in self.model_relative_path: - model_rel_path = self.model_relative_path[model_name] - new_path = None - while path: - path, leaf = os.path.split(path) - if leaf != model_rel_path: - if new_path: - new_path = os.path.join(leaf, new_path) - else: - new_path = leaf - return new_path - - @staticmethod - def unique_file_name(file_name, lookup): - num = 0 - while MFFileMgmt._build_file(file_name, num) in lookup: - num += 1 - return MFFileMgmt._build_file(file_name, num) - - @staticmethod - def _build_file(file_name, num): - file, ext = os.path.splitext(file_name) - if ext: - return '{}_{}{}'.format(file, num, ext) - else: - return '{}_{}'.format(file, num) - - @staticmethod - def string_to_file_path(fp_string): - file_delimiters = ['/','\\'] - new_string = fp_string - for delimiter in file_delimiters: - arr_string = new_string.split(delimiter) - if len(arr_string) > 1: - if os.path.isabs(fp_string): - new_string = '{}{}{}'.format(arr_string[0], delimiter, - arr_string[1]) - else: - new_string = os.path.join(arr_string[0], arr_string[1]) - if len(arr_string) > 2: - for path_piece in arr_string[2:]: - new_string = os.path.join(new_string, path_piece) - return new_string - - def set_last_accessed_path(self): - self._last_loaded_sim_path = self._sim_path - self.set_last_accessed_model_path() - - def set_last_accessed_model_path(self): - for key, item in self.model_relative_path.items(): - self._last_loaded_model_relative_path[key] = copy.deepcopy(item) - - def 
get_model_path(self, key, last_loaded_path=False): - if last_loaded_path: - return os.path.join(self._last_loaded_sim_path, - self._last_loaded_model_relative_path[key]) - else: - if key in self.model_relative_path: - return os.path.join(self._sim_path, self.model_relative_path[key]) - else: - return self._sim_path - - def get_sim_path(self, last_loaded_path=False): - if last_loaded_path: - return self._last_loaded_sim_path - else: - return self._sim_path - - def add_ext_file(self, file_path, model_name): - if file_path in self.existing_file_dict: - if model_name not in self.existing_file_dict[file_path].model_name: - self.existing_file_dict[file_path].model_name[model_name] = 0 - else: - new_file_path = MFFilePath(file_path, - model_name) - self.existing_file_dict[file_path] = new_file_path - - def set_sim_path(self, path): - """ - set the file path to the simulation files - - Parameters - ---------- - path : string - full path or relative path from working directory to - simulation folder - - Returns - ------- - - Examples - -------- - self.simulation_data.mfdata.set_sim_path() - """ - - # recalculate paths for everything - # resolve path type - path = self.string_to_file_path(path) - if os.path.isabs(path): - self._sim_path = path - else: - # assume path is relative to working directory - self._sim_path = os.path.join(os.getcwd(), path) - - def resolve_path(self, path, model_name, last_loaded_path=False, - move_abs_paths=False): - if isinstance(path, MFFilePath): - file_path = path.file_path - else: - file_path = path - - if os.path.isabs(file_path): - # path is an absolute path - if move_abs_paths: - if model_name is None: - return self.get_sim_path(last_loaded_path) - else: - return self.get_model_path(model_name, last_loaded_path) - else: - return file_path - else: - # path is a relative path - if model_name is None: - return os.path.join(self.get_sim_path(last_loaded_path), - file_path) - else: - return os.path.join(self.get_model_path(model_name, - last_loaded_path), - file_path) - - -class PackageContainer(object): - """ - Base class for any class containing packages. - - Parameters - ---------- - simulation_data : SimulationData - the simulation's SimulationData object - name : string - name of the package container object - - Attributes - ---------- - _packagelist : list - packages contained in the package container - package_type_dict : dictionary - dictionary of packages by package type - package_name_dict : dictionary - dictionary of packages by package name - package_key_dict : dictionary - dictionary of packages by package key - - Methods - ------- - package_factory : (package_type : string, model_type : string) : - MFPackage subclass - Static method that returns the appropriate package type object based - on the package_type and model_type strings - get_package : (name : string) : MFPackage or [MfPackage] - finds a package by package name, package key, package type, or partial - package name. 
returns either a single package, a list of packages, - or None - register_package : (package : MFPackage) : (tuple, PackageStructure) - base class method for package registration - """ - - def __init__(self, simulation_data, name): - self.type = 'PackageContainer' - self.simulation_data = simulation_data - self.name = name - self._packagelist = [] - self.package_type_dict = {} - self.package_name_dict = {} - self.package_key_dict = {} - - @staticmethod - def package_factory(package_type, model_type): - package_abbr = '{}{}'.format(model_type, package_type) - package_utl_abbr = 'utl{}'.format(package_type) - package_list = [] - # iterate through python files - package_file_paths = PackageContainer.get_package_file_paths() - for package_file_path in package_file_paths: - module = PackageContainer.get_module(package_file_path) - if module is not None: - # iterate imported items - for item in dir(module): - value = PackageContainer.get_module_val(module, item, - 'package_abbr') - if value is not None: - abbr = value.package_abbr - if package_type is None: - # don't store packages "group" classes - if len(abbr) <= 8 or abbr[-8:] != 'packages': - package_list.append(value) - else: - # check package type - if value.package_abbr == package_abbr or \ - value.package_abbr == package_utl_abbr: - return value - if package_type is None: - return package_list - else: - return None - - @staticmethod - def model_factory(model_type): - package_file_paths = PackageContainer.get_package_file_paths() - for package_file_path in package_file_paths: - module = PackageContainer.get_module(package_file_path) - if module is not None: - # iterate imported items - for item in dir(module): - value = PackageContainer.get_module_val(module, item, - 'model_type') - if value is not None and value.model_type == model_type: - return value - return None - - @staticmethod - def get_module_val(module, item, attrb): - value = getattr(module, item) - # verify this is a class - if not value or not inspect.isclass(value) or not \ - hasattr(value, attrb): - return None - return value - - @staticmethod - def get_module(package_file_path): - package_file_name = os.path.basename(package_file_path) - module_path = os.path.splitext(package_file_name)[0] - module_name = '{}{}{}'.format('Modflow', module_path[2].upper(), - module_path[3:]) - if module_name.startswith("__"): - return None - - # import - return importlib.import_module("flopy.mf6.modflow.{}".format( - module_path)) - - @staticmethod - def get_package_file_paths(): - base_path = os.path.split(os.path.realpath(__file__))[0] - package_path = os.path.join(base_path, 'modflow') - return glob.glob(os.path.join(package_path, "*.py")) - - @property - def package_dict(self): - return self.package_name_dict.copy() - - @property - def package_names(self): - return list(self.package_name_dict.keys()) - - def _add_package(self, package, path): - # put in packages list and update lookup dictionaries - self._packagelist.append(package) - if package.package_name is not None: - self.package_name_dict[package.package_name.lower()] = package - self.package_key_dict[path[-1].lower()] = package - if package.package_type not in self.package_type_dict: - self.package_type_dict[package.package_type.lower()] = [] - self.package_type_dict[package.package_type.lower()].append(package) - - def _remove_package(self, package): - self._packagelist.remove(package) - if package.package_name is not None and \ - package.package_name.lower() in self.package_name_dict: - del 
self.package_name_dict[package.package_name.lower()] - del self.package_key_dict[package.path[-1].lower()] - package_list = self.package_type_dict[package.package_type.lower()] - package_list.remove(package) - if len(package_list) == 0: - del self.package_type_dict[package.package_type.lower()] - - # collect keys of items to be removed from main dictionary - items_to_remove = [] - for key in self.simulation_data.mfdata: - is_subkey = True - for pitem, ditem in zip(package.path, key): - if pitem != ditem: - is_subkey = False - break - if is_subkey: - items_to_remove.append(key) - - # remove items from main dictionary - for key in items_to_remove: - del self.simulation_data.mfdata[key] - - def get_package(self, name=None): - """ - Get a package. - - Parameters - ---------- - name : str - Name of the package, 'RIV', 'LPF', etc. - - Returns - ------- - pp : Package object - - """ - if name is None: - return self._packagelist[:] - - # search for full package name - if name.lower() in self.package_name_dict: - return self.package_name_dict[name.lower()] - - # search for package type - if name.lower() in self.package_type_dict: - if len(self.package_type_dict[name.lower()]) == 0: - return None - elif len(self.package_type_dict[name.lower()]) == 1: - return self.package_type_dict[name.lower()][0] - else: - return self.package_type_dict[name.lower()] - - # search for package key - if name.lower() in self.package_key_dict: - return self.package_key_dict[name.lower()] - - # search for partial and case-insensitive package name - for pp in self._packagelist: - if pp.package_name is not None: - # get first package of the type requested - package_name = pp.package_name.lower() - if len(package_name) > len(name): - package_name = package_name[0:len(name)] - if package_name.lower() == name.lower(): - return pp - - return None - - def register_package(self, package): - path = (package.package_name,) - return (path, None) - - @staticmethod - def _load_only_dict(load_only): - if load_only is None: - return None - if isinstance(load_only, dict): - return load_only - if not isinstance(load_only, collections.Iterable): - raise FlopyException('load_only must be iterable or None. 
' - 'load_only value of "{}" is ' - 'invalid'.format(load_only)) - load_only_dict = {} - for item in load_only: - load_only_dict[item.lower()] = True - return load_only_dict - - @staticmethod - def _in_pkg_list(pkg_list, pkg_type, pkg_name): - if pkg_type is not None: - pkg_type = pkg_type.lower() - if pkg_name is not None: - pkg_name = pkg_name.lower() - if pkg_type in pkg_list or pkg_name in pkg_list: - return True - - # split to make cases like "gwf6-gwf6" easier to process - pkg_type = pkg_type.split('-') - try: - # if there is a number on the end of the package try - # excluding it - int(pkg_type[0][-1]) - for key in pkg_list.keys(): - key = key.split('-') - if len(key) == len(pkg_type): - matches = True - for key_item, pkg_item in zip(key, pkg_type): - if pkg_item[0:-1] != key_item and pkg_item != key_item: - matches = False - if matches: - return True - except ValueError: - return False +import glob +import importlib +import inspect, sys, traceback +import os, collections, copy +from shutil import copyfile +from enum import Enum + + +# internal handled exceptions +class MFInvalidTransientBlockHeaderException(Exception): + """ + Exception related to parsing a transient block header + """ + + def __init__(self, error): + Exception.__init__(self, + "MFInvalidTransientBlockHeaderException: {}".format( + error)) + + +class ReadAsArraysException(Exception): + """ + Attempted to load ReadAsArrays package as non-ReadAsArraysPackage + """ + + def __init__(self, error): + Exception.__init__(self, "ReadAsArraysException: {}".format(error)) + + +# external exceptions for users +class FlopyException(Exception): + """ + General Flopy Exception + """ + + def __init__(self, error, location=''): + self.message = error + Exception.__init__(self, + "FlopyException: {} ({})".format(error, location)) + + +class StructException(Exception): + """ + Exception related to the package file structure + """ + + def __init__(self, error, location): + self.message = error + Exception.__init__(self, + "StructException: {} ({})".format(error, location)) + + +class MFDataException(Exception): + """ + Exception related to MODFLOW input/output data + """ + def __init__(self, model=None, package=None, path=None, + current_process=None, data_element=None, + method_caught_in=None, org_type=None, org_value=None, + org_traceback=None, message=None, debug=None, + mfdata_except=None): + if mfdata_except is not None and \ + isinstance(mfdata_except, MFDataException): + # copy constructor - copying values from original exception + self.model = mfdata_except.model + self.package = mfdata_except.package + self.current_process = mfdata_except.current_process + self.data_element = mfdata_except.data_element + self.path = mfdata_except.path + self.messages = mfdata_except.messages + self.debug = mfdata_except.debug + self.method_caught_in = mfdata_except.method_caught_in + self.org_type = mfdata_except.org_type + self.org_value = mfdata_except.org_value + self.org_traceback = mfdata_except.org_traceback + self.org_tb_string = mfdata_except.org_tb_string + else: + self.messages = [] + if mfdata_except is not None and \ + (isinstance(mfdata_except, StructException) or + isinstance(mfdata_except, FlopyException)): + self.messages.append(mfdata_except.message) + self.model = None + self.package = None + self.current_process = None + self.data_element = None + self.path = None + self.debug = False + self.method_caught_in = None + self.org_type = None + self.org_value = None + self.org_traceback = None + self.org_tb_string = None + # 
override/assign any values that are not none + if model is not None: + self.model = model + if package is not None: + self.package = package + if current_process is not None: + self.current_process = current_process + if data_element is not None: + self.data_element = data_element + if path is not None: + self.path = path + if message is not None: + self.messages.append(message) + if debug is not None: + self.debug = debug + if method_caught_in is not None: + self.method_caught_in = method_caught_in + if org_type is not None: + self.org_type = org_type + if org_value is not None: + self.org_value = org_value + if org_traceback is not None: + self.org_traceback = org_traceback + self.org_tb_string = traceback.format_exception(self.org_type, + self.org_value, + self.org_traceback) + # build error string + error_message_0 = 'An error occurred in ' + if self.data_element is not None and self.data_element != '': + error_message_1 = 'data element "{}"' \ + ' '.format(self.data_element) + else: + error_message_1 = '' + if self.model is not None and self.model != '': + error_message_2 = 'model "{}" '.format(self.model) + else: + error_message_2 = '' + error_message_3 = 'package "{}".'.format(self.package) + error_message_4 = ' The error occurred while {} in the "{}" method' \ + '.'.format(self.current_process, + self.method_caught_in) + if len(self.messages) > 0: + error_message_5 = '\nAdditional Information:\n' + for index, message in enumerate(self.messages): + error_message_5 = '{}({}) {}\n'.format(error_message_5, + index + 1, message) + else: + error_message_5 = '' + error_message = '{}{}{}{}{}{}'.format(error_message_0, error_message_1, + error_message_2, error_message_3, + error_message_4, error_message_5) + #if self.debug: + # tb_string = ''.join(self.org_tb_string) + # error_message = '{}\nCall Stack\n{}'.format(error_message, + # tb_string) + Exception.__init__(self, error_message) + + +class VerbosityLevel(Enum): + quiet = 1 + normal = 2 + verbose = 3 + + +class PackageContainerType(Enum): + simulation = 1 + model = 2 + package = 3 + + +class ExtFileAction(Enum): + copy_all = 1 + copy_none = 2 + copy_relative_paths = 3 + + +class MFFilePath(object): + def __init__(self, file_path, model_name): + self.file_path = file_path + self.model_name = {model_name:0} + + def isabs(self): + return os.path.isabs(self.file_path) + + +class MFFileMgmt(object): + """ + Class containing MODFLOW path data + + Parameters + ---------- + + path : string + path on disk to the simulation + + Attributes + ---------- + + sim_path : string + path to the simulation + model_relative_path : OrderedDict + dictionary of relative paths to each model folder + + Methods + ------- + + get_model_path : (key : string) : string + returns the model working path for the model key + set_sim_path : string + sets the simulation working path + + """ + def __init__(self, path): + self._sim_path = '' + self.set_sim_path(path) + + # keys:fully pathed filenames, vals:FilePath instances + self.existing_file_dict = {} + # keys:filenames,vals:instance name + + self.model_relative_path = collections.OrderedDict() + + self._last_loaded_sim_path = None + self._last_loaded_model_relative_path = collections.OrderedDict() + + def copy_files(self, copy_relative_only=True): + num_files_copied = 0 + if self._last_loaded_sim_path is not None: + for mffile_path in self.existing_file_dict.values(): + # resolve previous simulation path. 
if mf6 changes + # so that paths are relative to the model folder, then + # this call should have "model_name" instead of "None" + path_old = self.resolve_path(mffile_path, None, + True) + if os.path.isfile(path_old) and \ + (not mffile_path.isabs() or not copy_relative_only): + # change "None" to "model_name" as above if mf6 + # supports model relative paths + path_new = self.resolve_path(mffile_path, + None) + if path_old != path_new: + new_folders = os.path.split(path_new)[0] + if not os.path.exists(new_folders): + os.makedirs(new_folders) + try: + copyfile(path_old, + path_new) + except: + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.structure.get_model(), + self.structure.get_package(), + self._path, + 'appending data', + self.structure.name, + inspect.stack()[0][3], type_, + value_, + traceback_, None, + self._simulation_data.debug) + + num_files_copied += 1 + return num_files_copied + + def get_updated_path(self, external_file_path, model_name, + ext_file_action): + external_file_path = self.string_to_file_path(external_file_path) + if ext_file_action == ExtFileAction.copy_all: + if os.path.isabs(external_file_path): + # move file path to local model or simulation path + file_name = os.path.split(external_file_path)[1] + if model_name: + return os.path.join(self.get_model_path(model_name), + file_name) + else: + return os.path.join(self.get_sim_path(), file_name) + else: + return external_file_path + elif ext_file_action == ExtFileAction.copy_relative_paths: + return external_file_path + elif ext_file_action == ExtFileAction.copy_none: + if os.path.isabs(external_file_path): + return external_file_path + else: + return os.path.join(self._build_relative_path(model_name), + external_file_path) + else: + return None + + def _build_relative_path(self, model_name): + old_abs_path = self.resolve_path('', model_name, True) + current_abs_path = self.resolve_path('', model_name, False) + return os.path.relpath(old_abs_path, current_abs_path) + + def strip_model_relative_path(self, model_name, path): + if model_name in self.model_relative_path: + model_rel_path = self.model_relative_path[model_name] + new_path = None + while path: + path, leaf = os.path.split(path) + if leaf != model_rel_path: + if new_path: + new_path = os.path.join(leaf, new_path) + else: + new_path = leaf + return new_path + + @staticmethod + def unique_file_name(file_name, lookup): + num = 0 + while MFFileMgmt._build_file(file_name, num) in lookup: + num += 1 + return MFFileMgmt._build_file(file_name, num) + + @staticmethod + def _build_file(file_name, num): + file, ext = os.path.splitext(file_name) + if ext: + return '{}_{}{}'.format(file, num, ext) + else: + return '{}_{}'.format(file, num) + + @staticmethod + def string_to_file_path(fp_string): + file_delimiters = ['/','\\'] + new_string = fp_string + for delimiter in file_delimiters: + arr_string = new_string.split(delimiter) + if len(arr_string) > 1: + if os.path.isabs(fp_string): + new_string = '{}{}{}'.format(arr_string[0], delimiter, + arr_string[1]) + else: + new_string = os.path.join(arr_string[0], arr_string[1]) + if len(arr_string) > 2: + for path_piece in arr_string[2:]: + new_string = os.path.join(new_string, path_piece) + return new_string + + def set_last_accessed_path(self): + self._last_loaded_sim_path = self._sim_path + self.set_last_accessed_model_path() + + def set_last_accessed_model_path(self): + for key, item in self.model_relative_path.items(): + self._last_loaded_model_relative_path[key] = copy.deepcopy(item) + + def 
get_model_path(self, key, last_loaded_path=False): + if last_loaded_path: + return os.path.join(self._last_loaded_sim_path, + self._last_loaded_model_relative_path[key]) + else: + if key in self.model_relative_path: + return os.path.join(self._sim_path, self.model_relative_path[key]) + else: + return self._sim_path + + def get_sim_path(self, last_loaded_path=False): + if last_loaded_path: + return self._last_loaded_sim_path + else: + return self._sim_path + + def add_ext_file(self, file_path, model_name): + if file_path in self.existing_file_dict: + if model_name not in self.existing_file_dict[file_path].model_name: + self.existing_file_dict[file_path].model_name[model_name] = 0 + else: + new_file_path = MFFilePath(file_path, + model_name) + self.existing_file_dict[file_path] = new_file_path + + def set_sim_path(self, path): + """ + set the file path to the simulation files + + Parameters + ---------- + path : string + full path or relative path from working directory to + simulation folder + + Returns + ------- + + Examples + -------- + self.simulation_data.mfdata.set_sim_path() + """ + + # recalculate paths for everything + # resolve path type + path = self.string_to_file_path(path) + if os.path.isabs(path): + self._sim_path = path + else: + # assume path is relative to working directory + self._sim_path = os.path.join(os.getcwd(), path) + + def resolve_path(self, path, model_name, last_loaded_path=False, + move_abs_paths=False): + if isinstance(path, MFFilePath): + file_path = path.file_path + else: + file_path = path + + if os.path.isabs(file_path): + # path is an absolute path + if move_abs_paths: + if model_name is None: + return self.get_sim_path(last_loaded_path) + else: + return self.get_model_path(model_name, last_loaded_path) + else: + return file_path + else: + # path is a relative path + if model_name is None: + return os.path.join(self.get_sim_path(last_loaded_path), + file_path) + else: + return os.path.join(self.get_model_path(model_name, + last_loaded_path), + file_path) + + +class PackageContainer(object): + """ + Base class for any class containing packages. + + Parameters + ---------- + simulation_data : SimulationData + the simulation's SimulationData object + name : string + name of the package container object + + Attributes + ---------- + _packagelist : list + packages contained in the package container + package_type_dict : dictionary + dictionary of packages by package type + package_name_dict : dictionary + dictionary of packages by package name + package_key_dict : dictionary + dictionary of packages by package key + + Methods + ------- + package_factory : (package_type : string, model_type : string) : + MFPackage subclass + Static method that returns the appropriate package type object based + on the package_type and model_type strings + get_package : (name : string) : MFPackage or [MfPackage] + finds a package by package name, package key, package type, or partial + package name. 
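The lookup order described here matters: an exact package-name match wins, then a package-type match (which comes back as a list when several packages share the type), then a package-key match, and finally a partial, case-insensitive name match. A minimal usage sketch, assuming a hypothetical model `gwf` holding two RIV packages:

    riv = gwf.get_package('riv')
    # a type match with multiple hits returns a list,
    # so normalize before iterating
    for pkg in (riv if isinstance(riv, list) else [riv]):
        print(pkg.package_name)
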
returns either a single package, a list of packages, + or None + register_package : (package : MFPackage) : (tuple, PackageStructure) + base class method for package registration + """ + + def __init__(self, simulation_data, name): + self.type = 'PackageContainer' + self.simulation_data = simulation_data + self.name = name + self._packagelist = [] + self.package_type_dict = {} + self.package_name_dict = {} + self.package_key_dict = {} + + @staticmethod + def package_factory(package_type, model_type): + package_abbr = '{}{}'.format(model_type, package_type) + package_utl_abbr = 'utl{}'.format(package_type) + package_list = [] + # iterate through python files + package_file_paths = PackageContainer.get_package_file_paths() + for package_file_path in package_file_paths: + module = PackageContainer.get_module(package_file_path) + if module is not None: + # iterate imported items + for item in dir(module): + value = PackageContainer.get_module_val(module, item, + 'package_abbr') + if value is not None: + abbr = value.package_abbr + if package_type is None: + # don't store packages "group" classes + if len(abbr) <= 8 or abbr[-8:] != 'packages': + package_list.append(value) + else: + # check package type + if value.package_abbr == package_abbr or \ + value.package_abbr == package_utl_abbr: + return value + if package_type is None: + return package_list + else: + return None + + @staticmethod + def model_factory(model_type): + package_file_paths = PackageContainer.get_package_file_paths() + for package_file_path in package_file_paths: + module = PackageContainer.get_module(package_file_path) + if module is not None: + # iterate imported items + for item in dir(module): + value = PackageContainer.get_module_val(module, item, + 'model_type') + if value is not None and value.model_type == model_type: + return value + return None + + @staticmethod + def get_module_val(module, item, attrb): + value = getattr(module, item) + # verify this is a class + if not value or not inspect.isclass(value) or not \ + hasattr(value, attrb): + return None + return value + + @staticmethod + def get_module(package_file_path): + package_file_name = os.path.basename(package_file_path) + module_path = os.path.splitext(package_file_name)[0] + module_name = '{}{}{}'.format('Modflow', module_path[2].upper(), + module_path[3:]) + if module_name.startswith("__"): + return None + + # import + return importlib.import_module("flopy.mf6.modflow.{}".format( + module_path)) + + @staticmethod + def get_package_file_paths(): + base_path = os.path.split(os.path.realpath(__file__))[0] + package_path = os.path.join(base_path, 'modflow') + return glob.glob(os.path.join(package_path, "*.py")) + + @property + def package_dict(self): + return self.package_name_dict.copy() + + @property + def package_names(self): + return list(self.package_name_dict.keys()) + + def _add_package(self, package, path): + # put in packages list and update lookup dictionaries + self._packagelist.append(package) + if package.package_name is not None: + self.package_name_dict[package.package_name.lower()] = package + self.package_key_dict[path[-1].lower()] = package + if package.package_type not in self.package_type_dict: + self.package_type_dict[package.package_type.lower()] = [] + self.package_type_dict[package.package_type.lower()].append(package) + + def _remove_package(self, package): + self._packagelist.remove(package) + if package.package_name is not None and \ + package.package_name.lower() in self.package_name_dict: + del 
self.package_name_dict[package.package_name.lower()] + del self.package_key_dict[package.path[-1].lower()] + package_list = self.package_type_dict[package.package_type.lower()] + package_list.remove(package) + if len(package_list) == 0: + del self.package_type_dict[package.package_type.lower()] + + # collect keys of items to be removed from main dictionary + items_to_remove = [] + for key in self.simulation_data.mfdata: + is_subkey = True + for pitem, ditem in zip(package.path, key): + if pitem != ditem: + is_subkey = False + break + if is_subkey: + items_to_remove.append(key) + + # remove items from main dictionary + for key in items_to_remove: + del self.simulation_data.mfdata[key] + + def get_package(self, name=None): + """ + Get a package. + + Parameters + ---------- + name : str + Name of the package, 'RIV', 'LPF', etc. + + Returns + ------- + pp : Package object + + """ + if name is None: + return self._packagelist[:] + + # search for full package name + if name.lower() in self.package_name_dict: + return self.package_name_dict[name.lower()] + + # search for package type + if name.lower() in self.package_type_dict: + if len(self.package_type_dict[name.lower()]) == 0: + return None + elif len(self.package_type_dict[name.lower()]) == 1: + return self.package_type_dict[name.lower()][0] + else: + return self.package_type_dict[name.lower()] + + # search for package key + if name.lower() in self.package_key_dict: + return self.package_key_dict[name.lower()] + + # search for partial and case-insensitive package name + for pp in self._packagelist: + if pp.package_name is not None: + # get first package of the type requested + package_name = pp.package_name.lower() + if len(package_name) > len(name): + package_name = package_name[0:len(name)] + if package_name.lower() == name.lower(): + return pp + + return None + + def register_package(self, package): + path = (package.package_name,) + return (path, None) + + @staticmethod + def _load_only_dict(load_only): + if load_only is None: + return None + if isinstance(load_only, dict): + return load_only + if not isinstance(load_only, collections.Iterable): + raise FlopyException('load_only must be iterable or None. ' + 'load_only value of "{}" is ' + 'invalid'.format(load_only)) + load_only_dict = {} + for item in load_only: + load_only_dict[item.lower()] = True + return load_only_dict + + @staticmethod + def _in_pkg_list(pkg_list, pkg_type, pkg_name): + if pkg_type is not None: + pkg_type = pkg_type.lower() + if pkg_name is not None: + pkg_name = pkg_name.lower() + if pkg_type in pkg_list or pkg_name in pkg_list: + return True + + # split to make cases like "gwf6-gwf6" easier to process + pkg_type = pkg_type.split('-') + try: + # if there is a number on the end of the package try + # excluding it + int(pkg_type[0][-1]) + for key in pkg_list.keys(): + key = key.split('-') + if len(key) == len(pkg_type): + matches = True + for key_item, pkg_item in zip(key, pkg_type): + if pkg_item[0:-1] != key_item and pkg_item != key_item: + matches = False + if matches: + return True + except ValueError: + return False return False \ No newline at end of file diff --git a/flopy/mf6/mfmodel.py b/flopy/mf6/mfmodel.py index d5d9512e48..2898097f05 100644 --- a/flopy/mf6/mfmodel.py +++ b/flopy/mf6/mfmodel.py @@ -1,1179 +1,1179 @@ -""" -mfmodel module. 
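Two helpers at the end of mfbase.py cooperate during selective loading: `_load_only_dict` lower-cases the user's `load_only` list into a dictionary for constant-time membership tests, and `_in_pkg_list` additionally strips a trailing version digit from each dash-separated piece, so an entry like 'gwf-gwf' can match the versioned file type 'gwf6-gwf6'. A small sketch of the normalization step, mirroring (not reproducing) the private helper:

    def to_load_only_dict(load_only):
        # case-insensitive membership test, as in _load_only_dict
        return {item.lower(): True for item in load_only}

    lookup = to_load_only_dict(['IC', 'NPF', 'gwf-gwf'])
    print('npf' in lookup)   # True
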
Contains the MFModel class - -""" -import os, sys, inspect, warnings -import numpy as np -from .mfbase import PackageContainer, ExtFileAction, PackageContainerType, \ - MFDataException, ReadAsArraysException, FlopyException, \ - VerbosityLevel -from .mfpackage import MFPackage -from .coordinates import modeldimensions -from ..utils import datautil -from ..discretization.structuredgrid import StructuredGrid -from ..discretization.vertexgrid import VertexGrid -from ..discretization.unstructuredgrid import UnstructuredGrid -from ..discretization.grid import Grid -from flopy.discretization.modeltime import ModelTime -from ..mbase import ModelInterface -from .utils.mfenums import DiscretizationType -from .data import mfstructure -from ..utils.check import mf6check - - -class MFModel(PackageContainer, ModelInterface): - """ - MODFLOW Model Class. Represents a single model in a simulation. - - Parameters - ---------- - simulation_data : MFSimulationData - simulation data object - structure : MFModelStructure - structure of this type of model - modelname : string - name of the model - model_nam_file : string - relative path to the model name file from model working folder - version : string - version of modflow - exe_name : string - model executable name - model_ws : string - model working folder path - disfile : string - relative path to dis file from model working folder - grid_type : string - type of grid the model will use (structured, unstructured, vertices) - verbose : bool - verbose setting for model operations (default False) - - Attributes - ---------- - model_name : string - name of the model - exe_name : string - model executable name - packages : OrderedDict(MFPackage) - dictionary of model packages - _name_file_io : MFNameFile - name file - - Methods - ------- - load : (simulation : MFSimulationData, model_name : string, - namfile : string, type : string, version : string, exe_name : string, - model_ws : string, strict : boolean) : MFSimulation - a class method that loads a model from files - write - writes the simulation to files - remove_package : (package_name : string) - removes package from the model. 
package_name can be the - package's name, type, or package object to be removed from - the model - set_model_relative_path : (path : string) - sets the file path to the model folder and updates all model file paths - is_valid : () : boolean - checks the validity of the model and all of its packages - rename_all_packages : (name : string) - renames all packages in the model - set_all_data_external - sets the model's list and array data to be stored externally - - See Also - -------- - - Notes - ----- - - Examples - -------- - - """ - def __init__(self, simulation, model_type='gwf6', modelname='model', - model_nam_file=None, version='mf6', - exe_name='mf6.exe', add_to_simulation=True, - structure=None, model_rel_path='.', verbose=False, **kwargs): - super(MFModel, self).__init__(simulation.simulation_data, modelname) - self.simulation = simulation - self.simulation_data = simulation.simulation_data - self.name = modelname - self.name_file = None - self._version = version - self.model_type = model_type - self.type = 'Model' - - if model_nam_file is None: - model_nam_file = '{}.nam'.format(modelname) - - if add_to_simulation: - self.structure = simulation.register_model(self, model_type, - modelname, - model_nam_file) - else: - self.structure = structure - self.set_model_relative_path(model_rel_path) - self.exe_name = exe_name - self.dimensions = modeldimensions.ModelDimensions(self.name, - self.simulation_data) - self.simulation_data.model_dimensions[modelname] = self.dimensions - self._ftype_num_dict = {} - self._package_paths = {} - self._verbose = verbose - - if model_nam_file is None: - self.model_nam_file = '{}.nam'.format(modelname) - else: - self.model_nam_file = model_nam_file - - # check for spatial reference info in kwargs - xll = kwargs.pop("xll", None) - yll = kwargs.pop("yll", None) - self._xul = kwargs.pop("xul", None) - if self._xul is not None: - warnings.warn('xul/yul have been deprecated. Use xll/yll instead.', - DeprecationWarning) - self._yul = kwargs.pop("yul", None) - if self._yul is not None: - warnings.warn('xul/yul have been deprecated. Use xll/yll instead.', - DeprecationWarning) - rotation = kwargs.pop("rotation", 0.) 
- proj4 = kwargs.pop("proj4_str", None) - # build model grid object - self._modelgrid = Grid(proj4=proj4, xoff=xll, yoff=yll, - angrot=rotation) - - self.start_datetime = None - # check for extraneous kwargs - if len(kwargs) > 0: - kwargs_str = ', '.join(kwargs.keys()) - excpt_str = 'Extraneous kwargs "{}" provided to ' \ - 'MFModel.'.format(kwargs_str) - raise FlopyException(excpt_str) - - # build model name file - # create name file based on model type - support different model types - package_obj = self.package_factory('nam', model_type[0:3]) - if not package_obj: - excpt_str = 'Name file could not be found for model' \ - '{}.'.format(model_type[0:3]) - raise FlopyException(excpt_str) - - self.name_file = package_obj(self, filename=self.model_nam_file, - pname=self.name) - - def __getattr__(self, item): - """ - __getattr__ - used to allow for getting packages as if they are - attributes - - Parameters - ---------- - item : str - 3 character package name (case insensitive) - - - Returns - ------- - pp : Package object - Package object of type :class:`flopy.pakbase.Package` - - """ - if item == 'name_file' or not hasattr(self, 'name_file'): - raise AttributeError(item) - - package = self.get_package(item) - if package is not None: - return package - raise AttributeError(item) - - def __repr__(self): - return self._get_data_str(True) - - def __str__(self): - return self._get_data_str(False) - - def _get_data_str(self, formal): - file_mgr = self.simulation_data.mfpath - data_str = 'name = {}\nmodel_type = {}\nversion = {}\nmodel_' \ - 'relative_path = {}' \ - '\n\n'.format(self.name, self.model_type, self.version, - file_mgr.model_relative_path[self.name]) - - for package in self.packagelist: - pk_str = package._get_data_str(formal, False) - if formal: - if len(pk_str.strip()) > 0: - data_str = '{}###################\nPackage {}\n' \ - '###################\n\n' \ - '{}\n'.format(data_str, package._get_pname(), - pk_str) - else: - pk_str = package._get_data_str(formal, False) - if len(pk_str.strip()) > 0: - data_str = '{}###################\nPackage {}\n' \ - '###################\n\n' \ - '{}\n'.format(data_str, package._get_pname(), - pk_str) - return data_str - - @property - def nper(self): - try: - return self.simulation.tdis.nper.array - except AttributeError: - return None - - @property - def modeltime(self): - tdis = self.simulation.get_package('tdis') - period_data = tdis.perioddata.get_data() - - # build steady state data - sto = self.get_package('sto') - if sto is None: - steady = np.full((len(period_data['perlen'])), True, dtype=bool) - else: - steady = np.full((len(period_data['perlen'])), False, dtype=bool) - ss_periods = sto.steady_state.get_active_key_dict() - tr_periods = sto.transient.get_active_key_dict() - if ss_periods: - last_ss_value = False - # loop through steady state array - for index, value in enumerate(steady): - # resolve if current index is steady state or transient - if index in ss_periods: - last_ss_value = True - elif index in tr_periods: - last_ss_value = False - if last_ss_value == True: - steady[index] = True - - # build model time - itmuni = tdis.time_units.get_data() - start_date_time = tdis.start_date_time.get_data() - if itmuni is None: - itmuni = 0 - if start_date_time is None: - start_date_time = '01-01-1970' - data_frame = {'perlen': period_data['perlen'], - 'nstp': period_data['nstp'], - 'tsmult': period_data['tsmult']} - self._model_time = ModelTime(data_frame, itmuni, start_date_time, - steady) - return self._model_time - - @property - def 
modeldiscrit(self): - if self.get_grid_type() == DiscretizationType.DIS: - dis = self.get_package('dis') - return StructuredGrid(nlay=dis.nlay.get_data(), - nrow=dis.nrow.get_data(), - ncol=dis.ncol.get_data()) - elif self.get_grid_type() == DiscretizationType.DISV: - dis = self.get_package('disv') - return VertexGrid(ncpl=dis.ncpl.get_data(), - nlay=dis.nlay.get_data()) - elif self.get_grid_type() == DiscretizationType.DISU: - dis = self.get_package('disu') - return UnstructuredGrid(nodes=dis.nodes.get_data()) - - @property - def modelgrid(self): - if not self._mg_resync: - return self._modelgrid - if self.get_grid_type() == DiscretizationType.DIS: - dis = self.get_package('dis') - if not hasattr(dis, '_init_complete'): - if not hasattr(dis, 'delr'): - # dis package has not yet been initialized - return self._modelgrid - else: - # dis package has been partially initialized - self._modelgrid = StructuredGrid( - delc=dis.delc.array, delr=dis.delr.array, - top=None, botm=None, idomain=None, lenuni=None, - proj4=self._modelgrid.proj4, epsg=self._modelgrid.epsg, - xoff=self._modelgrid.xoffset, - yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot) - else: - self._modelgrid = StructuredGrid( - delc=dis.delc.array, delr=dis.delr.array, - top=dis.top.array, botm=dis.botm.array, - idomain=dis.idomain.array, lenuni=dis.length_units.array, - proj4=self._modelgrid.proj4, epsg=self._modelgrid.epsg, - xoff=self._modelgrid.xoffset, yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot) - elif self.get_grid_type() == DiscretizationType.DISV: - dis = self.get_package('disv') - if not hasattr(dis, '_init_complete'): - if not hasattr(dis, 'cell2d'): - # disv package has not yet been initialized - return self._modelgrid - else: - # disv package has been partially initialized - self._modelgrid = VertexGrid(vertices=dis.vertices.array, - cell2d=dis.cell2d.array, - top=None, - botm=None, - idomain=None, - lenuni=None, - proj4=self._modelgrid.proj4, - epsg=self._modelgrid.epsg, - xoff=self._modelgrid.xoffset, - yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot) - else: - self._modelgrid = VertexGrid( - vertices=dis.vertices.array, cell2d=dis.cell2d.array, - top=dis.top.array, botm=dis.botm.array, - idomain=dis.idomain.array, lenuni=dis.length_units.array, - proj4=self._modelgrid.proj4, epsg=self._modelgrid.epsg, - xoff=self._modelgrid.xoffset, yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot) - elif self.get_grid_type() == DiscretizationType.DISU: - dis = self.get_package('disu') - if not hasattr(dis, '_init_complete'): - # disu package has not yet been fully initialized - return self._modelgrid - cell2d = dis.cell2d.array - idomain = np.ones(dis.nodes.array, np.int32) - if cell2d is None: - if self.simulation.simulation_data.verbosity_level.value >= \ - VerbosityLevel.normal.value: - print('WARNING: cell2d information missing. Functionality of ' - 'the UnstructuredGrid will be limited.') - iverts = None - xcenters = None - ycenters = None - else: - iverts = [list(i)[4:] for i in cell2d] - xcenters = dis.cell2d.array['xc'] - ycenters = dis.cell2d.array['yc'] - vertices = dis.vertices.array - if vertices is None: - if self.simulation.simulation_data.verbosity_level.value >= \ - VerbosityLevel.normal.value: - print('WARNING: vertices information missing. 
Functionality ' - 'of the UnstructuredGrid will be limited.') - vertices = None - else: - vertices = np.array(vertices) - - self._modelgrid = UnstructuredGrid( - vertices=vertices, iverts=iverts, - xcenters=xcenters, - ycenters=ycenters, top=dis.top.array, - botm=dis.bot.array, idomain=idomain, - lenuni=dis.length_units.array, proj4=self._modelgrid.proj4, - epsg=self._modelgrid.epsg, xoff=self._modelgrid.xoffset, - yoff=self._modelgrid.yoffset, angrot=self._modelgrid.angrot, - nodes=dis.nodes.get_data()) - elif self.get_grid_type() == DiscretizationType.DISL: - dis = self.get_package('disl') - if not hasattr(dis, '_init_complete'): - if not hasattr(dis, 'cell1d'): - # disv package has not yet been initialized - return self._modelgrid - else: - # disv package has been partially initialized - self._modelgrid = VertexGrid(vertices=dis.vertices.array, - cell1d=dis.cell1d.array, - top=None, - botm=None, - idomain=None, - lenuni=None, - proj4=self._modelgrid.proj4, - epsg=self._modelgrid.epsg, - xoff=self._modelgrid.xoffset, - yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot) - else: - self._modelgrid = VertexGrid( - vertices=dis.vertices.array, cell1d=dis.cell1d.array, - top=dis.top.array, botm=dis.botm.array, - idomain=dis.idomain.array, lenuni=dis.length_units.array, - proj4=self._modelgrid.proj4, epsg=self._modelgrid.epsg, - xoff=self._modelgrid.xoffset, yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot) - else: - return self._modelgrid - - if self.get_grid_type() != DiscretizationType.DISV: - # get coordinate data from dis file - xorig = dis.xorigin.get_data() - yorig = dis.yorigin.get_data() - angrot = dis.angrot.get_data() - else: - xorig = self._modelgrid.xoffset - yorig = self._modelgrid.yoffset - angrot = self._modelgrid.angrot - - # resolve offsets - if xorig is None: - xorig = self._modelgrid.xoffset - if xorig is None: - if self._xul is not None: - xorig = self._modelgrid._xul_to_xll(self._xul) - else: - xorig = 0.0 - if yorig is None: - yorig = self._modelgrid.yoffset - if yorig is None: - if self._yul is not None: - yorig = self._modelgrid._yul_to_yll(self._yul) - else: - yorig = 0.0 - if angrot is None: - angrot = self._modelgrid.angrot - self._modelgrid.set_coord_info(xorig, yorig, angrot, - self._modelgrid.epsg, - self._modelgrid.proj4) - self._mg_resync = not self._modelgrid.is_complete - return self._modelgrid - - @property - def packagelist(self): - return self._packagelist - - @property - def namefile(self): - return self.model_nam_file - - @property - def model_ws(self): - file_mgr = self.simulation_data.mfpath - return file_mgr.get_model_path(self.name) - - @property - def exename(self): - return self.exe_name - - @property - def version(self): - return self._version - - @property - def solver_tols(self): - ims = self.get_ims_package() - if ims is not None: - rclose = ims.rcloserecord.get_data() - if rclose is not None: - rclose = rclose[0][0] - return ims.inner_hclose.get_data(), rclose - return None - - @property - def laytyp(self): - try: - return self.npf.icelltype.array - except AttributeError: - return None - - @property - def hdry(self): - return -1e30 - - @property - def hnoflo(self): - return 1e30 - - @property - def laycbd(self): - return None - - def export(self, f, **kwargs): - from ..export import utils - return utils.model_export(f, self, **kwargs) - - @property - def verbose(self): - return self._verbose - - @verbose.setter - def verbose(self, verbose): - self._verbose = verbose - - def check(self, f=None, verbose=True, level=1): - 
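A note on the modelgrid property above: the grid is only rebuilt while `_mg_resync` is set, and `_mg_resync` stays true until the discretization package is complete, so repeated accesses after load return the cached Grid cheaply. A usage sketch, assuming a loaded simulation `sim` containing a GWF model named 'model':

    gwf = sim.get_model('model')
    grid = gwf.modelgrid      # assembled from the DIS/DISV/DISU package
    print(grid.extent)        # (xmin, xmax, ymin, ymax) in model coordinates
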
""" - Check model data for common errors. - - Parameters - ---------- - f : str or file handle - String defining file name or file handle for summary file - of check method output. If a string is passed a file handle - is created. If f is None, check method does not write - results to a summary file. (default is None) - verbose : bool - Boolean flag used to determine if check method results are - written to the screen - level : int - Check method analysis level. If level=0, summary checks are - performed. If level=1, full checks are performed. - - Returns - ------- - None - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow.load('model.nam') - >>> m.check() - """ - # check instance for model-level check - chk = mf6check(self, f=f, verbose=verbose, level=level) - - return self._check(chk, level) - - @classmethod - def load_base(cls, simulation, structure, modelname='NewModel', - model_nam_file='modflowtest.nam', mtype='gwf', version='mf6', - exe_name='mf6.exe', strict=True, model_rel_path='.', - load_only=None): - """ - Load an existing model. - - Parameters - ---------- - simulation : MFSimulation - simulation object that this model is a part of - simulation_data : MFSimulationData - simulation data object - structure : MFModelStructure - structure of this type of model - model_name : string - name of the model - model_nam_file : string - relative path to the model name file from model working folder - version : string - version of modflow - exe_name : string - model executable name - model_ws : string - model working folder relative to simulation working folder - strict : boolean - strict mode when loading files - model_rel_path : string - relative path of model folder to simulation folder - load_only : list - list of package abbreviations or package names corresponding to - packages that flopy will load. default is None, which loads all - packages. the discretization packages will load regardless of this - setting. subpackages, like time series and observations, will also - load regardless of this setting. 
- example list: ['ic', 'maw', 'npf', 'oc', 'my_well_package_1'] - - Returns - ------- - model : MFModel - - Examples - -------- - """ - instance = cls(simulation, mtype, modelname, - model_nam_file=model_nam_file, - version=version, exe_name=exe_name, - add_to_simulation=False, structure=structure, - model_rel_path=model_rel_path) - - # build case consistent load_only dictionary for quick lookups - load_only = instance._load_only_dict(load_only) - - # load name file - instance.name_file.load(strict) - - # order packages - vnum = mfstructure.MFStructure().get_version_string() - # FIX: Transport - Priority packages maybe should not be hard coded - priority_packages = {'dis{}'.format(vnum): 1,'disv{}'.format(vnum): 1, - 'disu{}'.format(vnum): 1} - packages_ordered = [] - package_recarray = instance.simulation_data.mfdata[(modelname, 'nam', - 'packages', - 'packages')] - for item in package_recarray.get_data(): - if item[0] in priority_packages: - packages_ordered.insert(0, (item[0], item[1], item[2])) - else: - packages_ordered.append((item[0], item[1], item[2])) - - # load packages - sim_struct = mfstructure.MFStructure().sim_struct - instance._ftype_num_dict = {} - for ftype, fname, pname in packages_ordered: - ftype_orig = ftype - ftype = ftype[0:-1].lower() - if ftype in structure.package_struct_objs or ftype in \ - sim_struct.utl_struct_objs: - if load_only is not None and not \ - instance._in_pkg_list(priority_packages, ftype_orig, - pname) \ - and not instance._in_pkg_list(load_only, ftype_orig, - pname): - if simulation.simulation_data.verbosity_level.value >= \ - VerbosityLevel.normal.value: - print(' skipping package {}...'.format(ftype)) - continue - if model_rel_path and model_rel_path != '.': - # strip off model relative path from the file path - filemgr = simulation.simulation_data.mfpath - fname = filemgr.strip_model_relative_path(modelname, - fname) - if simulation.simulation_data.verbosity_level.value >= \ - VerbosityLevel.normal.value: - print(' loading package {}...'.format(ftype)) - # load package - instance.load_package(ftype, fname, pname, strict, None) - - # load referenced packages - if modelname in instance.simulation_data.referenced_files: - for ref_file in \ - instance.simulation_data.referenced_files[modelname].values(): - if (ref_file.file_type in structure.package_struct_objs or - ref_file.file_type in sim_struct.utl_struct_objs) and \ - not ref_file.loaded: - instance.load_package(ref_file.file_type, - ref_file.file_name, None, strict, - ref_file.reference_path) - ref_file.loaded = True - - # TODO: fix jagged lists where appropriate - - return instance - - def write(self, ext_file_action=ExtFileAction.copy_relative_paths): - """ - write model to model files - - Parameters - ---------- - ext_file_action : ExtFileAction - defines what to do with external files when the simulation path has - changed. defaults to copy_relative_paths which copies only files - with relative paths, leaving files defined by absolute paths fixed. 
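The three ExtFileAction members defined in mfbase.py cover the options: copy_all copies every external file into the new location, copy_relative_paths (the default here) copies only files addressed by relative paths, and copy_none copies nothing, rewriting relative paths against the old workspace instead. A minimal sketch, assuming a model instance `gwf` whose simulation path has changed:

    from flopy.mf6.mfbase import ExtFileAction
    gwf.write(ext_file_action=ExtFileAction.copy_all)
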
- - Returns - ------- - - Examples - -------- - """ - - # write name file - if self.simulation_data.verbosity_level.value >= \ - VerbosityLevel.normal.value: - print(' writing model name file...') - - self.name_file.write(ext_file_action=ext_file_action) - - # write packages - for pp in self.packagelist: - if self.simulation_data.verbosity_level.value >= \ - VerbosityLevel.normal.value: - print(' writing package {}...'.format(pp._get_pname())) - pp.write(ext_file_action=ext_file_action) - - def get_grid_type(self): - """ - Return the type of grid used by model 'model_name' in simulation - containing simulation data 'simulation_data'. - - Returns - ------- - grid type : DiscretizationType - """ - package_recarray = self.name_file.packages - structure = mfstructure.MFStructure() - if package_recarray.search_data( - 'dis{}'.format(structure.get_version_string()), - 0) is not None: - return DiscretizationType.DIS - elif package_recarray.search_data( - 'disv{}'.format(structure.get_version_string()), - 0) is not None: - return DiscretizationType.DISV - elif package_recarray.search_data( - 'disu{}'.format(structure.get_version_string()), - 0) is not None: - return DiscretizationType.DISU - elif package_recarray.search_data( - 'disl{}'.format(structure.get_version_string()), - 0) is not None: - return DiscretizationType.DISL - - return DiscretizationType.UNDEFINED - - def get_ims_package(self): - solution_group = self.simulation.name_file.solutiongroup.get_data() - for record in solution_group: - for model_name in record[2:]: - if model_name == self.name: - return self.simulation.get_ims_package(record[1]) - return None - - def get_steadystate_list(self): - ss_list = [] - tdis = self.simulation.get_package('tdis') - period_data = tdis.perioddata.get_data() - index = 0 - pd_len = len(period_data) - while index < pd_len: - ss_list.append(True) - index += 1 - - storage = self.get_package('sto') - if storage is not None: - tr_keys = storage.transient.get_keys(True) - ss_keys = storage.steady_state.get_keys(True) - for key in tr_keys: - ss_list[key] = False - for ss_list_key in range(key + 1, len(ss_list)): - for ss_key in ss_keys: - if ss_key == ss_list_key: - break - ss_list[key] = False - return ss_list - - def is_valid(self): - """ - checks the validity of the model and all of its packages - - Parameters - ---------- - - Returns - ------- - valid : boolean - - Examples - -------- - """ - - # valid name file - if not self.name_file.is_valid(): - return False - - # valid packages - for pp in self.packagelist: - if not pp.is_valid(): - return False - - # required packages exist - for package_struct in self.structure.package_struct_objs.values(): - if not package_struct.optional and not package_struct.file_type \ - in self.package_type_dict: - return False - - return True - - def set_model_relative_path(self, model_ws): - """ - sets the file path to the model folder relative to the simulation - folder and updates all model file paths, placing them in the model - folder - - Parameters - ---------- - model_ws : string - model working folder relative to simulation working folder - - Returns - ------- - - Examples - -------- - """ - # update path in the file manager - file_mgr = self.simulation_data.mfpath - file_mgr.set_last_accessed_model_path() - path = file_mgr.string_to_file_path(model_ws) - file_mgr.model_relative_path[self.name] = path - - if model_ws and model_ws != '.' 
and self.simulation.name_file is not \ - None: - # update model name file location in simulation name file - models = self.simulation.name_file.models - models_data = models.get_data() - for index, entry in enumerate(models_data): - old_model_file_name = os.path.split(entry[1])[1] - old_model_base_name = os.path.splitext(old_model_file_name)[0] - if old_model_base_name.lower() == self.name.lower() or \ - self.name == entry[2]: - models_data[index][1] = os.path.join(path, - old_model_file_name) - break - models.set_data(models_data) - - if self.name_file is not None: - # update listing file location in model name file - list_file = self.name_file.list.get_data() - if list_file: - path, list_file_name = os.path.split(list_file) - try: - self.name_file.list.set_data(os.path.join( - path, list_file_name)) - except MFDataException as mfde: - message = 'Error occurred while setting relative ' \ - 'path "{}" in model '\ - '"{}".'.format(os.path.join(path, - list_file_name), - self.name) - raise MFDataException(mfdata_except=mfde, - model=self.model_name, - package=self.name_file. - _get_pname(), - message=message) - # update package file locations in model name file - packages = self.name_file.packages - packages_data = packages.get_data() - for index, entry in enumerate(packages_data): - old_package_name = os.path.split(entry[1])[1] - packages_data[index][1] = os.path.join(path, - old_package_name) - packages.set_data(packages_data) - - # update files referenced from within packages - for package in self.packagelist: - package.set_model_relative_path(model_ws) - - def _remove_package_from_dictionaries(self, package): - # remove package from local dictionaries and lists - if package.path in self._package_paths: - del self._package_paths[package.path] - self._remove_package(package) - - def remove_package(self, package_name): - """ - removes a package and all child packages from the model - - Parameters - ---------- - package_name : str - package name, package type, or package object to be removed from - the model - - Returns - ------- - - Examples - -------- - """ - if isinstance(package_name, MFPackage): - packages = [package_name] - else: - packages = self.get_package(package_name) - if not isinstance(packages, list): - packages = [packages] - for package in packages: - if package.model_or_sim.name != self.name: - except_text = 'Package can not be removed from model {} ' \ - 'since it is ' \ - 'not part of ' - raise mfstructure.FlopyException(except_text) - - self._remove_package_from_dictionaries(package) - - try: - # remove package from name file - package_data = self.name_file.packages.get_data() - except MFDataException as mfde: - message = 'Error occurred while reading package names ' \ - 'from name file in model ' \ - '"{}".'.format(self.name) - raise MFDataException(mfdata_except=mfde, - model=self.model_name, - package=self.name_file._get_pname(), - message=message) - try: - new_rec_array = None - for item in package_data: - if item[1] != package._filename: - if new_rec_array is None: - new_rec_array = np.rec.array([item.tolist()], - package_data.dtype) - else: - new_rec_array = np.hstack((item, new_rec_array)) - except: - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.structure.get_model(), - self.structure.get_package(), - self._path, - 'building package recarray', - self.structure.name, - inspect.stack()[0][3], - type_, value_, traceback_, None, - self._simulation_data.debug) - try: - self.name_file.packages.set_data(new_rec_array) - except MFDataException as 
mfde: - message = 'Error occurred while setting package names ' \ - 'from name file in model "{}". Package name ' \ - 'data:\n{}'.format(self.name, new_rec_array) - raise MFDataException(mfdata_except=mfde, - model=self.model_name, - package=self.name_file._get_pname(), - message=message) - - # build list of child packages - child_package_list = [] - for pkg in self.packagelist: - if pkg.parent_file is not None and pkg.parent_file.path == \ - package.path: - child_package_list.append(pkg) - # remove child packages - for child_package in child_package_list: - self._remove_package_from_dictionaries(child_package) - - def rename_all_packages(self, name): - package_type_count = {} - self.name_file.filename = '{}.nam'.format(name) - for package in self.packagelist: - if package.package_type not in package_type_count: - package.filename = '{}.{}'.format(name, package.package_type) - package_type_count[package.package_type] = 1 - else: - package_type_count[package.package_type] += 1 - package.filename = '{}_{}.{}'.format( - name, package_type_count[package.package_type], - package.package_type) - - def set_all_data_external(self): - for package in self.packagelist: - package.set_all_data_external() - - def register_package(self, package, add_to_package_list=True, - set_package_name=True, set_package_filename=True): - """ - registers a package with the model - - Parameters - ---------- - package : MFPackage - package to register - add_to_package_list : bool - add package to lookup list - set_package_name : bool - produce a package name for this package - set_package_filename : bool - produce a filename for this package - - Returns - ------- - (path : tuple, package structure : MFPackageStructure) - - Examples - -------- - """ - package.container_type = [PackageContainerType.model] - if package.parent_file is not None: - path = package.parent_file.path + (package.package_type,) - else: - path = (self.name, package.package_type) - package_struct = \ - self.structure.get_package_struct(package.package_type) - if add_to_package_list and path in self._package_paths: - if not package_struct.multi_package_support: - # package of this type already exists, replace it - self.remove_package(package.package_type) - if self.simulation_data.verbosity_level.value >= \ - VerbosityLevel.normal.value: - print('WARNING: Package with type {} already exists. ' - 'Replacing existing package' - '.'.format(package.package_type)) - elif not set_package_name and package.package_name in \ - self.package_name_dict: - # package of this type with this name already - # exists, replace it - self.remove_package( - self.package_name_dict[package.package_name]) - if self.simulation_data.verbosity_level.value >= \ - VerbosityLevel.normal.value: - print( - 'WARNING: Package with name {} already exists. 
' - 'Replacing existing package' - '.'.format(package.package_name)) - - # make sure path is unique - if path in self._package_paths: - path_iter = datautil.PathIter(path) - for new_path in path_iter: - if new_path not in self._package_paths: - path = new_path - break - self._package_paths[path] = 1 - - if package.package_type.lower() == 'nam': - return path, self.structure.name_file_struct_obj - - if set_package_name: - # produce a default package name - if package_struct is not None and \ - package_struct.multi_package_support: - # check for other registered packages of this type - name_iter = datautil.NameIter(package.package_type, False) - for package_name in name_iter: - if package_name not in self.package_name_dict: - package.package_name = package_name - break - else: - package.package_name = package.package_type - - if set_package_filename: - package._filename = '{}.{}'.format(self.name, package.package_type) - - if add_to_package_list: - self._add_package(package, path) - - # add obs file to name file if it does not have a parent - if package.package_type in self.structure.package_struct_objs or \ - (package.package_type == 'obs' and package.parent_file is None): - # update model name file - pkg_type = package.package_type.upper() - if len(pkg_type) > 3 and pkg_type[-1] == 'A': - pkg_type = pkg_type[0:-1] - # Model Assumption - assuming all name files have a package - # recarray - self.name_file.packages.\ - update_record(['{}6'.format(pkg_type), package._filename, - package.package_name], 0) - if package_struct is not None: - return (path, package_struct) - else: - if self.simulation_data.verbosity_level.value >= \ - VerbosityLevel.normal.value: - print('WARNING: Unable to register unsupported file type {} ' - 'for model {}.'.format(package.package_type, self.name)) - return None, None - - def load_package(self, ftype, fname, pname, strict, ref_path, - dict_package_name=None, parent_package=None): - """ - loads a package from a file - - Parameters - ---------- - ftype : string - the file type - fname : string - the name of the file containing the package input - pname : string - the user-defined name for the package - strict : bool - strict mode when loading the file - ref_path : string - path to the file. 
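Worth noting for the multi-package branch below: when pname is not given, the dictionary name is generated as '<ftype>_<n>' from a per-type counter, so loading two recharge files yields 'rch_0' and 'rch_1'. A direct-call sketch, assuming a model `gwf` and an existing input file (normally load_base drives this method):

    pkg = gwf.load_package('rch', 'model.rch', None, True, None)
    print(pkg.package_name)   # e.g. 'rch_0' on first load
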
uses local path if set to None - dict_package_name : string - package name for dictionary lookup - parent_package : MFPackage - parent package - - Examples - -------- - """ - if ref_path is not None: - fname = os.path.join(ref_path, fname) - sim_struct = mfstructure.MFStructure().sim_struct - if (ftype in self.structure.package_struct_objs and - self.structure.package_struct_objs[ftype].multi_package_support) or \ - (ftype in sim_struct.utl_struct_objs and - sim_struct.utl_struct_objs[ftype].multi_package_support): - # resolve dictionary name for package - if dict_package_name is not None: - if parent_package is not None: - dict_package_name = '{}_{}'.format(parent_package.path[-1], - ftype) - else: - # use dict_package_name as the base name - if ftype in self._ftype_num_dict: - self._ftype_num_dict[dict_package_name] += 1 - else: - self._ftype_num_dict[dict_package_name] = 0 - dict_package_name = '{}_{}'.format(dict_package_name, - self._ftype_num_dict[ - dict_package_name]) - else: - # use ftype as the base name - if ftype in self._ftype_num_dict: - self._ftype_num_dict[ftype] += 1 - else: - self._ftype_num_dict[ftype] = 0 - if pname is not None: - dict_package_name = pname - else: - dict_package_name = '{}_{}'.format(ftype, - self._ftype_num_dict[ - ftype]) - else: - dict_package_name = ftype - - # clean up model type text - model_type = self.structure.model_type - while datautil.DatumUtil.is_int(model_type[-1]): - model_type = model_type[0:-1] - - # create package - package_obj = self.package_factory(ftype, model_type) - package = package_obj(self, filename=fname, pname=dict_package_name, - loading_package=True, - parent_file=parent_package) - try: - package.load(strict) - except ReadAsArraysException: - # create ReadAsArrays package and load it instead - package_obj = self.package_factory('{}a'.format(ftype), model_type) - package = package_obj(self, filename=fname, pname=dict_package_name, - loading_package=True, - parent_file=parent_package) - package.load(strict) - - # register child package with the model - self._add_package(package, package.path) - if parent_package is not None: - # register child package with the parent package - parent_package._add_package(package, package.path) - - return package - - def plot(self, SelPackList=None, **kwargs): - """ - Plot 2-D, 3-D, transient 2-D, and stress period list (MfList) - model input data from a model instance - - Args: - model: Flopy model instance - SelPackList: (list) list of package names to plot, if none - all packages will be plotted - - **kwargs : dict - filename_base : str - Base file name that will be used to automatically generate file - names for output image files. Plots will be exported as image - files if file_name_base is not None. (default is None) - file_extension : str - Valid matplotlib.pyplot file extension for savefig(). Only used - if filename_base is not None. (default is 'png') - mflay : int - MODFLOW zero-based layer number to return. If None, then all - all layers will be included. (default is None) - kper : int - MODFLOW zero-based stress period number to return. - (default is zero) - key : str - MfList dictionary key. (default is None) - - Returns: - axes : list - Empty list is returned if filename_base is not None. Otherwise - a list of matplotlib.pyplot.axis are returned. - """ - from flopy.plot.plotutil import PlotUtilities - - axes = PlotUtilities._plot_model_helper(self, - SelPackList=SelPackList, - **kwargs) - +""" +mfmodel module. 
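The plot method that closes the class defers to PlotUtilities._plot_model_helper, so the usual flopy plotting kwargs apply. A short usage sketch, assuming a model `gwf` with a RIV package:

    axes = gwf.plot(SelPackList=['RIV'], mflay=0, kper=0)
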
Contains the MFModel class + +""" +import os, sys, inspect, warnings +import numpy as np +from .mfbase import PackageContainer, ExtFileAction, PackageContainerType, \ + MFDataException, ReadAsArraysException, FlopyException, \ + VerbosityLevel +from .mfpackage import MFPackage +from .coordinates import modeldimensions +from ..utils import datautil +from ..discretization.structuredgrid import StructuredGrid +from ..discretization.vertexgrid import VertexGrid +from ..discretization.unstructuredgrid import UnstructuredGrid +from ..discretization.grid import Grid +from flopy.discretization.modeltime import ModelTime +from ..mbase import ModelInterface +from .utils.mfenums import DiscretizationType +from .data import mfstructure +from ..utils.check import mf6check + + +class MFModel(PackageContainer, ModelInterface): + """ + MODFLOW Model Class. Represents a single model in a simulation. + + Parameters + ---------- + simulation_data : MFSimulationData + simulation data object + structure : MFModelStructure + structure of this type of model + modelname : string + name of the model + model_nam_file : string + relative path to the model name file from model working folder + version : string + version of modflow + exe_name : string + model executable name + model_ws : string + model working folder path + disfile : string + relative path to dis file from model working folder + grid_type : string + type of grid the model will use (structured, unstructured, vertices) + verbose : bool + verbose setting for model operations (default False) + + Attributes + ---------- + model_name : string + name of the model + exe_name : string + model executable name + packages : OrderedDict(MFPackage) + dictionary of model packages + _name_file_io : MFNameFile + name file + + Methods + ------- + load : (simulation : MFSimulationData, model_name : string, + namfile : string, type : string, version : string, exe_name : string, + model_ws : string, strict : boolean) : MFSimulation + a class method that loads a model from files + write + writes the simulation to files + remove_package : (package_name : string) + removes package from the model. 
package_name can be the + package's name, type, or package object to be removed from + the model + set_model_relative_path : (path : string) + sets the file path to the model folder and updates all model file paths + is_valid : () : boolean + checks the validity of the model and all of its packages + rename_all_packages : (name : string) + renames all packages in the model + set_all_data_external + sets the model's list and array data to be stored externally + + See Also + -------- + + Notes + ----- + + Examples + -------- + + """ + def __init__(self, simulation, model_type='gwf6', modelname='model', + model_nam_file=None, version='mf6', + exe_name='mf6.exe', add_to_simulation=True, + structure=None, model_rel_path='.', verbose=False, **kwargs): + super(MFModel, self).__init__(simulation.simulation_data, modelname) + self.simulation = simulation + self.simulation_data = simulation.simulation_data + self.name = modelname + self.name_file = None + self._version = version + self.model_type = model_type + self.type = 'Model' + + if model_nam_file is None: + model_nam_file = '{}.nam'.format(modelname) + + if add_to_simulation: + self.structure = simulation.register_model(self, model_type, + modelname, + model_nam_file) + else: + self.structure = structure + self.set_model_relative_path(model_rel_path) + self.exe_name = exe_name + self.dimensions = modeldimensions.ModelDimensions(self.name, + self.simulation_data) + self.simulation_data.model_dimensions[modelname] = self.dimensions + self._ftype_num_dict = {} + self._package_paths = {} + self._verbose = verbose + + if model_nam_file is None: + self.model_nam_file = '{}.nam'.format(modelname) + else: + self.model_nam_file = model_nam_file + + # check for spatial reference info in kwargs + xll = kwargs.pop("xll", None) + yll = kwargs.pop("yll", None) + self._xul = kwargs.pop("xul", None) + if self._xul is not None: + warnings.warn('xul/yul have been deprecated. Use xll/yll instead.', + DeprecationWarning) + self._yul = kwargs.pop("yul", None) + if self._yul is not None: + warnings.warn('xul/yul have been deprecated. Use xll/yll instead.', + DeprecationWarning) + rotation = kwargs.pop("rotation", 0.) 
+ proj4 = kwargs.pop("proj4_str", None) + # build model grid object + self._modelgrid = Grid(proj4=proj4, xoff=xll, yoff=yll, + angrot=rotation) + + self.start_datetime = None + # check for extraneous kwargs + if len(kwargs) > 0: + kwargs_str = ', '.join(kwargs.keys()) + excpt_str = 'Extraneous kwargs "{}" provided to ' \ + 'MFModel.'.format(kwargs_str) + raise FlopyException(excpt_str) + + # build model name file + # create name file based on model type - support different model types + package_obj = self.package_factory('nam', model_type[0:3]) + if not package_obj: + excpt_str = 'Name file could not be found for model' \ + '{}.'.format(model_type[0:3]) + raise FlopyException(excpt_str) + + self.name_file = package_obj(self, filename=self.model_nam_file, + pname=self.name) + + def __getattr__(self, item): + """ + __getattr__ - used to allow for getting packages as if they are + attributes + + Parameters + ---------- + item : str + 3 character package name (case insensitive) + + + Returns + ------- + pp : Package object + Package object of type :class:`flopy.pakbase.Package` + + """ + if item == 'name_file' or not hasattr(self, 'name_file'): + raise AttributeError(item) + + package = self.get_package(item) + if package is not None: + return package + raise AttributeError(item) + + def __repr__(self): + return self._get_data_str(True) + + def __str__(self): + return self._get_data_str(False) + + def _get_data_str(self, formal): + file_mgr = self.simulation_data.mfpath + data_str = 'name = {}\nmodel_type = {}\nversion = {}\nmodel_' \ + 'relative_path = {}' \ + '\n\n'.format(self.name, self.model_type, self.version, + file_mgr.model_relative_path[self.name]) + + for package in self.packagelist: + pk_str = package._get_data_str(formal, False) + if formal: + if len(pk_str.strip()) > 0: + data_str = '{}###################\nPackage {}\n' \ + '###################\n\n' \ + '{}\n'.format(data_str, package._get_pname(), + pk_str) + else: + pk_str = package._get_data_str(formal, False) + if len(pk_str.strip()) > 0: + data_str = '{}###################\nPackage {}\n' \ + '###################\n\n' \ + '{}\n'.format(data_str, package._get_pname(), + pk_str) + return data_str + + @property + def nper(self): + try: + return self.simulation.tdis.nper.array + except AttributeError: + return None + + @property + def modeltime(self): + tdis = self.simulation.get_package('tdis') + period_data = tdis.perioddata.get_data() + + # build steady state data + sto = self.get_package('sto') + if sto is None: + steady = np.full((len(period_data['perlen'])), True, dtype=bool) + else: + steady = np.full((len(period_data['perlen'])), False, dtype=bool) + ss_periods = sto.steady_state.get_active_key_dict() + tr_periods = sto.transient.get_active_key_dict() + if ss_periods: + last_ss_value = False + # loop through steady state array + for index, value in enumerate(steady): + # resolve if current index is steady state or transient + if index in ss_periods: + last_ss_value = True + elif index in tr_periods: + last_ss_value = False + if last_ss_value == True: + steady[index] = True + + # build model time + itmuni = tdis.time_units.get_data() + start_date_time = tdis.start_date_time.get_data() + if itmuni is None: + itmuni = 0 + if start_date_time is None: + start_date_time = '01-01-1970' + data_frame = {'perlen': period_data['perlen'], + 'nstp': period_data['nstp'], + 'tsmult': period_data['tsmult']} + self._model_time = ModelTime(data_frame, itmuni, start_date_time, + steady) + return self._model_time + + @property + def 
modeldiscrit(self): + if self.get_grid_type() == DiscretizationType.DIS: + dis = self.get_package('dis') + return StructuredGrid(nlay=dis.nlay.get_data(), + nrow=dis.nrow.get_data(), + ncol=dis.ncol.get_data()) + elif self.get_grid_type() == DiscretizationType.DISV: + dis = self.get_package('disv') + return VertexGrid(ncpl=dis.ncpl.get_data(), + nlay=dis.nlay.get_data()) + elif self.get_grid_type() == DiscretizationType.DISU: + dis = self.get_package('disu') + return UnstructuredGrid(nodes=dis.nodes.get_data()) + + @property + def modelgrid(self): + if not self._mg_resync: + return self._modelgrid + if self.get_grid_type() == DiscretizationType.DIS: + dis = self.get_package('dis') + if not hasattr(dis, '_init_complete'): + if not hasattr(dis, 'delr'): + # dis package has not yet been initialized + return self._modelgrid + else: + # dis package has been partially initialized + self._modelgrid = StructuredGrid( + delc=dis.delc.array, delr=dis.delr.array, + top=None, botm=None, idomain=None, lenuni=None, + proj4=self._modelgrid.proj4, epsg=self._modelgrid.epsg, + xoff=self._modelgrid.xoffset, + yoff=self._modelgrid.yoffset, + angrot=self._modelgrid.angrot) + else: + self._modelgrid = StructuredGrid( + delc=dis.delc.array, delr=dis.delr.array, + top=dis.top.array, botm=dis.botm.array, + idomain=dis.idomain.array, lenuni=dis.length_units.array, + proj4=self._modelgrid.proj4, epsg=self._modelgrid.epsg, + xoff=self._modelgrid.xoffset, yoff=self._modelgrid.yoffset, + angrot=self._modelgrid.angrot) + elif self.get_grid_type() == DiscretizationType.DISV: + dis = self.get_package('disv') + if not hasattr(dis, '_init_complete'): + if not hasattr(dis, 'cell2d'): + # disv package has not yet been initialized + return self._modelgrid + else: + # disv package has been partially initialized + self._modelgrid = VertexGrid(vertices=dis.vertices.array, + cell2d=dis.cell2d.array, + top=None, + botm=None, + idomain=None, + lenuni=None, + proj4=self._modelgrid.proj4, + epsg=self._modelgrid.epsg, + xoff=self._modelgrid.xoffset, + yoff=self._modelgrid.yoffset, + angrot=self._modelgrid.angrot) + else: + self._modelgrid = VertexGrid( + vertices=dis.vertices.array, cell2d=dis.cell2d.array, + top=dis.top.array, botm=dis.botm.array, + idomain=dis.idomain.array, lenuni=dis.length_units.array, + proj4=self._modelgrid.proj4, epsg=self._modelgrid.epsg, + xoff=self._modelgrid.xoffset, yoff=self._modelgrid.yoffset, + angrot=self._modelgrid.angrot) + elif self.get_grid_type() == DiscretizationType.DISU: + dis = self.get_package('disu') + if not hasattr(dis, '_init_complete'): + # disu package has not yet been fully initialized + return self._modelgrid + cell2d = dis.cell2d.array + idomain = np.ones(dis.nodes.array, np.int32) + if cell2d is None: + if self.simulation.simulation_data.verbosity_level.value >= \ + VerbosityLevel.normal.value: + print('WARNING: cell2d information missing. Functionality of ' + 'the UnstructuredGrid will be limited.') + iverts = None + xcenters = None + ycenters = None + else: + iverts = [list(i)[4:] for i in cell2d] + xcenters = dis.cell2d.array['xc'] + ycenters = dis.cell2d.array['yc'] + vertices = dis.vertices.array + if vertices is None: + if self.simulation.simulation_data.verbosity_level.value >= \ + VerbosityLevel.normal.value: + print('WARNING: vertices information missing. 
Functionality ' + 'of the UnstructuredGrid will be limited.') + vertices = None + else: + vertices = np.array(vertices) + + self._modelgrid = UnstructuredGrid( + vertices=vertices, iverts=iverts, + xcenters=xcenters, + ycenters=ycenters, top=dis.top.array, + botm=dis.bot.array, idomain=idomain, + lenuni=dis.length_units.array, proj4=self._modelgrid.proj4, + epsg=self._modelgrid.epsg, xoff=self._modelgrid.xoffset, + yoff=self._modelgrid.yoffset, angrot=self._modelgrid.angrot, + nodes=dis.nodes.get_data()) + elif self.get_grid_type() == DiscretizationType.DISL: + dis = self.get_package('disl') + if not hasattr(dis, '_init_complete'): + if not hasattr(dis, 'cell1d'): + # disv package has not yet been initialized + return self._modelgrid + else: + # disv package has been partially initialized + self._modelgrid = VertexGrid(vertices=dis.vertices.array, + cell1d=dis.cell1d.array, + top=None, + botm=None, + idomain=None, + lenuni=None, + proj4=self._modelgrid.proj4, + epsg=self._modelgrid.epsg, + xoff=self._modelgrid.xoffset, + yoff=self._modelgrid.yoffset, + angrot=self._modelgrid.angrot) + else: + self._modelgrid = VertexGrid( + vertices=dis.vertices.array, cell1d=dis.cell1d.array, + top=dis.top.array, botm=dis.botm.array, + idomain=dis.idomain.array, lenuni=dis.length_units.array, + proj4=self._modelgrid.proj4, epsg=self._modelgrid.epsg, + xoff=self._modelgrid.xoffset, yoff=self._modelgrid.yoffset, + angrot=self._modelgrid.angrot) + else: + return self._modelgrid + + if self.get_grid_type() != DiscretizationType.DISV: + # get coordinate data from dis file + xorig = dis.xorigin.get_data() + yorig = dis.yorigin.get_data() + angrot = dis.angrot.get_data() + else: + xorig = self._modelgrid.xoffset + yorig = self._modelgrid.yoffset + angrot = self._modelgrid.angrot + + # resolve offsets + if xorig is None: + xorig = self._modelgrid.xoffset + if xorig is None: + if self._xul is not None: + xorig = self._modelgrid._xul_to_xll(self._xul) + else: + xorig = 0.0 + if yorig is None: + yorig = self._modelgrid.yoffset + if yorig is None: + if self._yul is not None: + yorig = self._modelgrid._yul_to_yll(self._yul) + else: + yorig = 0.0 + if angrot is None: + angrot = self._modelgrid.angrot + self._modelgrid.set_coord_info(xorig, yorig, angrot, + self._modelgrid.epsg, + self._modelgrid.proj4) + self._mg_resync = not self._modelgrid.is_complete + return self._modelgrid + + @property + def packagelist(self): + return self._packagelist + + @property + def namefile(self): + return self.model_nam_file + + @property + def model_ws(self): + file_mgr = self.simulation_data.mfpath + return file_mgr.get_model_path(self.name) + + @property + def exename(self): + return self.exe_name + + @property + def version(self): + return self._version + + @property + def solver_tols(self): + ims = self.get_ims_package() + if ims is not None: + rclose = ims.rcloserecord.get_data() + if rclose is not None: + rclose = rclose[0][0] + return ims.inner_hclose.get_data(), rclose + return None + + @property + def laytyp(self): + try: + return self.npf.icelltype.array + except AttributeError: + return None + + @property + def hdry(self): + return -1e30 + + @property + def hnoflo(self): + return 1e30 + + @property + def laycbd(self): + return None + + def export(self, f, **kwargs): + from ..export import utils + return utils.model_export(f, self, **kwargs) + + @property + def verbose(self): + return self._verbose + + @verbose.setter + def verbose(self, verbose): + self._verbose = verbose + + def check(self, f=None, verbose=True, level=1): + 
""" + Check model data for common errors. + + Parameters + ---------- + f : str or file handle + String defining file name or file handle for summary file + of check method output. If a string is passed a file handle + is created. If f is None, check method does not write + results to a summary file. (default is None) + verbose : bool + Boolean flag used to determine if check method results are + written to the screen + level : int + Check method analysis level. If level=0, summary checks are + performed. If level=1, full checks are performed. + + Returns + ------- + None + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow.load('model.nam') + >>> m.check() + """ + # check instance for model-level check + chk = mf6check(self, f=f, verbose=verbose, level=level) + + return self._check(chk, level) + + @classmethod + def load_base(cls, simulation, structure, modelname='NewModel', + model_nam_file='modflowtest.nam', mtype='gwf', version='mf6', + exe_name='mf6.exe', strict=True, model_rel_path='.', + load_only=None): + """ + Load an existing model. + + Parameters + ---------- + simulation : MFSimulation + simulation object that this model is a part of + simulation_data : MFSimulationData + simulation data object + structure : MFModelStructure + structure of this type of model + model_name : string + name of the model + model_nam_file : string + relative path to the model name file from model working folder + version : string + version of modflow + exe_name : string + model executable name + model_ws : string + model working folder relative to simulation working folder + strict : boolean + strict mode when loading files + model_rel_path : string + relative path of model folder to simulation folder + load_only : list + list of package abbreviations or package names corresponding to + packages that flopy will load. default is None, which loads all + packages. the discretization packages will load regardless of this + setting. subpackages, like time series and observations, will also + load regardless of this setting. 
+ example list: ['ic', 'maw', 'npf', 'oc', 'my_well_package_1'] + + Returns + ------- + model : MFModel + + Examples + -------- + """ + instance = cls(simulation, mtype, modelname, + model_nam_file=model_nam_file, + version=version, exe_name=exe_name, + add_to_simulation=False, structure=structure, + model_rel_path=model_rel_path) + + # build case consistent load_only dictionary for quick lookups + load_only = instance._load_only_dict(load_only) + + # load name file + instance.name_file.load(strict) + + # order packages + vnum = mfstructure.MFStructure().get_version_string() + # FIX: Transport - Priority packages maybe should not be hard coded + priority_packages = {'dis{}'.format(vnum): 1,'disv{}'.format(vnum): 1, + 'disu{}'.format(vnum): 1} + packages_ordered = [] + package_recarray = instance.simulation_data.mfdata[(modelname, 'nam', + 'packages', + 'packages')] + for item in package_recarray.get_data(): + if item[0] in priority_packages: + packages_ordered.insert(0, (item[0], item[1], item[2])) + else: + packages_ordered.append((item[0], item[1], item[2])) + + # load packages + sim_struct = mfstructure.MFStructure().sim_struct + instance._ftype_num_dict = {} + for ftype, fname, pname in packages_ordered: + ftype_orig = ftype + ftype = ftype[0:-1].lower() + if ftype in structure.package_struct_objs or ftype in \ + sim_struct.utl_struct_objs: + if load_only is not None and not \ + instance._in_pkg_list(priority_packages, ftype_orig, + pname) \ + and not instance._in_pkg_list(load_only, ftype_orig, + pname): + if simulation.simulation_data.verbosity_level.value >= \ + VerbosityLevel.normal.value: + print(' skipping package {}...'.format(ftype)) + continue + if model_rel_path and model_rel_path != '.': + # strip off model relative path from the file path + filemgr = simulation.simulation_data.mfpath + fname = filemgr.strip_model_relative_path(modelname, + fname) + if simulation.simulation_data.verbosity_level.value >= \ + VerbosityLevel.normal.value: + print(' loading package {}...'.format(ftype)) + # load package + instance.load_package(ftype, fname, pname, strict, None) + + # load referenced packages + if modelname in instance.simulation_data.referenced_files: + for ref_file in \ + instance.simulation_data.referenced_files[modelname].values(): + if (ref_file.file_type in structure.package_struct_objs or + ref_file.file_type in sim_struct.utl_struct_objs) and \ + not ref_file.loaded: + instance.load_package(ref_file.file_type, + ref_file.file_name, None, strict, + ref_file.reference_path) + ref_file.loaded = True + + # TODO: fix jagged lists where appropriate + + return instance + + def write(self, ext_file_action=ExtFileAction.copy_relative_paths): + """ + write model to model files + + Parameters + ---------- + ext_file_action : ExtFileAction + defines what to do with external files when the simulation path has + changed. defaults to copy_relative_paths which copies only files + with relative paths, leaving files defined by absolute paths fixed. 
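+            (a typical call is simply model.write(), which applies this
+            default)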
+ + Returns + ------- + + Examples + -------- + """ + + # write name file + if self.simulation_data.verbosity_level.value >= \ + VerbosityLevel.normal.value: + print(' writing model name file...') + + self.name_file.write(ext_file_action=ext_file_action) + + # write packages + for pp in self.packagelist: + if self.simulation_data.verbosity_level.value >= \ + VerbosityLevel.normal.value: + print(' writing package {}...'.format(pp._get_pname())) + pp.write(ext_file_action=ext_file_action) + + def get_grid_type(self): + """ + Return the type of grid used by model 'model_name' in simulation + containing simulation data 'simulation_data'. + + Returns + ------- + grid type : DiscretizationType + """ + package_recarray = self.name_file.packages + structure = mfstructure.MFStructure() + if package_recarray.search_data( + 'dis{}'.format(structure.get_version_string()), + 0) is not None: + return DiscretizationType.DIS + elif package_recarray.search_data( + 'disv{}'.format(structure.get_version_string()), + 0) is not None: + return DiscretizationType.DISV + elif package_recarray.search_data( + 'disu{}'.format(structure.get_version_string()), + 0) is not None: + return DiscretizationType.DISU + elif package_recarray.search_data( + 'disl{}'.format(structure.get_version_string()), + 0) is not None: + return DiscretizationType.DISL + + return DiscretizationType.UNDEFINED + + def get_ims_package(self): + solution_group = self.simulation.name_file.solutiongroup.get_data() + for record in solution_group: + for model_name in record[2:]: + if model_name == self.name: + return self.simulation.get_ims_package(record[1]) + return None + + def get_steadystate_list(self): + ss_list = [] + tdis = self.simulation.get_package('tdis') + period_data = tdis.perioddata.get_data() + index = 0 + pd_len = len(period_data) + while index < pd_len: + ss_list.append(True) + index += 1 + + storage = self.get_package('sto') + if storage is not None: + tr_keys = storage.transient.get_keys(True) + ss_keys = storage.steady_state.get_keys(True) + for key in tr_keys: + ss_list[key] = False + for ss_list_key in range(key + 1, len(ss_list)): + for ss_key in ss_keys: + if ss_key == ss_list_key: + break + ss_list[key] = False + return ss_list + + def is_valid(self): + """ + checks the validity of the model and all of its packages + + Parameters + ---------- + + Returns + ------- + valid : boolean + + Examples + -------- + """ + + # valid name file + if not self.name_file.is_valid(): + return False + + # valid packages + for pp in self.packagelist: + if not pp.is_valid(): + return False + + # required packages exist + for package_struct in self.structure.package_struct_objs.values(): + if not package_struct.optional and not package_struct.file_type \ + in self.package_type_dict: + return False + + return True + + def set_model_relative_path(self, model_ws): + """ + sets the file path to the model folder relative to the simulation + folder and updates all model file paths, placing them in the model + folder + + Parameters + ---------- + model_ws : string + model working folder relative to simulation working folder + + Returns + ------- + + Examples + -------- + """ + # update path in the file manager + file_mgr = self.simulation_data.mfpath + file_mgr.set_last_accessed_model_path() + path = file_mgr.string_to_file_path(model_ws) + file_mgr.model_relative_path[self.name] = path + + if model_ws and model_ws != '.' 
and self.simulation.name_file is not \
+                None:
+            # update model name file location in simulation name file
+            models = self.simulation.name_file.models
+            models_data = models.get_data()
+            for index, entry in enumerate(models_data):
+                old_model_file_name = os.path.split(entry[1])[1]
+                old_model_base_name = os.path.splitext(old_model_file_name)[0]
+                if old_model_base_name.lower() == self.name.lower() or \
+                        self.name == entry[2]:
+                    models_data[index][1] = os.path.join(path,
+                                                         old_model_file_name)
+                    break
+            models.set_data(models_data)
+
+            if self.name_file is not None:
+                # update listing file location in model name file
+                list_file = self.name_file.list.get_data()
+                if list_file:
+                    path, list_file_name = os.path.split(list_file)
+                    try:
+                        self.name_file.list.set_data(os.path.join(
+                            path, list_file_name))
+                    except MFDataException as mfde:
+                        message = 'Error occurred while setting relative ' \
+                                  'path "{}" in model ' \
+                                  '"{}".'.format(os.path.join(path,
+                                                              list_file_name),
+                                                 self.name)
+                        raise MFDataException(mfdata_except=mfde,
+                                              model=self.model_name,
+                                              package=self.name_file.
+                                              _get_pname(),
+                                              message=message)
+                # update package file locations in model name file
+                packages = self.name_file.packages
+                packages_data = packages.get_data()
+                for index, entry in enumerate(packages_data):
+                    old_package_name = os.path.split(entry[1])[1]
+                    packages_data[index][1] = os.path.join(path,
+                                                           old_package_name)
+                packages.set_data(packages_data)
+
+            # update files referenced from within packages
+            for package in self.packagelist:
+                package.set_model_relative_path(model_ws)
+
+    def _remove_package_from_dictionaries(self, package):
+        # remove package from local dictionaries and lists
+        if package.path in self._package_paths:
+            del self._package_paths[package.path]
+        self._remove_package(package)
+
+    def remove_package(self, package_name):
+        """
+        removes a package and all child packages from the model
+
+        Parameters
+        ----------
+        package_name : str
+            package name, package type, or package object to be removed from
+            the model
+
+        Returns
+        -------
+
+        Examples
+        --------
+        """
+        if isinstance(package_name, MFPackage):
+            packages = [package_name]
+        else:
+            packages = self.get_package(package_name)
+            if not isinstance(packages, list):
+                packages = [packages]
+        for package in packages:
+            if package.model_or_sim.name != self.name:
+                except_text = 'Package can not be removed from model {} ' \
+                              'since it is not part of the ' \
+                              'model.'.format(self.name)
+                raise FlopyException(except_text)
+
+            self._remove_package_from_dictionaries(package)
+
+            try:
+                # remove package from name file
+                package_data = self.name_file.packages.get_data()
+            except MFDataException as mfde:
+                message = 'Error occurred while reading package names ' \
+                          'from name file in model ' \
+                          '"{}".'.format(self.name)
+                raise MFDataException(mfdata_except=mfde,
+                                      model=self.model_name,
+                                      package=self.name_file._get_pname(),
+                                      message=message)
+            try:
+                new_rec_array = None
+                for item in package_data:
+                    if item[1] != package._filename:
+                        if new_rec_array is None:
+                            new_rec_array = np.rec.array([item.tolist()],
+                                                         package_data.dtype)
+                        else:
+                            new_rec_array = np.hstack((item, new_rec_array))
+            except Exception:
+                type_, value_, traceback_ = sys.exc_info()
+                raise MFDataException(self.structure.get_model(),
+                                      self.structure.get_package(),
+                                      self._path,
+                                      'building package recarray',
+                                      self.structure.name,
+                                      inspect.stack()[0][3],
+                                      type_, value_, traceback_, None,
+                                      self._simulation_data.debug)
+            try:
+                self.name_file.packages.set_data(new_rec_array)
+            except MFDataException as 
mfde: + message = 'Error occurred while setting package names ' \ + 'from name file in model "{}". Package name ' \ + 'data:\n{}'.format(self.name, new_rec_array) + raise MFDataException(mfdata_except=mfde, + model=self.model_name, + package=self.name_file._get_pname(), + message=message) + + # build list of child packages + child_package_list = [] + for pkg in self.packagelist: + if pkg.parent_file is not None and pkg.parent_file.path == \ + package.path: + child_package_list.append(pkg) + # remove child packages + for child_package in child_package_list: + self._remove_package_from_dictionaries(child_package) + + def rename_all_packages(self, name): + package_type_count = {} + self.name_file.filename = '{}.nam'.format(name) + for package in self.packagelist: + if package.package_type not in package_type_count: + package.filename = '{}.{}'.format(name, package.package_type) + package_type_count[package.package_type] = 1 + else: + package_type_count[package.package_type] += 1 + package.filename = '{}_{}.{}'.format( + name, package_type_count[package.package_type], + package.package_type) + + def set_all_data_external(self): + for package in self.packagelist: + package.set_all_data_external() + + def register_package(self, package, add_to_package_list=True, + set_package_name=True, set_package_filename=True): + """ + registers a package with the model + + Parameters + ---------- + package : MFPackage + package to register + add_to_package_list : bool + add package to lookup list + set_package_name : bool + produce a package name for this package + set_package_filename : bool + produce a filename for this package + + Returns + ------- + (path : tuple, package structure : MFPackageStructure) + + Examples + -------- + """ + package.container_type = [PackageContainerType.model] + if package.parent_file is not None: + path = package.parent_file.path + (package.package_type,) + else: + path = (self.name, package.package_type) + package_struct = \ + self.structure.get_package_struct(package.package_type) + if add_to_package_list and path in self._package_paths: + if not package_struct.multi_package_support: + # package of this type already exists, replace it + self.remove_package(package.package_type) + if self.simulation_data.verbosity_level.value >= \ + VerbosityLevel.normal.value: + print('WARNING: Package with type {} already exists. ' + 'Replacing existing package' + '.'.format(package.package_type)) + elif not set_package_name and package.package_name in \ + self.package_name_dict: + # package of this type with this name already + # exists, replace it + self.remove_package( + self.package_name_dict[package.package_name]) + if self.simulation_data.verbosity_level.value >= \ + VerbosityLevel.normal.value: + print( + 'WARNING: Package with name {} already exists. 
' + 'Replacing existing package' + '.'.format(package.package_name)) + + # make sure path is unique + if path in self._package_paths: + path_iter = datautil.PathIter(path) + for new_path in path_iter: + if new_path not in self._package_paths: + path = new_path + break + self._package_paths[path] = 1 + + if package.package_type.lower() == 'nam': + return path, self.structure.name_file_struct_obj + + if set_package_name: + # produce a default package name + if package_struct is not None and \ + package_struct.multi_package_support: + # check for other registered packages of this type + name_iter = datautil.NameIter(package.package_type, False) + for package_name in name_iter: + if package_name not in self.package_name_dict: + package.package_name = package_name + break + else: + package.package_name = package.package_type + + if set_package_filename: + package._filename = '{}.{}'.format(self.name, package.package_type) + + if add_to_package_list: + self._add_package(package, path) + + # add obs file to name file if it does not have a parent + if package.package_type in self.structure.package_struct_objs or \ + (package.package_type == 'obs' and package.parent_file is None): + # update model name file + pkg_type = package.package_type.upper() + if len(pkg_type) > 3 and pkg_type[-1] == 'A': + pkg_type = pkg_type[0:-1] + # Model Assumption - assuming all name files have a package + # recarray + self.name_file.packages.\ + update_record(['{}6'.format(pkg_type), package._filename, + package.package_name], 0) + if package_struct is not None: + return (path, package_struct) + else: + if self.simulation_data.verbosity_level.value >= \ + VerbosityLevel.normal.value: + print('WARNING: Unable to register unsupported file type {} ' + 'for model {}.'.format(package.package_type, self.name)) + return None, None + + def load_package(self, ftype, fname, pname, strict, ref_path, + dict_package_name=None, parent_package=None): + """ + loads a package from a file + + Parameters + ---------- + ftype : string + the file type + fname : string + the name of the file containing the package input + pname : string + the user-defined name for the package + strict : bool + strict mode when loading the file + ref_path : string + path to the file. 
uses local path if set to None
+        dict_package_name : string
+            package name for dictionary lookup
+        parent_package : MFPackage
+            parent package
+
+        Examples
+        --------
+        """
+        if ref_path is not None:
+            fname = os.path.join(ref_path, fname)
+        sim_struct = mfstructure.MFStructure().sim_struct
+        if (ftype in self.structure.package_struct_objs and
+                self.structure.package_struct_objs[ftype].
+                multi_package_support) or \
+                (ftype in sim_struct.utl_struct_objs and
+                 sim_struct.utl_struct_objs[ftype].multi_package_support):
+            # resolve dictionary name for package
+            if dict_package_name is not None:
+                if parent_package is not None:
+                    dict_package_name = '{}_{}'.format(
+                        parent_package.path[-1], ftype)
+                else:
+                    # use dict_package_name as the base name
+                    if ftype in self._ftype_num_dict:
+                        self._ftype_num_dict[dict_package_name] += 1
+                    else:
+                        self._ftype_num_dict[dict_package_name] = 0
+                    dict_package_name = '{}_{}'.format(
+                        dict_package_name,
+                        self._ftype_num_dict[dict_package_name])
+            else:
+                # use ftype as the base name
+                if ftype in self._ftype_num_dict:
+                    self._ftype_num_dict[ftype] += 1
+                else:
+                    self._ftype_num_dict[ftype] = 0
+                if pname is not None:
+                    dict_package_name = pname
+                else:
+                    dict_package_name = '{}_{}'.format(
+                        ftype, self._ftype_num_dict[ftype])
+        else:
+            dict_package_name = ftype
+
+        # clean up model type text
+        model_type = self.structure.model_type
+        while datautil.DatumUtil.is_int(model_type[-1]):
+            model_type = model_type[0:-1]
+
+        # create package
+        package_obj = self.package_factory(ftype, model_type)
+        package = package_obj(self, filename=fname, pname=dict_package_name,
+                              loading_package=True,
+                              parent_file=parent_package)
+        try:
+            package.load(strict)
+        except ReadAsArraysException:
+            # create ReadAsArrays package and load it instead
+            package_obj = self.package_factory('{}a'.format(ftype),
+                                               model_type)
+            package = package_obj(self, filename=fname,
+                                  pname=dict_package_name,
+                                  loading_package=True,
+                                  parent_file=parent_package)
+            package.load(strict)
+
+        # register child package with the model
+        self._add_package(package, package.path)
+        if parent_package is not None:
+            # register child package with the parent package
+            parent_package._add_package(package, package.path)
+
+        return package
+
+    def plot(self, SelPackList=None, **kwargs):
+        """
+        Plot 2-D, 3-D, transient 2-D, and stress period list (MfList)
+        model input data from a model instance.
+
+        Parameters
+        ----------
+        SelPackList : list
+            list of package names to plot; if None, all packages will be
+            plotted
+        **kwargs : dict
+            filename_base : str
+                Base file name that will be used to automatically generate
+                file names for output image files. Plots will be exported as
+                image files if filename_base is not None. (default is None)
+            file_extension : str
+                Valid matplotlib.pyplot file extension for savefig(). Only
+                used if filename_base is not None. (default is 'png')
+            mflay : int
+                MODFLOW zero-based layer number to return. If None, then all
+                layers will be included. (default is None)
+            kper : int
+                MODFLOW zero-based stress period number to return.
+                (default is zero)
+            key : str
+                MfList dictionary key. (default is None)
+
+        Returns
+        -------
+        axes : list
+            Empty list is returned if filename_base is not None. Otherwise
+            a list of matplotlib.pyplot.axis are returned.
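+
+        Examples
+        --------
+        a minimal, illustrative call on a loaded model instance named
+        ``model``:
+
+        >>> axes = model.plot(SelPackList=['wel'], kper=0)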
+ """ + from flopy.plot.plotutil import PlotUtilities + + axes = PlotUtilities._plot_model_helper(self, + SelPackList=SelPackList, + **kwargs) + return axes \ No newline at end of file diff --git a/flopy/mf6/mfpackage.py b/flopy/mf6/mfpackage.py index 3bf1b6b4b7..aba68918ec 100644 --- a/flopy/mf6/mfpackage.py +++ b/flopy/mf6/mfpackage.py @@ -1,1977 +1,1977 @@ -import os -import sys -import errno -import inspect -import numpy as np -from collections import OrderedDict - -from .mfbase import PackageContainer, ExtFileAction, PackageContainerType -from .mfbase import MFFileMgmt, MFDataException, ReadAsArraysException, \ - MFInvalidTransientBlockHeaderException, VerbosityLevel, \ - FlopyException -from .data.mfstructure import DatumType -from .data import mfstructure, mfdata -from ..utils import datautil -from .data import mfdataarray, mfdatalist, mfdatascalar -from .coordinates import modeldimensions -from ..pakbase import PackageInterface -from .data.mfdatautil import MFComment -from ..utils.check import mf6check - - -class MFBlockHeader(object): - """ - Represents the header of a block in a MF6 input file - - Parameters - ---------- - name : string - block name - variable_strings : list - list of strings that appear after the block name - comment : MFComment - comment text in the block header - - Attributes - ---------- - name : string - block name - variable_strings : list - list of strings that appear after the block name - comment : MFComment - comment text in the block header - data_items : list - list of MFVariable of the variables contained in this block - - Methods - ------- - write_header : (fd : file object) - writes block header to file object 'fd' - write_footer : (fd : file object) - writes block footer to file object 'fd' - set_all_data_external - sets the block's list and array data to be stored externally - - """ - def __init__(self, name, variable_strings, comment, simulation_data=None, - path=None): - self.name = name - self.variable_strings = variable_strings - if not ((simulation_data is None and path is None) or - (simulation_data is not None and path is not None)): - raise FlopyException('Block header must be initialized with both ' - 'simulation_data and path or with neither.') - if simulation_data is None: - self.comment = comment - self.simulation_data = None - self.path = None - self.comment_path = None - else: - self.connect_to_dict(simulation_data, path, comment) - # TODO: Get data_items from dictionary - self.data_items = [] - - def build_header_variables(self, simulation_data, block_header_structure, - block_path, data, dimensions): - self.data_items = [] - var_path = block_path + (block_header_structure[0].name,) - - # fix up data - fixed_data = [] - if block_header_structure[0].data_item_structures[0].type == \ - DatumType.keyword: - data_item = block_header_structure[0].data_item_structures[0] - fixed_data.append(data_item.name) - if isinstance(data, tuple): - data = list(data) - if isinstance(data, list): - fixed_data = fixed_data + data - else: - fixed_data.append(data) - if len(fixed_data) > 0: - fixed_data = [tuple(fixed_data)] - # create data object - new_data = MFBlock.data_factory(simulation_data, None, - block_header_structure[0], True, - var_path, dimensions, fixed_data) - self.data_items.append(new_data) - - def is_same_header(self, block_header): - if len(self.variable_strings) > 0: - if len(self.variable_strings) != \ - len(block_header.variable_strings): - return False - else: - for sitem, oitem in zip(self.variable_strings, - 
block_header.variable_strings): - if sitem != oitem: - return False - return True - elif len(self.data_items) > 0 and \ - len(block_header.variable_strings) > 0: - typ_obj = self.data_items[0].structure.data_item_structures[0].\ - type_obj - if typ_obj == int or typ_obj == float: - return bool(self.variable_strings[0] == \ - block_header.variable_strings[0]) - else: - return True - elif len(self.data_items) == len(block_header.variable_strings): - return True - return False - - def get_comment(self): - if self.simulation_data is None: - return self.comment - else: - return self.simulation_data.mfdata[self.comment_path] - - def connect_to_dict(self, simulation_data, path, comment=None): - self.simulation_data = simulation_data - self.path = path - self.comment_path = path + ('blk_hdr_comment',) - if comment is None: - simulation_data.mfdata[self.comment_path] = self.comment - else: - simulation_data.mfdata[self.comment_path] = comment - self.comment = None - - def write_header(self, fd): - fd.write('BEGIN {}'.format(self.name)) - if len(self.data_items) > 0: - if isinstance(self.data_items[0], mfdatascalar.MFScalar): - one_based = self.data_items[0].structure.type == \ - DatumType.integer - entry = self.data_items[0].get_file_entry(values_only=True, - one_based=one_based) - else: - entry = self.data_items[0].get_file_entry() - fd.write('{}'.format(entry.rstrip())) - if len(self.data_items) > 1: - for data_item in self.data_items[1:]: - entry = data_item.get_file_entry(values_only=True) - fd.write('%s' % (entry.rstrip())) - if self.get_comment().text: - fd.write(' ') - self.get_comment().write(fd) - fd.write('\n') - - def write_footer(self, fd): - fd.write('END {}'.format(self.name)) - if len(self.data_items) > 0: - one_based = self.data_items[0].structure.type == \ - DatumType.integer - if isinstance(self.data_items[0], mfdatascalar.MFScalar): - entry = self.data_items[0].get_file_entry(values_only=True, - one_based=one_based) - else: - entry = self.data_items[0].get_file_entry() - fd.write('{}'.format(entry.rstrip())) - fd.write('\n') - - def get_transient_key(self): - transient_key = None - for index in range(0, len(self.data_items)): - if self.data_items[index].structure.type != DatumType.keyword: - transient_key = self.data_items[index].get_data() - if isinstance(transient_key, np.recarray): - item_struct = self.data_items[index].structure - key_index = item_struct.first_non_keyword_index() - if not (key_index is not None and - len(transient_key[0]) > key_index): - if key_index is None: - raise FlopyException('Block header index could ' - 'not be determined.') - else: - raise FlopyException('Block header index "{}" ' - 'must be less than "{}"' - '.'.format( - key_index, len(transient_key[0]))) - transient_key = transient_key[0][key_index] - break - return transient_key - - -class MFBlock(object): - """ - Represents a block in a MF6 input file - - - Parameters - ---------- - simulation_data : MFSimulationData - data specific to this simulation - dimensions : MFDimensions - describes model dimensions including model grid and simulation time - structure : MFVariableStructure - structure describing block - path : tuple - unique path to block - - Attributes - ---------- - block_headers : MFBlockHeaderIO - block header text (BEGIN/END), header variables, comments in the header - structure : MFBlockStructure - structure describing block - path : tuple - unique path to block - datasets : OrderDict - dictionary of dataset objects with keys that are the name of the - dataset - datasets_keyword : 
dict - dictionary of dataset objects with keys that are key words to identify - start of dataset - enabled : boolean - block is being used - - Methods - ------- - get_block_header_info : (line : string, path : tuple) - static method that parses a line as a block header and returns a - MFBlockHeader class representing the block header in that line - load : (block_header : MFBlockHeader, fd : file, strict : boolean) - loads block from file object. file object must be advanced to - beginning of block before calling - write : (fd : file) - writes block to a file object - is_valid : () - returns true of the block is valid - - See Also - -------- - - Notes - ----- - - Examples - -------- - - """ - - def __init__(self, simulation_data, dimensions, structure, path, - model_or_sim, container_package): - self._simulation_data = simulation_data - self._dimensions = dimensions - self._model_or_sim = model_or_sim - self._container_package = container_package - self.block_headers = [MFBlockHeader(structure.name, [], - MFComment('', path, simulation_data, - 0), - simulation_data, path)] - self.structure = structure - self.path = path - self.datasets = OrderedDict() - self.datasets_keyword = {} - self.blk_trailing_comment_path = path + ('blk_trailing_comment',) - self.blk_post_comment_path = path + ('blk_post_comment',) - if self.blk_trailing_comment_path not in simulation_data.mfdata: - simulation_data.mfdata[self.blk_trailing_comment_path] = \ - MFComment('', '', simulation_data, 0) - if self.blk_post_comment_path not in simulation_data.mfdata: - simulation_data.mfdata[self.blk_post_comment_path] = \ - MFComment('\n', '', simulation_data, 0) - # initially disable if optional - self.enabled = structure.number_non_optional_data() > 0 - self.loaded = False - self.external_file_name = None - self._structure_init() - - def __repr__(self): - return self._get_data_str(True) - - def __str__(self): - return self._get_data_str(False) - - def _get_data_str(self, formal): - data_str = '' - for dataset in self.datasets.values(): - if formal: - ds_repr = repr(dataset) - if len(ds_repr.strip()) > 0: - data_str = '{}{}\n{}\n'.format(data_str, - dataset.structure.name, - repr(dataset)) - else: - ds_str = str(dataset) - if len(ds_str.strip()) > 0: - data_str = '{}{}\n{}\n'.format(data_str, - dataset.structure.name, - str(dataset)) - return data_str - - # return an MFScalar, MFList, or MFArray - @staticmethod - def data_factory(sim_data, model_or_sim, structure, enable, path, dimensions, - data=None, package=None): - data_type = structure.get_datatype() - # examine the data structure and determine the data type - if data_type == mfstructure.DataType.scalar_keyword or \ - data_type == mfstructure.DataType.scalar: - return mfdatascalar.MFScalar(sim_data, model_or_sim, structure, data, - enable, path, dimensions) - elif data_type == mfstructure.DataType.scalar_keyword_transient or \ - data_type == mfstructure.DataType.scalar_transient: - trans_scalar = mfdatascalar.MFScalarTransient(sim_data, - model_or_sim, - structure, - enable, path, - dimensions) - if data is not None: - trans_scalar.set_data(data, key=0) - return trans_scalar - elif data_type == mfstructure.DataType.array: - return mfdataarray.MFArray(sim_data, model_or_sim, structure, data, - enable, path, dimensions) - elif data_type == mfstructure.DataType.array_transient: - trans_array = mfdataarray.MFTransientArray(sim_data, model_or_sim, - structure, enable, path, - dimensions) - if data is not None: - trans_array.set_data(data, key=0) - return trans_array - elif 
data_type == mfstructure.DataType.list: - return mfdatalist.MFList(sim_data, model_or_sim, structure, data, - enable,path, dimensions, package) - elif data_type == mfstructure.DataType.list_transient: - trans_list = mfdatalist.MFTransientList(sim_data, model_or_sim, - structure, enable, path, - dimensions, package) - if data is not None: - trans_list.set_data(data, key=0, autofill=True) - return trans_list - elif data_type == mfstructure.DataType.list_multiple: - mult_list = mfdatalist.MFMultipleList(sim_data, model_or_sim, - structure, enable, path, - dimensions, package) - if data is not None: - mult_list.set_data(data, key=0, autofill=True) - return mult_list - - def _structure_init(self): - # load datasets keywords into dictionary - for dataset_struct in self.structure.data_structures.values(): - for keyword in dataset_struct.get_keywords(): - self.datasets_keyword[keyword] = dataset_struct - # load block header data items into dictionary - for dataset in self.structure.block_header_structure: - self._new_dataset(dataset.name, dataset, True, None) - - def set_model_relative_path(self, model_ws): - # update datasets - for key, dataset in self.datasets.items(): - if dataset.structure.file_data: - try: - file_data = dataset.get_data() - except MFDataException as mfde: - raise MFDataException(mfdata_except=mfde, - model= - self._container_package.model_name, - package= - self._container_package._get_pname(), - message='Error occurred while ' - 'getting file data from ' - '"{}"'.format( - dataset.structure.name)) - if file_data: - # update file path location for all file paths - for file_line in file_data: - old_file_name = os.path.split(file_line[0])[1] - file_line[0] = os.path.join(model_ws, old_file_name) - # update block headers - for block_header in self.block_headers: - for dataset in block_header.data_items: - if dataset.structure.file_data: - try: - file_data = dataset.get_data() - except MFDataException as mfde: - raise MFDataException(mfdata_except=mfde, - model=self._container_package. - model_name, - package=self._container_package. 
- _get_pname(), - message='Error occurred while ' - 'getting file data from ' - '"{}"'.format( - dataset.structure.name)) - - if file_data: - # update file path location for all file paths - for file_line in file_data: - old_file_path, old_file_name = \ - os.path.split(file_line[1]) - new_file_path = os.path.join(model_ws, - old_file_name) - # update transient keys of datasets within the - # block - for key, idataset in self.datasets.items(): - if isinstance(idataset, mfdata.MFTransient): - idataset.update_transient_key(file_line[1], - new_file_path) - file_line[1] = os.path.join(model_ws, - old_file_name) - - def add_dataset(self, dataset_struct, data, var_path): - try: - self.datasets[var_path[-1]] = self.data_factory( - self._simulation_data, self._model_or_sim, dataset_struct, - True, var_path, self._dimensions, data, - self._container_package) - except MFDataException as mfde: - raise MFDataException(mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message='Error occurred while adding' - ' dataset "{}" to block ' - '"{}"'.format(dataset_struct.name, - self.structure.name)) - - self._simulation_data.mfdata[var_path] = self.datasets[var_path[-1]] - dtype = dataset_struct.get_datatype() - if dtype == mfstructure.DataType.list_transient or \ - dtype == mfstructure.DataType.list_multiple or \ - dtype == mfstructure.DataType.array_transient: - # build repeating block header(s) - if isinstance(data, dict): - # Add block headers for each dictionary key - for index in data: - if isinstance(index, tuple): - header_list = list(index) - else: - header_list = [index] - self._build_repeating_header(header_list) - elif isinstance(data, list): - # Add a single block header of value 0 - self._build_repeating_header([0]) - elif dtype != mfstructure.DataType.list_multiple and \ - data is not None: - self._build_repeating_header([[0]]) - - return self.datasets[var_path[-1]] - - def _build_repeating_header(self, header_data): - if self._header_exists(header_data[0]): - return - if len(self.block_headers[-1].data_items) == 1 and \ - self.block_headers[-1].data_items[0].get_data() is not None: - block_header_path = self.path + (len(self.block_headers) + 1,) - block_header = MFBlockHeader(self.structure.name, [], - MFComment('', self.path, - self._simulation_data, 0), - self._simulation_data, - block_header_path) - self.block_headers.append(block_header) - else: - block_header_path = self.path + (len(self.block_headers),) - struct = self.structure - last_header = self.block_headers[-1] - try: - last_header.build_header_variables(self._simulation_data, - struct.block_header_structure, - block_header_path, - header_data, - self._dimensions) - except MFDataException as mfde: - raise MFDataException(mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message='Error occurred while building' - ' block header variables for block ' - '"{}"'.format(last_header.name)) - - def _new_dataset(self, key, dataset_struct, block_header=False, - initial_val=None): - dataset_path = self.path + (key,) - if block_header: - if dataset_struct.type == DatumType.integer and \ - initial_val is not None \ - and len(initial_val) >= 1 and \ - dataset_struct.get_record_size()[0] == 1: - # stress periods are stored 0 based - initial_val = int(initial_val[0]) - 1 - if isinstance(initial_val, list): - initial_val = [tuple(initial_val)] - try: - new_data = MFBlock.data_factory(self._simulation_data, - 
self._model_or_sim, - dataset_struct, True, - dataset_path, self._dimensions, - initial_val, - self._container_package) - except MFDataException as mfde: - raise MFDataException(mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message='Error occurred while adding' - ' dataset "{}" to block ' - '"{}"'.format( - dataset_struct.name, - self.structure.name)) - self.block_headers[-1].data_items.append(new_data) - else: - try: - self.datasets[key] = self.data_factory(self._simulation_data, - self._model_or_sim, - dataset_struct, True, - dataset_path, - self._dimensions, - initial_val, - self._container_package) - except MFDataException as mfde: - raise MFDataException(mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message='Error occurred while adding' - ' dataset "{}" to block ' - '"{}"'.format( - dataset_struct.name, - self.structure.name)) - for keyword in dataset_struct.get_keywords(): - self.datasets_keyword[keyword] = dataset_struct - - def is_empty(self): - for key, dataset in self.datasets.items(): - try: - has_data = dataset.has_data() - except MFDataException as mfde: - raise MFDataException(mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package._get_pname(), - message='Error occurred while verifying' - ' data of dataset "{}" in block ' - '"{}"'.format( - dataset.structure.name, - self.structure.name)) - - if has_data is not None and has_data: - return False - return True - - def load(self, block_header, fd, strict=True): - # verify number of header variables - if len(block_header.variable_strings) < \ - self.structure.number_non_optional_block_header_data(): - if self._simulation_data.verbosity_level.value >= \ - VerbosityLevel.normal.value: - warning_str = 'WARNING: Block header for block "{}" does not ' \ - 'contain the correct number of ' \ - 'variables {}'.format(block_header.name, self.path) - print(warning_str) - return - - if self.loaded: - # verify header has not already been loaded - for bh_current in self.block_headers: - if bh_current.is_same_header(block_header): - if self._simulation_data.verbosity_level.value >= \ - VerbosityLevel.normal.value: - warning_str = 'WARNING: Block header for block "{}" is ' \ - 'not a unique block header ' \ - '{}'.format(block_header.name, self.path) - print(warning_str) - return - - # init - self.enabled = True - if not self.loaded: - self.block_headers = [] - self.block_headers.append(block_header) - - # process any header variable - if len(self.structure.block_header_structure) > 0: - dataset = self.structure.block_header_structure[0] - self._new_dataset(dataset.name, dataset, True, - self.block_headers[-1].variable_strings) - - # handle special readasarrays case - if self._container_package.structure.read_as_arrays: - # auxiliary variables may appear with aux variable name as keyword - aux_vars = self._container_package.auxiliary.get_data() - if aux_vars is not None: - for var_name in list(aux_vars[0])[1:]: - self.datasets_keyword[(var_name,)] = \ - self._container_package.aux.structure - - comments = [] - - # capture any initial comments - initial_comment = MFComment('', '', 0) - fd_block = fd - line = fd_block.readline() - datautil.PyListUtil.reset_delimiter_used() - arr_line = datautil.PyListUtil.split_data_line(line) - while MFComment.is_comment(line, True): - initial_comment.add_text(line) - line = fd_block.readline() - arr_line = 
datautil.PyListUtil.split_data_line(line) - - # if block not empty - external_file_info = None - if not (len(arr_line[0]) > 2 and arr_line[0][:3].upper() == 'END'): - if arr_line[0].lower() == 'open/close': - # open block contents from external file - fd_block.readline() - fd_path = os.path.split(os.path.realpath(fd_block.name))[0] - try: - if self._simulation_data.verbosity_level.value >= \ - VerbosityLevel.verbose.value: - print(' opening external file "{}"..' - '.'.format(arr_line[1])) - external_file_info = arr_line - fd_block = open(os.path.join(fd_path, arr_line[1]), - 'r') - # read first line of external file - line = fd_block.readline() - arr_line = datautil.PyListUtil.split_data_line(line) - except: - type_, value_, traceback_ = sys.exc_info() - message = 'Error reading external file specified in ' \ - 'line "{}"'.format(line) - raise MFDataException(self._container_package.model_name, - self._container_package._get_pname(), - self.path, 'reading external file', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, message, - self._simulation_data.debug) - if len(self.structure.data_structures) <= 1: - # load a single data set - dataset = self.datasets[next(iter(self.datasets))] - try: - if self._simulation_data.verbosity_level.value >= \ - VerbosityLevel.verbose.value: - print(' loading data {}..' - '.'.format(dataset.structure.name)) - next_line = dataset.load(line, fd_block, - self.block_headers[-1], - initial_comment, - external_file_info) - except MFDataException as mfde: - raise MFDataException( - mfdata_except=mfde, model=self._container_package. - model_name, - package=self._container_package._get_pname(), - message='Error occurred while loading data "{}" in ' - 'block "{}" from file "{}"' - '.'.format(dataset.structure.name, - self.structure.name, - fd_block.name)) - package_info_list = self._get_package_info(dataset) - if package_info_list is not None: - for package_info in package_info_list: - if self._simulation_data.verbosity_level.value >= \ - VerbosityLevel.verbose.value: - print(' loading child package {}..' 
- '.'.format(package_info[0])) - pkg = self._model_or_sim.load_package( - package_info[0], package_info[1], - package_info[1], True, package_info[2], - package_info[3], self._container_package) - if hasattr(self._container_package, package_info[0]): - package_group = getattr(self._container_package, - package_info[0]) - package_group._append_package(pkg, pkg.filename, - False) - - if next_line[1] is not None: - arr_line = datautil.PyListUtil.split_data_line( - next_line[1]) - else: - arr_line = '' - # capture any trailing comments - post_data_comments = MFComment('', '', - self._simulation_data, 0) - dataset.post_data_comments = post_data_comments - while arr_line and (len(next_line[1]) <= 2 or - arr_line[0][:3].upper() != 'END'): - next_line[1] = fd_block.readline().strip() - arr_line = datautil.PyListUtil.split_data_line( - next_line[1]) - if arr_line and (len(next_line[1]) <= 2 or - arr_line[0][:3].upper() != 'END'): - post_data_comments.add_text(' '.join(arr_line)) - else: - # look for keyword and store line as data or comment - try: - key, results = self._find_data_by_keyword(line, fd_block, - initial_comment) - except MFInvalidTransientBlockHeaderException as e: - warning_str = 'WARNING: {}'.format(e) - print(warning_str) - self.block_headers.pop() - return - - self._save_comments(arr_line, line, key, comments) - if results[1] is None or results[1][:3].upper() != 'END': - # block consists of unordered datasets - # load the data sets out of order based on - # initial constants - line = ' ' - while line != '': - line = fd_block.readline() - arr_line = datautil.PyListUtil.\ - split_data_line(line) - if arr_line: - # determine if at end of block - if len(arr_line[0]) > 2 and \ - arr_line[0][:3].upper() == 'END': - break - # look for keyword and store line as data o - # r comment - key, result = self._find_data_by_keyword( - line, fd_block, initial_comment) - self._save_comments(arr_line, line, key, comments) - if result[1] is not None and \ - result[1][:3].upper() == 'END': - break - - self._simulation_data.mfdata[self.blk_trailing_comment_path].text = \ - comments - self.loaded = True - self.is_valid() - - def _find_data_by_keyword(self, line, fd, initial_comment): - first_key = None - nothing_found = False - next_line = [True, line] - while next_line[0] and not nothing_found: - arr_line = datautil.PyListUtil.\ - split_data_line(next_line[1]) - key = datautil.find_keyword(arr_line, self.datasets_keyword) - if key is not None: - ds_name = self.datasets_keyword[key].name - try: - if self._simulation_data.verbosity_level.value >= \ - VerbosityLevel.verbose.value: - print(' loading data {}...'.format(ds_name)) - next_line = self.datasets[ds_name].load( - next_line[1], fd, self.block_headers[-1], - initial_comment) - except MFDataException as mfde: - raise MFDataException(mfdata_except=mfde, - model=self._container_package. - model_name, - package=self._container_package. - _get_pname(), - message='Error occurred while ' - 'loading data "{}" in ' - 'block "{}" from file "{}"' - '.'.format( - ds_name, self.structure.name, - fd.name)) - - # see if first item's name indicates a reference to - # another package - package_info_list = self._get_package_info(self.datasets[ - ds_name]) - if package_info_list is not None: - for package_info in package_info_list: - if self._simulation_data.verbosity_level.value >= \ - VerbosityLevel.verbose.value: - print(' loading child package {}..' 
- '.'.format(package_info[1])) - pkg = self._model_or_sim.load_package( - package_info[0], package_info[1], package_info[1], - True, package_info[2], package_info[3], - self._container_package) - if hasattr(self._container_package, package_info[0]): - package_group = getattr(self._container_package, - package_info[0]) - package_group._append_package(pkg, pkg.filename, - False) - if first_key is None: - first_key = key - nothing_found = False - elif arr_line[0].lower() == 'readasarrays' and \ - self.path[-1].lower() == 'options' and \ - self._container_package.structure.read_as_arrays == False: - error_msg = 'ERROR: Attempting to read a ReadAsArrays ' \ - 'package as a non-ReadAsArrays ' \ - 'package {}'.format(self.path) - raise ReadAsArraysException(error_msg) - else: - nothing_found = True - - if first_key is None: - # look for recarrays. if there is a lone recarray in this block, - # use it by default - recarrays = self.structure.get_all_recarrays() - if len(recarrays) != 1: - return key, [None, None] - dataset = self.datasets[recarrays[0].name] - ds_result = dataset.load(line, fd, self.block_headers[-1], - initial_comment) - - # see if first item's name indicates a reference to another package - package_info_list = self._get_package_info(dataset) - if package_info_list is not None: - for package_info in package_info_list: - if self._simulation_data.verbosity_level.value >= \ - VerbosityLevel.verbose.value: - print(' loading child package {}..' - '.'.format(package_info[0])) - pkg = self._model_or_sim.load_package( - package_info[0], package_info[1], None, True, - package_info[2], package_info[3], - self._container_package) - if hasattr(self._container_package, package_info[0]): - package_group = getattr(self._container_package, - package_info[0]) - package_group._append_package(pkg, pkg.filename, - False) - - return recarrays[0].keyword, ds_result - else: - return first_key, next_line - - def _get_package_info(self, dataset): - if not dataset.structure.file_data: - return None - for index in range(0, len(dataset.structure.data_item_structures)): - data_item = dataset.structure.data_item_structures[index] - if data_item.type == DatumType.keyword or data_item.type == \ - DatumType.string: - item_name = data_item.name - package_type = item_name[:-1] - model_type = self._model_or_sim.structure.model_type - if PackageContainer.package_factory(package_type, - model_type) is not None: - try: - data = dataset.get_data() - except MFDataException as mfde: - raise MFDataException(mfdata_except=mfde, - model=self._container_package. - model_name, - package=self._container_package. 
- _get_pname(), - message='Error occurred while ' - 'getting data from "{}" ' - 'in block "{}".'.format( - dataset.structure.name, - self.structure.name)) - if isinstance(data, np.recarray): - file_location = data[-1][index] - else: - file_location = data - package_info_list = [] - file_path, file_name = os.path.split(file_location) - dict_package_name = '{}_{}'.format(package_type, - self.path[-2]) - package_info_list.append((package_type, file_name, - file_path, - dict_package_name)) - return package_info_list - return None - return None - - def _save_comments(self, arr_line, line, key, comments): - # FIX: Save these comments somewhere in the data set - if not key in self.datasets_keyword: - if MFComment.is_comment(key, True): - if comments: - comments.append('\n') - comments.append(arr_line) - - def write(self, fd, ext_file_action=ExtFileAction.copy_relative_paths): - # never write an empty block - is_empty = self.is_empty() - if is_empty and self.structure.name.lower() != 'exchanges' and \ - self.structure.name.lower() != 'options': - return - if self.structure.repeating(): - repeating_datasets = self._find_repeating_datasets() - for repeating_dataset in repeating_datasets: - # resolve any missing block headers - self._add_missing_block_headers(repeating_dataset) - if len(repeating_datasets) > 0: - # loop through all block headers - for block_header in self.block_headers: - self._write_block(fd, block_header, ext_file_action) - else: - # write out block - self._write_block(fd, self.block_headers[0], ext_file_action) - - else: - self._write_block(fd, self.block_headers[0], ext_file_action) - - def _add_missing_block_headers(self, repeating_dataset): - for key in repeating_dataset.get_active_key_list(): - if not self._header_exists(key[0]): - self._build_repeating_header([key[0]]) - - def _header_exists(self, key): - if not isinstance(key, list): - comp_key_list = [key] - else: - comp_key_list = key - for block_header in self.block_headers: - transient_key = block_header.get_transient_key() - for comp_key in comp_key_list: - if transient_key is not None and transient_key == comp_key: - return True - return False - - def set_all_data_external(self, base_name): - for key, dataset in self.datasets.items(): - if isinstance(dataset, mfdataarray.MFArray) or \ - (isinstance(dataset, mfdatalist.MFList) and - dataset.structure.type == DatumType.recarray) and \ - dataset.enabled: - dataset.store_as_external_file( - '{}_{}.txt'.format(base_name, dataset.structure.name), - replace_existing_external=False) - - def _find_repeating_datasets(self): - repeating_datasets = [] - for key, dataset in self.datasets.items(): - if dataset.repeating: - repeating_datasets.append(dataset) - return repeating_datasets - - def _write_block(self, fd, block_header, ext_file_action): - # write block header - block_header.write_header(fd) - transient_key = None - if len(block_header.data_items) > 0: - transient_key = block_header.get_transient_key() - - if self.external_file_name is not None: - # write block contents to external file - indent_string = self._simulation_data.indent_string - fd.write('{}open/close {}\n'.format(indent_string, - self.external_file_name)) - fd_main = fd - fd_path = os.path.split(os.path.realpath(fd.name))[0] - try: - fd = open(os.path.join(fd_path, self.external_file_name), 'w') - except: - type_, value_, traceback_ = sys.exc_info() - message = 'Error reading external file ' \ - '"{}"'.format(self.external_file_name) - raise MFDataException(self._container_package.model_name, - 
self._container_package._get_pname(), - self.path, 'reading external file', - self.structure.name, - inspect.stack()[0][3], type_, - value_, traceback_, message, - self._simulation_data.debug) - - # write data sets - for key, dataset in self.datasets.items(): - try: - if transient_key is None: - if self._simulation_data.verbosity_level.value >= \ - VerbosityLevel.verbose.value: - print(' writing data {}..' - '.'.format(dataset.structure.name)) - fd.write(dataset.get_file_entry( - ext_file_action=ext_file_action)) - else: - if self._simulation_data.verbosity_level.value >= \ - VerbosityLevel.verbose.value: - print(' writing data {} ({})..' - '.'.format(dataset.structure.name, - transient_key)) - if dataset.repeating: - fd.write(dataset.get_file_entry( - transient_key, ext_file_action=ext_file_action)) - else: - fd.write(dataset.get_file_entry( - ext_file_action=ext_file_action)) - except MFDataException as mfde: - raise MFDataException(mfdata_except=mfde, - model=self._container_package.model_name, - package=self._container_package. - _get_pname(), - message='Error occurred while writing ' - 'data "{}" in block "{}" to file' - ' "{}".'.format( - dataset.structure.name, - self.structure.name, - fd.name)) - # write trailing comments - self._simulation_data.mfdata[self.blk_trailing_comment_path].write(fd) - - if self.external_file_name is not None: - # switch back writing to package file - fd.close() - fd = fd_main - - # write block footer - block_header.write_footer(fd) - - # write post block comments - self._simulation_data.mfdata[self.blk_post_comment_path].write(fd) - - # write extra line if comments are off - if not self._simulation_data.comments_on: - fd.write('\n') - - def is_allowed(self): - if self.structure.variable_dependant_path: - # fill in empty part of the path with the current path - if len(self.structure.variable_dependant_path) == 3: - dependant_var_path = (self.path[0],) + \ - self.structure.variable_dependant_path - elif len(self.structure.variable_dependant_path) == 2: - dependant_var_path = (self.path[0], self.path[1]) + \ - self.structure.variable_dependant_path - elif len(self.structure.variable_dependant_path) == 1: - dependant_var_path = (self.path[0], self.path[1], - self.path[2]) + \ - self.structure.variable_dependant_path - else: - dependant_var_path = None - - # get dependency - dependant_var = None - mf_data = self._simulation_data.mfdata - if dependant_var_path in mf_data: - dependant_var = mf_data[dependant_var_path] - - # resolve dependency - if self.structure.variable_value_when_active[0] == 'Exists': - exists = self.structure.variable_value_when_active[1] - if dependant_var and exists.lower() == 'true': - return True - elif not dependant_var and exists.lower() == 'false': - return True - else: - return False - elif not dependant_var: - return False - elif self.structure.variable_value_when_active[0] == '>': - min_val = self.structure.variable_value_when_active[1] - if dependant_var > float(min_val): - return True - else: - return False - elif self.structure.variable_value_when_active[0] == '<': - max_val = self.structure.variable_value_when_active[1] - if dependant_var < float(max_val): - return True - else: - return False - return True - - def is_valid(self): - # check data sets - for dataset in self.datasets.values(): - # Non-optional datasets must be enabled - if not dataset.structure.optional and not dataset.enabled: - return False - # Enabled blocks must be valid - if dataset.enabled and not dataset.is_valid: - return False - # check variables - for 
block_header in self.block_headers: - for dataset in block_header.data_items: - # Non-optional datasets must be enabled - if not dataset.structure.optional and not dataset.enabled: - return False - # Enabled blocks must be valid - if dataset.enabled and not dataset.is_valid(): - return False - - -class MFPackage(PackageContainer, PackageInterface): - """ - Provides an interface for the user to specify data to build a package. - - Parameters - ---------- - model_or_sim : MFModel of MFSimulation - the parent model or simulation containing this package - package_type : string - string defining the package type - filename : string - filename of file where this package is stored - pname : string - package name - loading_package : bool - whether or not to add this package to the parent container's package - list during initialization - parent_file : MFPackage - parent package that contains this package - - Attributes - ---------- - blocks : OrderedDict - dictionary of blocks contained in this package by block name - path : tuple - data dictionary path to this package - structure : PackageStructure - describes the blocks and data contain in this package - dimensions : PackageDimension - resolves data dimensions for data within this package - set_all_data_external - sets the package's list and array data to be stored externally - - Methods - ------- - build_mfdata : (var_name : variable name, data : data contained in this - object) : MFData subclass - Returns the appropriate data type object (mfdatalist, mfdataarray, or - mfdatascalar) giving that object the appropriate structure (looked - up based on var_name) and any data supplied - load : (strict : bool) : bool - Loads the package from file - is_valid : bool - Returns whether or not this package is valid - write - Writes the package to a file - get_file_path : string - Returns the package file's path - remove - Removes package from the simulation/model it is currently a part of - - See Also - -------- - - Notes - ----- - - Examples - -------- - - - """ - def __init__(self, model_or_sim, package_type, filename=None, pname=None, - loading_package=False, parent_file=None): - - self.model_or_sim = model_or_sim - self._data_list = [] - self._package_type = package_type - if model_or_sim.type == 'Model' and package_type.lower() != 'nam': - self.model_name = model_or_sim.name - else: - self.model_name = None - - if model_or_sim.type != 'Model' and model_or_sim.type != 'Simulation': - message = 'Invalid model_or_sim parameter. Expecting either a ' \ - 'model or a simulation. Instead type "{}" was ' \ - 'given.'.format(type(model_or_sim)) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.model_name, pname, '', 'initializing ' - 'package', None, inspect.stack()[0][3], - type_, value_, traceback_, message, - model_or_sim.simulation_data.debug) - - super(MFPackage, self).__init__(model_or_sim.simulation_data, - self.model_name) - - self.parent = model_or_sim - self._simulation_data = model_or_sim.simulation_data - self.parent_file = parent_file - self.blocks = OrderedDict() - self.container_type = [] - self.loading_package = loading_package - if pname is not None: - if not isinstance(pname, str): - message = 'Invalid pname parameter. Expecting type str. 
' \ - 'Instead type "{}" was ' \ - 'given.'.format(type(pname)) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.model_name, pname, '', - 'initializing package', None, - inspect.stack()[0][3], type_, value_, - traceback_, message, - model_or_sim.simulation_data.debug) - - self.package_name = pname.lower() - else: - self.package_name = None - - if filename is None: - self._filename = MFFileMgmt.string_to_file_path('{}.{}'.format( - self.model_or_sim.name, package_type)) - else: - if not isinstance(filename, str): - message = 'Invalid fname parameter. Expecting type str. ' \ - 'Instead type "{}" was ' \ - 'given.'.format(type(filename)) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.model_name, pname, '', - 'initializing package', None, - inspect.stack()[0][3], type_, value_, - traceback_, message, - model_or_sim.simulation_data.debug) - - self._filename = MFFileMgmt.string_to_file_path(filename) - - self.path, \ - self.structure = model_or_sim.register_package(self, - not loading_package, - pname is None, - filename is None) - self.dimensions = self.create_package_dimensions() - - if self.path is None: - if self._simulation_data.verbosity_level.value >= \ - VerbosityLevel.normal.value: - print('WARNING: Package type {} failed to register property.' - ' {}'.format(self._package_type, self.path)) - if parent_file is not None: - self.container_type.append(PackageContainerType.package) - # init variables that may be used later - self.post_block_comments = None - self.last_error = None - self.bc_color = "black" - self.__inattr = False - self._child_package_groups = {} - - def __setattr__(self, name, value): - if hasattr(self, name) and getattr(self, name) is not None: - attribute = object.__getattribute__(self, name) - if attribute is not None and isinstance(attribute, mfdata.MFData): - try: - if isinstance(attribute, mfdatalist.MFList): - attribute.set_data(value, autofill=True) - else: - attribute.set_data(value) - except MFDataException as mfde: - raise MFDataException(mfdata_except=mfde, - model=self.model_name, - package=self._get_pname()) - return - super(MFPackage, self).__setattr__(name, value) - - def __repr__(self): - return self._get_data_str(True) - - def __str__(self): - return self._get_data_str(False) - - @property - def filename(self): - return self._filename - - @filename.setter - def filename(self, fname): - if isinstance(self.parent_file, MFPackage) and \ - self.structure.file_type in \ - self.parent_file._child_package_groups: - try: - child_pkg_group = self.parent_file._child_package_groups[ - self.structure.file_type] - child_pkg_group._update_filename(self._filename, fname) - except Exception: - print('WARNING: Unable to update file name for parent' - 'package of {}.'.format(self.name)) - self._filename = fname - - @property - def package_type(self): - return self._package_type - - @property - def name(self): - return [self.package_name] - - @name.setter - def name(self, name): - self.package_name = name - - @property - def parent(self): - return self._parent - - @parent.setter - def parent(self, parent): - self._parent = parent - - @property - def plotable(self): - if self.model_or_sim.type == "Simulation": - return False - else: - return True - - @property - def data_list(self): - # return [data_object, data_object, ...] 
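#  A minimal usage sketch for the data_list property above; the workspace
#  path and the 'npf' package name are illustrative assumptions, not taken
#  from this patch:
#
#      import flopy
#      sim = flopy.mf6.MFSimulation.load(sim_ws='.')
#      gwf = sim.get_model()
#      npf = gwf.get_package('npf')
#      for data in npf.data_list:
#          print(data.structure.name)  # each MFData object in the package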
- return self._data_list - - def check(self, f=None, verbose=True, level=1, checktype=None): - if checktype is None: - checktype = mf6check - return super(MFPackage, self).check(f, verbose, level, checktype) - - def _get_nan_exclusion_list(self): - excl_list = [] - if hasattr(self, 'stress_period_data'): - spd_struct = self.stress_period_data.structure - for item_struct in spd_struct.data_item_structures: - if item_struct.optional or item_struct.keystring_dict: - excl_list.append(item_struct.name) - return excl_list - - def _get_data_str(self, formal, show_data=True): - data_str = 'package_name = {}\nfilename = {}\npackage_type = {}' \ - '\nmodel_or_simulation_package = {}' \ - '\n{}_name = {}' \ - '\n'.format(self._get_pname(), self._filename, - self.package_type, - self.model_or_sim.type.lower(), - self.model_or_sim.type.lower(), - self.model_or_sim.name) - if self.parent_file is not None and formal: - data_str = '{}parent_file = ' \ - '{}\n\n'.format(data_str, self.parent_file._get_pname()) - else: - data_str = '{}\n'.format(data_str) - if show_data: - for block in self.blocks.values(): - if formal: - bl_repr = repr(block) - if len(bl_repr.strip()) > 0: - data_str = '{}Block {}\n--------------------\n{}' \ - '\n'.format(data_str, block.structure.name, - repr(block)) - else: - bl_str = str(block) - if len(bl_str.strip()) > 0: - data_str = '{}Block {}\n--------------------\n{}' \ - '\n'.format(data_str, block.structure.name, - str(block)) - return data_str - - def _get_pname(self): - if self.package_name is not None: - return '{}'.format(self.package_name) - else: - return '{}'.format(self._filename) - - def _get_block_header_info(self, line, path): - # init - header_variable_strs = [] - arr_clean_line = line.strip().split() - header_comment = MFComment('', path + (arr_clean_line[1],), - self._simulation_data, 0) - # break header into components - if len(arr_clean_line) < 2: - message = 'Block header does not contain a name. 
Name ' \ - 'expected in line "{}".'.format(line) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.model_name, - self._get_pname(), - self.path, - 'parsing block header', None, - inspect.stack()[0][3], - type_, value_, traceback_, message, - self._simulation_data.debug) - elif len(arr_clean_line) == 2: - return MFBlockHeader(arr_clean_line[1], header_variable_strs, - header_comment) - else: - # process text after block name - comment = False - for entry in arr_clean_line[2:]: - # if start of comment - if MFComment.is_comment(entry.strip()[0]): - comment = True - if comment: - header_comment.text = ' '.join([header_comment.text, - entry]) - else: - header_variable_strs.append(entry) - return MFBlockHeader(arr_clean_line[1], header_variable_strs, - header_comment) - - def _update_size_defs(self): - # build temporary data lookup by name - data_lookup = {} - for block in self.blocks.values(): - for dataset in block.datasets.values(): - data_lookup[dataset.structure.name] = dataset - - # loop through all data - for block in self.blocks.values(): - for dataset in block.datasets.values(): - # if data shape is 1-D - if dataset.structure.shape and \ - len(dataset.structure.shape) == 1: - # if shape name is data in this package - if dataset.structure.shape[0] in data_lookup: - size_def = data_lookup[dataset.structure.shape[0]] - size_def_name = size_def.structure.name - - if isinstance(dataset, mfdata.MFTransient): - # for transient data always use the maximum size - new_size = -1 - for key in dataset.get_active_key_list(): - try: - data = dataset.get_data(key=key[0]) - except (IOError, - OSError, - MFDataException): - # TODO: Handle case where external file - # path has been moved - data = None - if data is not None: - data_len = len(data) - if data_len > new_size: - new_size = data_len - else: - # for all other data set max to size - new_size = -1 - try: - data = dataset.get_data() - except (IOError, - OSError, - MFDataException): - # TODO: Handle case where external file - # path has been moved - data = None - if data is not None: - new_size = len(dataset.get_data()) - if size_def.get_data() != new_size >= 0: - # store current size - size_def.set_data(new_size) - - # informational message to the user - if self._simulation_data.verbosity_level.value >= \ - VerbosityLevel.normal.value: - print('INFORMATION: {} in {} changed to {} ' - 'based on size of ' - '{}'.format(size_def_name, - size_def.structure.path[:-1], - new_size, - dataset.structure.name)) - - def remove(self): - self.model_or_sim.remove_package(self) - - def build_child_packages_container(self, pkg_type, filerecord): - # get package class - package_obj = self.package_factory(pkg_type, - self.model_or_sim.model_type) - # create child package object - child_pkgs_name = 'utl{}packages'.format(pkg_type) - child_pkgs_obj = self.package_factory(child_pkgs_name, '') - child_pkgs = child_pkgs_obj(self.model_or_sim, self, pkg_type, - filerecord, None, package_obj) - setattr(self, pkg_type, child_pkgs) - self._child_package_groups[pkg_type] = child_pkgs - - def build_child_package(self, pkg_type, data, parameter_name, filerecord): - if not hasattr(self, pkg_type): - self.build_child_packages_container(pkg_type, filerecord) - if data is not None: - package_group = getattr(self, pkg_type) - # build child package file name - child_path = package_group._next_default_file_path() - # create new empty child package - package_obj = self.package_factory(pkg_type, - self.model_or_sim.model_type) - package = 
package_obj(self.model_or_sim, filename=child_path, - parent_file=self) - assert hasattr(package, parameter_name) - - if isinstance(data, dict): - # evaluate and add data to package - unused_data = {} - for key, value in data.items(): - # if key is an attribute of the child package - if isinstance(key, str) and hasattr(package, key): - # set child package attribute - child_data_attr = getattr(package, key) - if isinstance(child_data_attr, mfdatalist.MFList): - child_data_attr.set_data(value, autofill=True) - elif isinstance(child_data_attr, mfdata.MFData): - child_data_attr.set_data(value) - elif key == 'fname' or key == 'filename': - child_path = value - package._filename = value - else: - setattr(package, key, value) - else: - unused_data[key] = value - if unused_data: - setattr(package, parameter_name, unused_data) - else: - setattr(package, parameter_name, data) - - # append package to list - package_group._init_package(package, child_path) - - def build_mfdata(self, var_name, data=None): - if self.loading_package: - data = None - for key, block in self.structure.blocks.items(): - if var_name in block.data_structures: - if block.name not in self.blocks: - self.blocks[block.name] = MFBlock(self._simulation_data, - self.dimensions, block, - self.path + (key,), - self.model_or_sim, self) - dataset_struct = block.data_structures[var_name] - var_path = self.path + (key, var_name) - ds = self.blocks[block.name].add_dataset(dataset_struct, - data, var_path) - self._data_list.append(ds) - return ds - - message = 'Unable to find variable "{}" in package ' \ - '"{}".'.format(var_name, self.package_type) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.model_name, self._get_pname(), - self.path, 'building data objects', - None, inspect.stack()[0][3], - type_, value_, traceback_, message, - self._simulation_data.debug) - - def set_model_relative_path(self, model_ws): - # update blocks - for key, block in self.blocks.items(): - block.set_model_relative_path(model_ws) - # update sub-packages - for package in self._packagelist: - package.set_model_relative_path(model_ws) - - def set_all_data_external(self): - # set blocks - for key, block in self.blocks.items(): - file_name = os.path.split(self.filename)[1] - block.set_all_data_external(file_name) - # set sub-packages - for package in self._packagelist: - package.set_all_data_external() - - def load(self, strict=True): - # open file - try: - fd_input_file = open(self.get_file_path(), 'r') - except OSError as e: - if e.errno == errno.ENOENT: - message = 'File {} of type {} could not be opened' \ - '.'.format(self.get_file_path(), self.package_type) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.model_name, - self.package_name, - self.path, 'loading package file', - None, inspect.stack()[0][3], - type_, value_, traceback_, message, - self._simulation_data.debug) - - try: - self._load_blocks(fd_input_file, strict) - except ReadAsArraysException as err: - fd_input_file.close() - raise ReadAsArraysException(err) - # close file - fd_input_file.close() - - if self.simulation_data.auto_set_sizes: - self._update_size_defs() - - # return validity of file - return self.is_valid() - - def is_valid(self): - # Check blocks - for block in self.blocks.values(): - # Non-optional blocks must be enabled - if block.structure.number_non_optional_data() > 0 and \ - not block.enabled and block.is_allowed(): - self.last_error = 'Required block "{}" not ' \ - 'enabled'.format(block.block_header.name) - return False - # 
Enabled blocks must be valid - if block.enabled and not block.is_valid: - self.last_error = 'Invalid block ' \ - '"{}"'.format(block.block_header.name) - return False - - return True - - def _load_blocks(self, fd_input_file, strict=True, max_blocks=sys.maxsize): - # init - self._simulation_data.mfdata[self.path + ('pkg_hdr_comments',)] = \ - MFComment('', self.path, self._simulation_data) - self.post_block_comments = MFComment('', self.path, - self._simulation_data) - - blocks_read = 0 - found_first_block = False - line = ' ' - while line != '': - line = fd_input_file.readline() - clean_line = line.strip() - # If comment or empty line - if MFComment.is_comment(clean_line, True): - self._store_comment(line, found_first_block) - elif len(clean_line) > 4 and clean_line[:5].upper() == 'BEGIN': - # parse block header - try: - block_header_info = self._get_block_header_info(line, - self.path) - except MFDataException as mfde: - message = 'An error occurred while loading block header ' \ - 'in line "{}".'.format(line) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.model_name, self._get_pname(), - self.path, 'loading block header', - None, inspect.stack()[0][3], - type_, value_, traceback_, message, - self._simulation_data.debug, mfde) - - # if there is more than one possible block with the same name, - # resolve the correct block to use - block_key = block_header_info.name.lower() - block_num = 1 - possible_key = '{}-{}'.format(block_header_info.name.lower(), - block_num) - if possible_key in self.blocks: - block_key = possible_key - block_header_name = block_header_info.name.lower() - while block_key in self.blocks and \ - not self.blocks[block_key].is_allowed(): - block_key = '{}-{}'.format(block_header_name, - block_num) - block_num += 1 - - if block_key not in self.blocks: - # block name not recognized, load block as comments and - # issue a warning - if self.simulation_data.verbosity_level.value >= \ - VerbosityLevel.normal.value: - warning_str = 'WARNING: Block "{}" is not a valid block ' \ - 'name for file type ' \ - '{}.'.format(block_key, self.package_type) - print(warning_str) - self._store_comment(line, found_first_block) - while line != '': - line = fd_input_file.readline() - self._store_comment(line, found_first_block) - arr_line = datautil.PyListUtil.split_data_line(line) - if arr_line and (len(arr_line[0]) <= 2 or - arr_line[0][:3].upper() == 'END'): - break - else: - found_first_block = True - self.post_block_comments = \ - MFComment('', self.path, self._simulation_data) - skip_block = False - if self.blocks[block_key].loaded: - # Only blocks defined as repeating are allowed to have - # multiple entries - header_name = block_header_info.name - if not self.structure.blocks[header_name.lower()].\ - repeating(): - # warn and skip block - if self._simulation_data.verbosity_level.value >= \ - VerbosityLevel.normal.value: - warning_str = 'WARNING: Block "{}" has ' \ - 'multiple entries and is not ' \ - 'intended to be a repeating ' \ - 'block ({} package' \ - ')'.format(header_name, - self.package_type) - print(warning_str) - skip_block = True - - if not skip_block: - if self.simulation_data.verbosity_level.value >= \ - VerbosityLevel.verbose.value: - print(' loading block {}...'.format( - self.blocks[block_key].structure.name)) - - self.blocks[block_key].load(block_header_info, - fd_input_file, strict) - self._simulation_data.mfdata[self.blocks[block_key]. 
- blk_post_comment_path] = \ - self.post_block_comments - - blocks_read += 1 - if blocks_read >= max_blocks: - break - else: - if not (len(clean_line) == 0 or (len(line) > 2 and - line[:3].upper() == 'END')): - # Record file location of beginning of unresolved text - # treat unresolved text as a comment for now - self._store_comment(line, found_first_block) - - def write(self, ext_file_action=ExtFileAction.copy_relative_paths): - if self.simulation_data.auto_set_sizes: - self._update_size_defs() - - # create any folders in path - package_file_path = self.get_file_path() - package_folder = os.path.split(package_file_path)[0] - if package_folder and not os.path.isdir(package_folder): - os.makedirs(os.path.split(package_file_path)[0]) - - # open file - fd = open(package_file_path, 'w') - - # write blocks - self._write_blocks(fd, ext_file_action) - - fd.close() - - def create_package_dimensions(self): - model_dims = None - if self.container_type[0] == PackageContainerType.model: - model_dims = [modeldimensions.ModelDimensions( - self.path[0], self._simulation_data)] - else: - # this is a simulation file that does not correspond to a specific - # model. figure out which model to use and return a dimensions - # object for that model - if self.dfn_file_name[0:3] == 'exg': - exchange_rec_array = self._simulation_data.mfdata[ - ('nam', 'exchanges', 'exchanges')].get_data() - if exchange_rec_array is None: - return None - for exchange in exchange_rec_array: - if exchange[1].lower() == self._filename.lower(): - model_dims = [modeldimensions.ModelDimensions( - exchange[2], self._simulation_data), - modeldimensions.ModelDimensions( - exchange[3], self._simulation_data)] - break - elif self.parent_file is not None: - model_dims = [] - for md in self.parent_file.dimensions.model_dim: - model_name = md.model_name - model_dims.append(modeldimensions.ModelDimensions( - model_name, self._simulation_data)) - else: - model_dims = [modeldimensions.ModelDimensions( - None, self._simulation_data)] - return modeldimensions.PackageDimensions(model_dims, self.structure, - self.path) - - def _store_comment(self, line, found_first_block): - # Store comment - if found_first_block: - self.post_block_comments.text += line - else: - self._simulation_data.mfdata[self.path + - ('pkg_hdr_comments',)].text += line - - def _write_blocks(self, fd, ext_file_action): - # verify that all blocks are valid - if not self.is_valid(): - message = 'Unable to write out model file "{}" due to the ' \ - 'following error: ' \ - '{} ({})'.format(self._filename, self.last_error, - self.path) - type_, value_, traceback_ = sys.exc_info() - raise MFDataException(self.model_name, self._get_pname(), - self.path, 'writing package blocks', - None, inspect.stack()[0][3], - type_, value_, traceback_, message, - self._simulation_data.debug) - - # write initial comments - pkg_hdr_comments_path = self.path + ('pkg_hdr_comments',) - if pkg_hdr_comments_path in self._simulation_data.mfdata: - self._simulation_data.mfdata[self.path + - ('pkg_hdr_comments',)].write(fd, - False) - - # loop through blocks - block_num = 1 - for block in self.blocks.values(): - if self.simulation_data.verbosity_level.value >= \ - VerbosityLevel.verbose.value: - print(' writing block {}...'.format(block.structure.name)) - # write block - block.write(fd, ext_file_action=ext_file_action) - block_num += 1 - - def get_file_path(self): - if self.path[0] in self._simulation_data.mfpath.model_relative_path: - return os.path.join(self._simulation_data.mfpath.get_model_path( - 
self.path[0]), self._filename) - else: - return os.path.join(self._simulation_data.mfpath.get_sim_path(), - self._filename) - - def export(self, f, **kwargs): - """ - Method to export a package to netcdf or shapefile based on the - extension of the file name (.shp for shapefile, .nc for netcdf) - - Parameters - ---------- - f : str - filename - kwargs : keyword arguments - modelgrid : flopy.discretization.Grid instance - user supplied modelgrid which can be used for exporting - in lieu of the modelgrid associated with the model object - - Returns - ------- - None or Netcdf object - - """ - from flopy import export - return export.utils.package_export(f, self, **kwargs) - - def plot(self, **kwargs): - """ - Plot 2-D, 3-D, transient 2-D, and stress period list (MfList) - package input data - - Parameters - ---------- - package: flopy.pakbase.Package instance supplied for plotting - - **kwargs : dict - filename_base : str - Base file name that will be used to automatically generate file - names for output image files. Plots will be exported as image - files if file_name_base is not None. (default is None) - file_extension : str - Valid matplotlib.pyplot file extension for savefig(). Only used - if filename_base is not None. (default is 'png') - mflay : int - MODFLOW zero-based layer number to return. If None, then all - all layers will be included. (default is None) - kper : int - MODFLOW zero-based stress period number to return. (default is - zero) - key : str - MfList dictionary key. (default is None) - - Returns - ---------- - axes : list - Empty list is returned if filename_base is not None. Otherwise - a list of matplotlib.pyplot.axis are returned. - - """ - from flopy.plot.plotutil import PlotUtilities - - if not self.plotable: - raise TypeError("Simulation level packages are not plotable") - - axes = PlotUtilities._plot_package_helper(self, - **kwargs) - return axes - - -class MFChildPackages(object): - def __init__(self, model, parent, pkg_type, filerecord, package=None, - package_class=None): - self._packages = [] - self._filerecord = filerecord - if package is not None: - self._packages.append(package) - self._model = model - self._cpparent = parent - self._pkg_type = pkg_type - self._package_class = package_class - - def __getattr__(self, attr): - if '_packages' in self.__dict__ and len(self._packages) > 0 and \ - hasattr(self._packages[0], attr): - item = getattr(self._packages[0], attr) - return item - raise AttributeError(attr) - - def __getitem__(self, k): - if isinstance(k, int): - if k < len(self._packages): - return self._packages[k] - raise ValueError('Package index {} does not exist.'.format(k)) - - def __setattr__(self, key, value): - if key != '_packages' and key != '_model' and key != '_cpparent' and \ - key != '_inattr' and key != '_filerecord' and \ - key != '_package_class' and key != '_pkg_type': - if len(self._packages) == 0: - raise Exception('No {} package is currently attached to package' - ' {}. 
Use the initialize method to create a(n) ' - '{} package before attempting to access its ' - 'properties.'.format(self._pkg_type, - self._cpparent.filename, - self._pkg_type)) - package = self._packages[0] - setattr(package, key, value) - return - super(MFChildPackages, self).__setattr__(key, value) - - def __default_file_path_base(self, file_path, suffix=''): - stem = os.path.split(file_path)[1] - stem_lst = stem.split('.') - file_name = '.'.join(stem_lst[:-1]) - if len(stem_lst) > 1: - file_ext = stem_lst[-1] - return '{}.{}{}.{}'.format(file_name, file_ext, suffix, - self._pkg_type) - elif suffix != '': - return '{}.{}'.format(stem, self._pkg_type) - else: - return '{}.{}.{}'.format(stem, suffix, self._pkg_type) - - def __file_path_taken(self, possible_path): - for package in self._packages: - # Do case insensitive compare - if package.filename.lower() == possible_path.lower(): - return True - return False - - def _next_default_file_path(self): - possible_path = self.__default_file_path_base(self._cpparent.filename) - suffix = 0 - while self.__file_path_taken(possible_path): - possible_path = self.__default_file_path_base( - self._cpparent.filename, suffix) - suffix += 1 - return possible_path - - def _init_package(self, package, fname): - # clear out existing packages - self._remove_packages() - if fname is None: - # build a file name - fname = self._next_default_file_path() - package._filename = fname - # set file record variable - self._filerecord.set_data(fname, autofill=True) - # add the package to the list - self._packages.append(package) - - def _update_filename(self, old_fname, new_fname): - file_record = self._filerecord.get_data() - new_file_record_data = [] - if file_record is not None: - file_record_data = file_record[0] - for item in file_record_data: - if item.lower() == old_fname.lower(): - new_file_record_data.append((new_fname,)) - else: - new_file_record_data.append((item,)) - else: - new_file_record_data.append((new_fname,)) - self._filerecord.set_data(new_file_record_data) - - def _append_package(self, package, fname, update_frecord=True): - if fname is None: - # build a file name - fname = self._next_default_file_path() - package._filename = fname - - if update_frecord: - # set file record variable - file_record = self._filerecord.get_data() - file_record_data = file_record - new_file_record_data = [] - for item in file_record_data: - new_file_record_data.append((item[0],)) - new_file_record_data.append((fname,)) - self._filerecord.set_data(new_file_record_data) - - # add the package to the list - self._packages.append(package) - - def _remove_packages(self): - for package in self._packages: - self._model.remove_package(package) - self._packages = [] +import os +import sys +import errno +import inspect +import numpy as np +from collections import OrderedDict + +from .mfbase import PackageContainer, ExtFileAction, PackageContainerType +from .mfbase import MFFileMgmt, MFDataException, ReadAsArraysException, \ + MFInvalidTransientBlockHeaderException, VerbosityLevel, \ + FlopyException +from .data.mfstructure import DatumType +from .data import mfstructure, mfdata +from ..utils import datautil +from .data import mfdataarray, mfdatalist, mfdatascalar +from .coordinates import modeldimensions +from ..pakbase import PackageInterface +from .data.mfdatautil import MFComment +from ..utils.check import mf6check + + +class MFBlockHeader(object): + """ + Represents the header of a block in a MF6 input file + + Parameters + ---------- + name : string + block name + 
variable_strings : list + list of strings that appear after the block name + comment : MFComment + comment text in the block header + + Attributes + ---------- + name : string + block name + variable_strings : list + list of strings that appear after the block name + comment : MFComment + comment text in the block header + data_items : list + list of MFVariable of the variables contained in this block + + Methods + ------- + write_header : (fd : file object) + writes block header to file object 'fd' + write_footer : (fd : file object) + writes block footer to file object 'fd' + set_all_data_external + sets the block's list and array data to be stored externally + + """ + def __init__(self, name, variable_strings, comment, simulation_data=None, + path=None): + self.name = name + self.variable_strings = variable_strings + if not ((simulation_data is None and path is None) or + (simulation_data is not None and path is not None)): + raise FlopyException('Block header must be initialized with both ' + 'simulation_data and path or with neither.') + if simulation_data is None: + self.comment = comment + self.simulation_data = None + self.path = None + self.comment_path = None + else: + self.connect_to_dict(simulation_data, path, comment) + # TODO: Get data_items from dictionary + self.data_items = [] + + def build_header_variables(self, simulation_data, block_header_structure, + block_path, data, dimensions): + self.data_items = [] + var_path = block_path + (block_header_structure[0].name,) + + # fix up data + fixed_data = [] + if block_header_structure[0].data_item_structures[0].type == \ + DatumType.keyword: + data_item = block_header_structure[0].data_item_structures[0] + fixed_data.append(data_item.name) + if isinstance(data, tuple): + data = list(data) + if isinstance(data, list): + fixed_data = fixed_data + data + else: + fixed_data.append(data) + if len(fixed_data) > 0: + fixed_data = [tuple(fixed_data)] + # create data object + new_data = MFBlock.data_factory(simulation_data, None, + block_header_structure[0], True, + var_path, dimensions, fixed_data) + self.data_items.append(new_data) + + def is_same_header(self, block_header): + if len(self.variable_strings) > 0: + if len(self.variable_strings) != \ + len(block_header.variable_strings): + return False + else: + for sitem, oitem in zip(self.variable_strings, + block_header.variable_strings): + if sitem != oitem: + return False + return True + elif len(self.data_items) > 0 and \ + len(block_header.variable_strings) > 0: + typ_obj = self.data_items[0].structure.data_item_structures[0].\ + type_obj + if typ_obj == int or typ_obj == float: + return bool(self.variable_strings[0] == \ + block_header.variable_strings[0]) + else: + return True + elif len(self.data_items) == len(block_header.variable_strings): + return True + return False + + def get_comment(self): + if self.simulation_data is None: + return self.comment + else: + return self.simulation_data.mfdata[self.comment_path] + + def connect_to_dict(self, simulation_data, path, comment=None): + self.simulation_data = simulation_data + self.path = path + self.comment_path = path + ('blk_hdr_comment',) + if comment is None: + simulation_data.mfdata[self.comment_path] = self.comment + else: + simulation_data.mfdata[self.comment_path] = comment + self.comment = None + + def write_header(self, fd): + fd.write('BEGIN {}'.format(self.name)) + if len(self.data_items) > 0: + if isinstance(self.data_items[0], mfdatascalar.MFScalar): + one_based = self.data_items[0].structure.type == \ + 
DatumType.integer + entry = self.data_items[0].get_file_entry(values_only=True, + one_based=one_based) + else: + entry = self.data_items[0].get_file_entry() + fd.write('{}'.format(entry.rstrip())) + if len(self.data_items) > 1: + for data_item in self.data_items[1:]: + entry = data_item.get_file_entry(values_only=True) + fd.write('%s' % (entry.rstrip())) + if self.get_comment().text: + fd.write(' ') + self.get_comment().write(fd) + fd.write('\n') + + def write_footer(self, fd): + fd.write('END {}'.format(self.name)) + if len(self.data_items) > 0: + one_based = self.data_items[0].structure.type == \ + DatumType.integer + if isinstance(self.data_items[0], mfdatascalar.MFScalar): + entry = self.data_items[0].get_file_entry(values_only=True, + one_based=one_based) + else: + entry = self.data_items[0].get_file_entry() + fd.write('{}'.format(entry.rstrip())) + fd.write('\n') + + def get_transient_key(self): + transient_key = None + for index in range(0, len(self.data_items)): + if self.data_items[index].structure.type != DatumType.keyword: + transient_key = self.data_items[index].get_data() + if isinstance(transient_key, np.recarray): + item_struct = self.data_items[index].structure + key_index = item_struct.first_non_keyword_index() + if not (key_index is not None and + len(transient_key[0]) > key_index): + if key_index is None: + raise FlopyException('Block header index could ' + 'not be determined.') + else: + raise FlopyException('Block header index "{}" ' + 'must be less than "{}"' + '.'.format( + key_index, len(transient_key[0]))) + transient_key = transient_key[0][key_index] + break + return transient_key + + +class MFBlock(object): + """ + Represents a block in a MF6 input file + + + Parameters + ---------- + simulation_data : MFSimulationData + data specific to this simulation + dimensions : MFDimensions + describes model dimensions including model grid and simulation time + structure : MFVariableStructure + structure describing block + path : tuple + unique path to block + + Attributes + ---------- + block_headers : MFBlockHeaderIO + block header text (BEGIN/END), header variables, comments in the header + structure : MFBlockStructure + structure describing block + path : tuple + unique path to block + datasets : OrderDict + dictionary of dataset objects with keys that are the name of the + dataset + datasets_keyword : dict + dictionary of dataset objects with keys that are key words to identify + start of dataset + enabled : boolean + block is being used + + Methods + ------- + get_block_header_info : (line : string, path : tuple) + static method that parses a line as a block header and returns a + MFBlockHeader class representing the block header in that line + load : (block_header : MFBlockHeader, fd : file, strict : boolean) + loads block from file object. 
file object must be advanced to
+        beginning of block before calling
+    write : (fd : file)
+        writes block to a file object
+    is_valid : ()
+        returns true if the block is valid
+
+    See Also
+    --------
+
+    Notes
+    -----
+
+    Examples
+    --------
+
+    """
+
+    def __init__(self, simulation_data, dimensions, structure, path,
+                 model_or_sim, container_package):
+        self._simulation_data = simulation_data
+        self._dimensions = dimensions
+        self._model_or_sim = model_or_sim
+        self._container_package = container_package
+        self.block_headers = [MFBlockHeader(structure.name, [],
+                                            MFComment('', path, simulation_data,
+                                                      0),
+                                            simulation_data, path)]
+        self.structure = structure
+        self.path = path
+        self.datasets = OrderedDict()
+        self.datasets_keyword = {}
+        self.blk_trailing_comment_path = path + ('blk_trailing_comment',)
+        self.blk_post_comment_path = path + ('blk_post_comment',)
+        if self.blk_trailing_comment_path not in simulation_data.mfdata:
+            simulation_data.mfdata[self.blk_trailing_comment_path] = \
+                MFComment('', '', simulation_data, 0)
+        if self.blk_post_comment_path not in simulation_data.mfdata:
+            simulation_data.mfdata[self.blk_post_comment_path] = \
+                MFComment('\n', '', simulation_data, 0)
+        # initially disable if optional
+        self.enabled = structure.number_non_optional_data() > 0
+        self.loaded = False
+        self.external_file_name = None
+        self._structure_init()
+
+    def __repr__(self):
+        return self._get_data_str(True)
+
+    def __str__(self):
+        return self._get_data_str(False)
+
+    def _get_data_str(self, formal):
+        data_str = ''
+        for dataset in self.datasets.values():
+            if formal:
+                ds_repr = repr(dataset)
+                if len(ds_repr.strip()) > 0:
+                    data_str = '{}{}\n{}\n'.format(data_str,
+                                                   dataset.structure.name,
+                                                   repr(dataset))
+            else:
+                ds_str = str(dataset)
+                if len(ds_str.strip()) > 0:
+                    data_str = '{}{}\n{}\n'.format(data_str,
+                                                   dataset.structure.name,
+                                                   str(dataset))
+        return data_str
+
+    # return an MFScalar, MFList, or MFArray
+    @staticmethod
+    def data_factory(sim_data, model_or_sim, structure, enable, path,
+                     dimensions, data=None, package=None):
+        data_type = structure.get_datatype()
+        # examine the data structure and determine the data type
+        if data_type == mfstructure.DataType.scalar_keyword or \
+                data_type == mfstructure.DataType.scalar:
+            return mfdatascalar.MFScalar(sim_data, model_or_sim, structure,
+                                         data, enable, path, dimensions)
+        elif data_type == mfstructure.DataType.scalar_keyword_transient or \
+                data_type == mfstructure.DataType.scalar_transient:
+            trans_scalar = mfdatascalar.MFScalarTransient(sim_data,
+                                                          model_or_sim,
+                                                          structure,
+                                                          enable, path,
+                                                          dimensions)
+            if data is not None:
+                trans_scalar.set_data(data, key=0)
+            return trans_scalar
+        elif data_type == mfstructure.DataType.array:
+            return mfdataarray.MFArray(sim_data, model_or_sim, structure,
+                                       data, enable, path, dimensions)
+        elif data_type == mfstructure.DataType.array_transient:
+            trans_array = mfdataarray.MFTransientArray(sim_data, model_or_sim,
+                                                       structure, enable,
+                                                       path, dimensions)
+            if data is not None:
+                trans_array.set_data(data, key=0)
+            return trans_array
+        elif data_type == mfstructure.DataType.list:
+            return mfdatalist.MFList(sim_data, model_or_sim, structure, data,
+                                     enable, path, dimensions, package)
+        elif data_type == mfstructure.DataType.list_transient:
+            trans_list = mfdatalist.MFTransientList(sim_data, model_or_sim,
+                                                    structure, enable, path,
+                                                    dimensions, package)
+            if data is not None:
+                trans_list.set_data(data, key=0, autofill=True)
+            return trans_list
+        elif data_type ==
mfstructure.DataType.list_multiple: + mult_list = mfdatalist.MFMultipleList(sim_data, model_or_sim, + structure, enable, path, + dimensions, package) + if data is not None: + mult_list.set_data(data, key=0, autofill=True) + return mult_list + + def _structure_init(self): + # load datasets keywords into dictionary + for dataset_struct in self.structure.data_structures.values(): + for keyword in dataset_struct.get_keywords(): + self.datasets_keyword[keyword] = dataset_struct + # load block header data items into dictionary + for dataset in self.structure.block_header_structure: + self._new_dataset(dataset.name, dataset, True, None) + + def set_model_relative_path(self, model_ws): + # update datasets + for key, dataset in self.datasets.items(): + if dataset.structure.file_data: + try: + file_data = dataset.get_data() + except MFDataException as mfde: + raise MFDataException(mfdata_except=mfde, + model= + self._container_package.model_name, + package= + self._container_package._get_pname(), + message='Error occurred while ' + 'getting file data from ' + '"{}"'.format( + dataset.structure.name)) + if file_data: + # update file path location for all file paths + for file_line in file_data: + old_file_name = os.path.split(file_line[0])[1] + file_line[0] = os.path.join(model_ws, old_file_name) + # update block headers + for block_header in self.block_headers: + for dataset in block_header.data_items: + if dataset.structure.file_data: + try: + file_data = dataset.get_data() + except MFDataException as mfde: + raise MFDataException(mfdata_except=mfde, + model=self._container_package. + model_name, + package=self._container_package. + _get_pname(), + message='Error occurred while ' + 'getting file data from ' + '"{}"'.format( + dataset.structure.name)) + + if file_data: + # update file path location for all file paths + for file_line in file_data: + old_file_path, old_file_name = \ + os.path.split(file_line[1]) + new_file_path = os.path.join(model_ws, + old_file_name) + # update transient keys of datasets within the + # block + for key, idataset in self.datasets.items(): + if isinstance(idataset, mfdata.MFTransient): + idataset.update_transient_key(file_line[1], + new_file_path) + file_line[1] = os.path.join(model_ws, + old_file_name) + + def add_dataset(self, dataset_struct, data, var_path): + try: + self.datasets[var_path[-1]] = self.data_factory( + self._simulation_data, self._model_or_sim, dataset_struct, + True, var_path, self._dimensions, data, + self._container_package) + except MFDataException as mfde: + raise MFDataException(mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message='Error occurred while adding' + ' dataset "{}" to block ' + '"{}"'.format(dataset_struct.name, + self.structure.name)) + + self._simulation_data.mfdata[var_path] = self.datasets[var_path[-1]] + dtype = dataset_struct.get_datatype() + if dtype == mfstructure.DataType.list_transient or \ + dtype == mfstructure.DataType.list_multiple or \ + dtype == mfstructure.DataType.array_transient: + # build repeating block header(s) + if isinstance(data, dict): + # Add block headers for each dictionary key + for index in data: + if isinstance(index, tuple): + header_list = list(index) + else: + header_list = [index] + self._build_repeating_header(header_list) + elif isinstance(data, list): + # Add a single block header of value 0 + self._build_repeating_header([0]) + elif dtype != mfstructure.DataType.list_multiple and \ + data is not None: + 
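#  A minimal sketch of the transient-data case handled here, assuming a
#  hypothetical existing GWF model `gwf`; each zero-based dictionary key
#  produces one repeating PERIOD block header when the file is written:
#
#      spd = {0: [[(0, 0, 0), -1000.0]],  # becomes BEGIN PERIOD 1
#             2: [[(0, 0, 0), -500.0]]}   # becomes BEGIN PERIOD 3
#      wel = flopy.mf6.ModflowGwfwel(gwf, stress_period_data=spd)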
self._build_repeating_header([[0]]) + + return self.datasets[var_path[-1]] + + def _build_repeating_header(self, header_data): + if self._header_exists(header_data[0]): + return + if len(self.block_headers[-1].data_items) == 1 and \ + self.block_headers[-1].data_items[0].get_data() is not None: + block_header_path = self.path + (len(self.block_headers) + 1,) + block_header = MFBlockHeader(self.structure.name, [], + MFComment('', self.path, + self._simulation_data, 0), + self._simulation_data, + block_header_path) + self.block_headers.append(block_header) + else: + block_header_path = self.path + (len(self.block_headers),) + struct = self.structure + last_header = self.block_headers[-1] + try: + last_header.build_header_variables(self._simulation_data, + struct.block_header_structure, + block_header_path, + header_data, + self._dimensions) + except MFDataException as mfde: + raise MFDataException(mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message='Error occurred while building' + ' block header variables for block ' + '"{}"'.format(last_header.name)) + + def _new_dataset(self, key, dataset_struct, block_header=False, + initial_val=None): + dataset_path = self.path + (key,) + if block_header: + if dataset_struct.type == DatumType.integer and \ + initial_val is not None \ + and len(initial_val) >= 1 and \ + dataset_struct.get_record_size()[0] == 1: + # stress periods are stored 0 based + initial_val = int(initial_val[0]) - 1 + if isinstance(initial_val, list): + initial_val = [tuple(initial_val)] + try: + new_data = MFBlock.data_factory(self._simulation_data, + self._model_or_sim, + dataset_struct, True, + dataset_path, self._dimensions, + initial_val, + self._container_package) + except MFDataException as mfde: + raise MFDataException(mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message='Error occurred while adding' + ' dataset "{}" to block ' + '"{}"'.format( + dataset_struct.name, + self.structure.name)) + self.block_headers[-1].data_items.append(new_data) + else: + try: + self.datasets[key] = self.data_factory(self._simulation_data, + self._model_or_sim, + dataset_struct, True, + dataset_path, + self._dimensions, + initial_val, + self._container_package) + except MFDataException as mfde: + raise MFDataException(mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message='Error occurred while adding' + ' dataset "{}" to block ' + '"{}"'.format( + dataset_struct.name, + self.structure.name)) + for keyword in dataset_struct.get_keywords(): + self.datasets_keyword[keyword] = dataset_struct + + def is_empty(self): + for key, dataset in self.datasets.items(): + try: + has_data = dataset.has_data() + except MFDataException as mfde: + raise MFDataException(mfdata_except=mfde, + model=self._container_package.model_name, + package=self._container_package._get_pname(), + message='Error occurred while verifying' + ' data of dataset "{}" in block ' + '"{}"'.format( + dataset.structure.name, + self.structure.name)) + + if has_data is not None and has_data: + return False + return True + + def load(self, block_header, fd, strict=True): + # verify number of header variables + if len(block_header.variable_strings) < \ + self.structure.number_non_optional_block_header_data(): + if self._simulation_data.verbosity_level.value >= \ + VerbosityLevel.normal.value: + warning_str = 'WARNING: Block header for block 
"{}" does not ' \ + 'contain the correct number of ' \ + 'variables {}'.format(block_header.name, self.path) + print(warning_str) + return + + if self.loaded: + # verify header has not already been loaded + for bh_current in self.block_headers: + if bh_current.is_same_header(block_header): + if self._simulation_data.verbosity_level.value >= \ + VerbosityLevel.normal.value: + warning_str = 'WARNING: Block header for block "{}" is ' \ + 'not a unique block header ' \ + '{}'.format(block_header.name, self.path) + print(warning_str) + return + + # init + self.enabled = True + if not self.loaded: + self.block_headers = [] + self.block_headers.append(block_header) + + # process any header variable + if len(self.structure.block_header_structure) > 0: + dataset = self.structure.block_header_structure[0] + self._new_dataset(dataset.name, dataset, True, + self.block_headers[-1].variable_strings) + + # handle special readasarrays case + if self._container_package.structure.read_as_arrays: + # auxiliary variables may appear with aux variable name as keyword + aux_vars = self._container_package.auxiliary.get_data() + if aux_vars is not None: + for var_name in list(aux_vars[0])[1:]: + self.datasets_keyword[(var_name,)] = \ + self._container_package.aux.structure + + comments = [] + + # capture any initial comments + initial_comment = MFComment('', '', 0) + fd_block = fd + line = fd_block.readline() + datautil.PyListUtil.reset_delimiter_used() + arr_line = datautil.PyListUtil.split_data_line(line) + while MFComment.is_comment(line, True): + initial_comment.add_text(line) + line = fd_block.readline() + arr_line = datautil.PyListUtil.split_data_line(line) + + # if block not empty + external_file_info = None + if not (len(arr_line[0]) > 2 and arr_line[0][:3].upper() == 'END'): + if arr_line[0].lower() == 'open/close': + # open block contents from external file + fd_block.readline() + fd_path = os.path.split(os.path.realpath(fd_block.name))[0] + try: + if self._simulation_data.verbosity_level.value >= \ + VerbosityLevel.verbose.value: + print(' opening external file "{}"..' + '.'.format(arr_line[1])) + external_file_info = arr_line + fd_block = open(os.path.join(fd_path, arr_line[1]), + 'r') + # read first line of external file + line = fd_block.readline() + arr_line = datautil.PyListUtil.split_data_line(line) + except: + type_, value_, traceback_ = sys.exc_info() + message = 'Error reading external file specified in ' \ + 'line "{}"'.format(line) + raise MFDataException(self._container_package.model_name, + self._container_package._get_pname(), + self.path, 'reading external file', + self.structure.name, + inspect.stack()[0][3], type_, + value_, traceback_, message, + self._simulation_data.debug) + if len(self.structure.data_structures) <= 1: + # load a single data set + dataset = self.datasets[next(iter(self.datasets))] + try: + if self._simulation_data.verbosity_level.value >= \ + VerbosityLevel.verbose.value: + print(' loading data {}..' + '.'.format(dataset.structure.name)) + next_line = dataset.load(line, fd_block, + self.block_headers[-1], + initial_comment, + external_file_info) + except MFDataException as mfde: + raise MFDataException( + mfdata_except=mfde, model=self._container_package. 
+                    model_name,
+                    package=self._container_package._get_pname(),
+                    message='Error occurred while loading data "{}" in '
+                            'block "{}" from file "{}"'
+                            '.'.format(dataset.structure.name,
+                                       self.structure.name,
+                                       fd_block.name))
+            package_info_list = self._get_package_info(dataset)
+            if package_info_list is not None:
+                for package_info in package_info_list:
+                    if self._simulation_data.verbosity_level.value >= \
+                            VerbosityLevel.verbose.value:
+                        print(' loading child package {}..'
+                              '.'.format(package_info[0]))
+                    pkg = self._model_or_sim.load_package(
+                        package_info[0], package_info[1],
+                        package_info[1], True, package_info[2],
+                        package_info[3], self._container_package)
+                    if hasattr(self._container_package, package_info[0]):
+                        package_group = getattr(self._container_package,
+                                                package_info[0])
+                        package_group._append_package(pkg, pkg.filename,
+                                                      False)
+
+            if next_line[1] is not None:
+                arr_line = datautil.PyListUtil.split_data_line(
+                    next_line[1])
+            else:
+                arr_line = ''
+            # capture any trailing comments
+            post_data_comments = MFComment('', '',
+                                           self._simulation_data, 0)
+            dataset.post_data_comments = post_data_comments
+            while arr_line and (len(next_line[1]) <= 2 or
+                                arr_line[0][:3].upper() != 'END'):
+                next_line[1] = fd_block.readline().strip()
+                arr_line = datautil.PyListUtil.split_data_line(
+                    next_line[1])
+                if arr_line and (len(next_line[1]) <= 2 or
+                                 arr_line[0][:3].upper() != 'END'):
+                    post_data_comments.add_text(' '.join(arr_line))
+        else:
+            # look for keyword and store line as data or comment
+            try:
+                key, results = self._find_data_by_keyword(line, fd_block,
+                                                          initial_comment)
+            except MFInvalidTransientBlockHeaderException as e:
+                warning_str = 'WARNING: {}'.format(e)
+                print(warning_str)
+                self.block_headers.pop()
+                return
+
+            self._save_comments(arr_line, line, key, comments)
+            if results[1] is None or results[1][:3].upper() != 'END':
+                # block consists of unordered datasets
+                # load the data sets out of order based on
+                # initial constants
+                line = ' '
+                while line != '':
+                    line = fd_block.readline()
+                    arr_line = datautil.PyListUtil.\
+                        split_data_line(line)
+                    if arr_line:
+                        # determine if at end of block
+                        if len(arr_line[0]) > 2 and \
+                                arr_line[0][:3].upper() == 'END':
+                            break
+                        # look for keyword and store line as data or comment
+                        key, result = self._find_data_by_keyword(
+                            line, fd_block, initial_comment)
+                        self._save_comments(arr_line, line, key, comments)
+                        if result[1] is not None and \
+                                result[1][:3].upper() == 'END':
+                            break
+
+        self._simulation_data.mfdata[self.blk_trailing_comment_path].text = \
+            comments
+        self.loaded = True
+        self.is_valid()
+
+    def _find_data_by_keyword(self, line, fd, initial_comment):
+        first_key = None
+        nothing_found = False
+        next_line = [True, line]
+        while next_line[0] and not nothing_found:
+            arr_line = datautil.PyListUtil.\
+                split_data_line(next_line[1])
+            key = datautil.find_keyword(arr_line, self.datasets_keyword)
+            if key is not None:
+                ds_name = self.datasets_keyword[key].name
+                try:
+                    if self._simulation_data.verbosity_level.value >= \
+                            VerbosityLevel.verbose.value:
+                        print(' loading data {}...'.format(ds_name))
+                    next_line = self.datasets[ds_name].load(
+                        next_line[1], fd, self.block_headers[-1],
+                        initial_comment)
+                except MFDataException as mfde:
+                    raise MFDataException(mfdata_except=mfde,
+                                          model=self._container_package.
+                                          model_name,
+                                          package=self._container_package.
+ _get_pname(), + message='Error occurred while ' + 'loading data "{}" in ' + 'block "{}" from file "{}"' + '.'.format( + ds_name, self.structure.name, + fd.name)) + + # see if first item's name indicates a reference to + # another package + package_info_list = self._get_package_info(self.datasets[ + ds_name]) + if package_info_list is not None: + for package_info in package_info_list: + if self._simulation_data.verbosity_level.value >= \ + VerbosityLevel.verbose.value: + print(' loading child package {}..' + '.'.format(package_info[1])) + pkg = self._model_or_sim.load_package( + package_info[0], package_info[1], package_info[1], + True, package_info[2], package_info[3], + self._container_package) + if hasattr(self._container_package, package_info[0]): + package_group = getattr(self._container_package, + package_info[0]) + package_group._append_package(pkg, pkg.filename, + False) + if first_key is None: + first_key = key + nothing_found = False + elif arr_line[0].lower() == 'readasarrays' and \ + self.path[-1].lower() == 'options' and \ + self._container_package.structure.read_as_arrays == False: + error_msg = 'ERROR: Attempting to read a ReadAsArrays ' \ + 'package as a non-ReadAsArrays ' \ + 'package {}'.format(self.path) + raise ReadAsArraysException(error_msg) + else: + nothing_found = True + + if first_key is None: + # look for recarrays. if there is a lone recarray in this block, + # use it by default + recarrays = self.structure.get_all_recarrays() + if len(recarrays) != 1: + return key, [None, None] + dataset = self.datasets[recarrays[0].name] + ds_result = dataset.load(line, fd, self.block_headers[-1], + initial_comment) + + # see if first item's name indicates a reference to another package + package_info_list = self._get_package_info(dataset) + if package_info_list is not None: + for package_info in package_info_list: + if self._simulation_data.verbosity_level.value >= \ + VerbosityLevel.verbose.value: + print(' loading child package {}..' + '.'.format(package_info[0])) + pkg = self._model_or_sim.load_package( + package_info[0], package_info[1], None, True, + package_info[2], package_info[3], + self._container_package) + if hasattr(self._container_package, package_info[0]): + package_group = getattr(self._container_package, + package_info[0]) + package_group._append_package(pkg, pkg.filename, + False) + + return recarrays[0].keyword, ds_result + else: + return first_key, next_line + + def _get_package_info(self, dataset): + if not dataset.structure.file_data: + return None + for index in range(0, len(dataset.structure.data_item_structures)): + data_item = dataset.structure.data_item_structures[index] + if data_item.type == DatumType.keyword or data_item.type == \ + DatumType.string: + item_name = data_item.name + package_type = item_name[:-1] + model_type = self._model_or_sim.structure.model_type + if PackageContainer.package_factory(package_type, + model_type) is not None: + try: + data = dataset.get_data() + except MFDataException as mfde: + raise MFDataException(mfdata_except=mfde, + model=self._container_package. + model_name, + package=self._container_package. 
+ _get_pname(),
+ message='Error occurred while '
+ 'getting data from "{}" '
+ 'in block "{}".'.format(
+ dataset.structure.name,
+ self.structure.name))
+ if isinstance(data, np.recarray):
+ file_location = data[-1][index]
+ else:
+ file_location = data
+ package_info_list = []
+ file_path, file_name = os.path.split(file_location)
+ dict_package_name = '{}_{}'.format(package_type,
+ self.path[-2])
+ package_info_list.append((package_type, file_name,
+ file_path,
+ dict_package_name))
+ return package_info_list
+ return None
+ return None
+
+ def _save_comments(self, arr_line, line, key, comments):
+ # FIX: Save these comments somewhere in the data set
+ if key not in self.datasets_keyword:
+ if MFComment.is_comment(key, True):
+ if comments:
+ comments.append('\n')
+ comments.append(arr_line)
+
+ def write(self, fd, ext_file_action=ExtFileAction.copy_relative_paths):
+ # never write an empty block
+ is_empty = self.is_empty()
+ if is_empty and self.structure.name.lower() != 'exchanges' and \
+ self.structure.name.lower() != 'options':
+ return
+ if self.structure.repeating():
+ repeating_datasets = self._find_repeating_datasets()
+ for repeating_dataset in repeating_datasets:
+ # resolve any missing block headers
+ self._add_missing_block_headers(repeating_dataset)
+ if len(repeating_datasets) > 0:
+ # loop through all block headers
+ for block_header in self.block_headers:
+ self._write_block(fd, block_header, ext_file_action)
+ else:
+ # write out block
+ self._write_block(fd, self.block_headers[0], ext_file_action)
+
+ else:
+ self._write_block(fd, self.block_headers[0], ext_file_action)
+
+ def _add_missing_block_headers(self, repeating_dataset):
+ for key in repeating_dataset.get_active_key_list():
+ if not self._header_exists(key[0]):
+ self._build_repeating_header([key[0]])
+
+ def _header_exists(self, key):
+ if not isinstance(key, list):
+ comp_key_list = [key]
+ else:
+ comp_key_list = key
+ for block_header in self.block_headers:
+ transient_key = block_header.get_transient_key()
+ for comp_key in comp_key_list:
+ if transient_key is not None and transient_key == comp_key:
+ return True
+ return False
+
+ def set_all_data_external(self, base_name):
+ for key, dataset in self.datasets.items():
+ if (isinstance(dataset, mfdataarray.MFArray) or
+ (isinstance(dataset, mfdatalist.MFList) and
+ dataset.structure.type == DatumType.recarray)) and \
+ dataset.enabled:
+ dataset.store_as_external_file(
+ '{}_{}.txt'.format(base_name, dataset.structure.name),
+ replace_existing_external=False)
+
+ def _find_repeating_datasets(self):
+ repeating_datasets = []
+ for key, dataset in self.datasets.items():
+ if dataset.repeating:
+ repeating_datasets.append(dataset)
+ return repeating_datasets
+
+ def _write_block(self, fd, block_header, ext_file_action):
+ # write block header
+ block_header.write_header(fd)
+ transient_key = None
+ if len(block_header.data_items) > 0:
+ transient_key = block_header.get_transient_key()
+
+ if self.external_file_name is not None:
+ # write block contents to external file
+ indent_string = self._simulation_data.indent_string
+ fd.write('{}open/close {}\n'.format(indent_string,
+ self.external_file_name))
+ fd_main = fd
+ fd_path = os.path.split(os.path.realpath(fd.name))[0]
+ try:
+ fd = open(os.path.join(fd_path, self.external_file_name), 'w')
+ except Exception:
+ type_, value_, traceback_ = sys.exc_info()
+ message = 'Error opening external file ' \
+ '"{}"'.format(self.external_file_name)
+ raise MFDataException(self._container_package.model_name,
+
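# _write_block above redirects a block body through an OPEN/CLOSE record:
# it writes the pointer line to the package file and swaps the handle to
# the external file. The same redirection, condensed into a sketch with
# hypothetical arguments:
import os

def write_external(main_fd, external_name, payload, indent='  '):
    """Write an open/close pointer, then the payload to the external file."""
    main_fd.write('{}open/close {}\n'.format(indent, external_name))
    base = os.path.split(os.path.realpath(main_fd.name))[0]
    with open(os.path.join(base, external_name), 'w') as ext_fd:
        ext_fd.write(payload)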
self._container_package._get_pname(),
+ self.path, 'opening external file',
+ self.structure.name,
+ inspect.stack()[0][3], type_,
+ value_, traceback_, message,
+ self._simulation_data.debug)
+
+ # write data sets
+ for key, dataset in self.datasets.items():
+ try:
+ if transient_key is None:
+ if self._simulation_data.verbosity_level.value >= \
+ VerbosityLevel.verbose.value:
+ print(' writing data {}..'
+ '.'.format(dataset.structure.name))
+ fd.write(dataset.get_file_entry(
+ ext_file_action=ext_file_action))
+ else:
+ if self._simulation_data.verbosity_level.value >= \
+ VerbosityLevel.verbose.value:
+ print(' writing data {} ({})..'
+ '.'.format(dataset.structure.name,
+ transient_key))
+ if dataset.repeating:
+ fd.write(dataset.get_file_entry(
+ transient_key, ext_file_action=ext_file_action))
+ else:
+ fd.write(dataset.get_file_entry(
+ ext_file_action=ext_file_action))
+ except MFDataException as mfde:
+ raise MFDataException(mfdata_except=mfde,
+ model=self._container_package.model_name,
+ package=self._container_package.
+ _get_pname(),
+ message='Error occurred while writing '
+ 'data "{}" in block "{}" to file'
+ ' "{}".'.format(
+ dataset.structure.name,
+ self.structure.name,
+ fd.name))
+ # write trailing comments
+ self._simulation_data.mfdata[self.blk_trailing_comment_path].write(fd)
+
+ if self.external_file_name is not None:
+ # switch back writing to package file
+ fd.close()
+ fd = fd_main
+
+ # write block footer
+ block_header.write_footer(fd)
+
+ # write post block comments
+ self._simulation_data.mfdata[self.blk_post_comment_path].write(fd)
+
+ # write extra line if comments are off
+ if not self._simulation_data.comments_on:
+ fd.write('\n')
+
+ def is_allowed(self):
+ if self.structure.variable_dependant_path:
+ # fill in empty part of the path with the current path
+ if len(self.structure.variable_dependant_path) == 3:
+ dependant_var_path = (self.path[0],) + \
+ self.structure.variable_dependant_path
+ elif len(self.structure.variable_dependant_path) == 2:
+ dependant_var_path = (self.path[0], self.path[1]) + \
+ self.structure.variable_dependant_path
+ elif len(self.structure.variable_dependant_path) == 1:
+ dependant_var_path = (self.path[0], self.path[1],
+ self.path[2]) + \
+ self.structure.variable_dependant_path
+ else:
+ dependant_var_path = None
+
+ # get dependency
+ dependant_var = None
+ mf_data = self._simulation_data.mfdata
+ if dependant_var_path in mf_data:
+ dependant_var = mf_data[dependant_var_path]
+
+ # resolve dependency
+ if self.structure.variable_value_when_active[0] == 'Exists':
+ exists = self.structure.variable_value_when_active[1]
+ if dependant_var and exists.lower() == 'true':
+ return True
+ elif not dependant_var and exists.lower() == 'false':
+ return True
+ else:
+ return False
+ elif not dependant_var:
+ return False
+ elif self.structure.variable_value_when_active[0] == '>':
+ min_val = self.structure.variable_value_when_active[1]
+ if dependant_var > float(min_val):
+ return True
+ else:
+ return False
+ elif self.structure.variable_value_when_active[0] == '<':
+ max_val = self.structure.variable_value_when_active[1]
+ if dependant_var < float(max_val):
+ return True
+ else:
+ return False
+ return True
+
+ def is_valid(self):
+ # check data sets
+ for dataset in self.datasets.values():
+ # Non-optional datasets must be enabled
+ if not dataset.structure.optional and not dataset.enabled:
+ return False
+ # Enabled blocks must be valid
+ if dataset.enabled and not dataset.is_valid:
+ return False
+ # check variables
+ for
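# is_allowed above gates a block on a dependent variable: an 'Exists' test
# or a '<'/'>' threshold against the variable's value. The same predicate,
# isolated (hypothetical rule tuples):
def rule_allows(rule, value):
    """Evaluate ('Exists', 'true'|'false') and ('>'|'<', limit) rules."""
    op, operand = rule
    if op == 'Exists':
        return bool(value) == (operand.lower() == 'true')
    if value is None:
        return False
    if op == '>':
        return value > float(operand)
    if op == '<':
        return value < float(operand)
    return True

# rule_allows(('>', '0'), 5.0) -> True
# rule_allows(('Exists', 'false'), None) -> True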
block_header in self.block_headers:
+ for dataset in block_header.data_items:
+ # Non-optional datasets must be enabled
+ if not dataset.structure.optional and not dataset.enabled:
+ return False
+ # Enabled blocks must be valid
+ if dataset.enabled and not dataset.is_valid():
+ return False
+ return True
+
+
+class MFPackage(PackageContainer, PackageInterface):
+ """
+ Provides an interface for the user to specify data to build a package.
+
+ Parameters
+ ----------
+ model_or_sim : MFModel or MFSimulation
+ the parent model or simulation containing this package
+ package_type : string
+ string defining the package type
+ filename : string
+ filename of file where this package is stored
+ pname : string
+ package name
+ loading_package : bool
+ whether or not to add this package to the parent container's package
+ list during initialization
+ parent_file : MFPackage
+ parent package that contains this package
+
+ Attributes
+ ----------
+ blocks : OrderedDict
+ dictionary of blocks contained in this package by block name
+ path : tuple
+ data dictionary path to this package
+ structure : PackageStructure
+ describes the blocks and data contained in this package
+ dimensions : PackageDimension
+ resolves data dimensions for data within this package
+ set_all_data_external
+ sets the package's list and array data to be stored externally
+
+ Methods
+ -------
+ build_mfdata : (var_name : variable name, data : data contained in this
+ object) : MFData subclass
+ Returns the appropriate data type object (mfdatalist, mfdataarray, or
+ mfdatascalar) giving that object the appropriate structure (looked
+ up based on var_name) and any data supplied
+ load : (strict : bool) : bool
+ Loads the package from file
+ is_valid : bool
+ Returns whether or not this package is valid
+ write
+ Writes the package to a file
+ get_file_path : string
+ Returns the package file's path
+ remove
+ Removes package from the simulation/model it is currently a part of
+
+ See Also
+ --------
+
+ Notes
+ -----
+
+ Examples
+ --------
+
+
+ """
+ def __init__(self, model_or_sim, package_type, filename=None, pname=None,
+ loading_package=False, parent_file=None):
+
+ self.model_or_sim = model_or_sim
+ self._data_list = []
+ self._package_type = package_type
+ if model_or_sim.type == 'Model' and package_type.lower() != 'nam':
+ self.model_name = model_or_sim.name
+ else:
+ self.model_name = None
+
+ if model_or_sim.type != 'Model' and model_or_sim.type != 'Simulation':
+ message = 'Invalid model_or_sim parameter. Expecting either a ' \
+ 'model or a simulation. Instead type "{}" was ' \
+ 'given.'.format(type(model_or_sim))
+ type_, value_, traceback_ = sys.exc_info()
+ raise MFDataException(self.model_name, pname, '', 'initializing '
+ 'package', None, inspect.stack()[0][3],
+ type_, value_, traceback_, message,
+ model_or_sim.simulation_data.debug)
+
+ super(MFPackage, self).__init__(model_or_sim.simulation_data,
+ self.model_name)
+
+ self.parent = model_or_sim
+ self._simulation_data = model_or_sim.simulation_data
+ self.parent_file = parent_file
+ self.blocks = OrderedDict()
+ self.container_type = []
+ self.loading_package = loading_package
+ if pname is not None:
+ if not isinstance(pname, str):
+ message = 'Invalid pname parameter. Expecting type str.
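# MFPackage reports bad constructor arguments through MFDataException,
# bundling sys.exc_info() and the calling frame from inspect.stack(). The
# capture step on its own (a generic dict stands in for the exception):
import inspect
import sys

def build_error_context(message):
    """Capture exception info and the current frame name for reporting."""
    type_, value_, traceback_ = sys.exc_info()
    frame_name = inspect.stack()[0][3]
    return {'message': message, 'frame': frame_name,
            'exc_info': (type_, value_, traceback_)}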
' \
+ 'Instead type "{}" was ' \
+ 'given.'.format(type(pname))
+ type_, value_, traceback_ = sys.exc_info()
+ raise MFDataException(self.model_name, pname, '',
+ 'initializing package', None,
+ inspect.stack()[0][3], type_, value_,
+ traceback_, message,
+ model_or_sim.simulation_data.debug)
+
+ self.package_name = pname.lower()
+ else:
+ self.package_name = None
+
+ if filename is None:
+ self._filename = MFFileMgmt.string_to_file_path('{}.{}'.format(
+ self.model_or_sim.name, package_type))
+ else:
+ if not isinstance(filename, str):
+ message = 'Invalid fname parameter. Expecting type str. ' \
+ 'Instead type "{}" was ' \
+ 'given.'.format(type(filename))
+ type_, value_, traceback_ = sys.exc_info()
+ raise MFDataException(self.model_name, pname, '',
+ 'initializing package', None,
+ inspect.stack()[0][3], type_, value_,
+ traceback_, message,
+ model_or_sim.simulation_data.debug)
+
+ self._filename = MFFileMgmt.string_to_file_path(filename)
+
+ self.path, \
+ self.structure = model_or_sim.register_package(self,
+ not loading_package,
+ pname is None,
+ filename is None)
+ self.dimensions = self.create_package_dimensions()
+
+ if self.path is None:
+ if self._simulation_data.verbosity_level.value >= \
+ VerbosityLevel.normal.value:
+ print('WARNING: Package type {} failed to register property.'
+ ' {}'.format(self._package_type, self.path))
+ if parent_file is not None:
+ self.container_type.append(PackageContainerType.package)
+ # init variables that may be used later
+ self.post_block_comments = None
+ self.last_error = None
+ self.bc_color = "black"
+ self.__inattr = False
+ self._child_package_groups = {}
+
+ def __setattr__(self, name, value):
+ if hasattr(self, name) and getattr(self, name) is not None:
+ attribute = object.__getattribute__(self, name)
+ if attribute is not None and isinstance(attribute, mfdata.MFData):
+ try:
+ if isinstance(attribute, mfdatalist.MFList):
+ attribute.set_data(value, autofill=True)
+ else:
+ attribute.set_data(value)
+ except MFDataException as mfde:
+ raise MFDataException(mfdata_except=mfde,
+ model=self.model_name,
+ package=self._get_pname())
+ return
+ super(MFPackage, self).__setattr__(name, value)
+
+ def __repr__(self):
+ return self._get_data_str(True)
+
+ def __str__(self):
+ return self._get_data_str(False)
+
+ @property
+ def filename(self):
+ return self._filename
+
+ @filename.setter
+ def filename(self, fname):
+ if isinstance(self.parent_file, MFPackage) and \
+ self.structure.file_type in \
+ self.parent_file._child_package_groups:
+ try:
+ child_pkg_group = self.parent_file._child_package_groups[
+ self.structure.file_type]
+ child_pkg_group._update_filename(self._filename, fname)
+ except Exception:
+ print('WARNING: Unable to update file name for parent '
+ 'package of {}.'.format(self.name))
+ self._filename = fname
+
+ @property
+ def package_type(self):
+ return self._package_type
+
+ @property
+ def name(self):
+ return [self.package_name]
+
+ @name.setter
+ def name(self, name):
+ self.package_name = name
+
+ @property
+ def parent(self):
+ return self._parent
+
+ @parent.setter
+ def parent(self, parent):
+ self._parent = parent
+
+ @property
+ def plotable(self):
+ if self.model_or_sim.type == "Simulation":
+ return False
+ else:
+ return True
+
+ @property
+ def data_list(self):
+ # return [data_object, data_object, ...]
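# __setattr__ above reroutes plain attribute assignment into
# MFData.set_data, so `pkg.maxbound = 10` updates the underlying dataset
# instead of rebinding the attribute. The interception pattern in
# miniature (hypothetical class, plain dict instead of MFData):
class InterceptingStore(object):
    def __init__(self):
        object.__setattr__(self, 'store', {})

    def __setattr__(self, name, value):
        # route every assignment into the managed store
        self.store[name] = value

    def __getattr__(self, name):
        try:
            return object.__getattribute__(self, 'store')[name]
        except KeyError:
            raise AttributeError(name)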
+ return self._data_list + + def check(self, f=None, verbose=True, level=1, checktype=None): + if checktype is None: + checktype = mf6check + return super(MFPackage, self).check(f, verbose, level, checktype) + + def _get_nan_exclusion_list(self): + excl_list = [] + if hasattr(self, 'stress_period_data'): + spd_struct = self.stress_period_data.structure + for item_struct in spd_struct.data_item_structures: + if item_struct.optional or item_struct.keystring_dict: + excl_list.append(item_struct.name) + return excl_list + + def _get_data_str(self, formal, show_data=True): + data_str = 'package_name = {}\nfilename = {}\npackage_type = {}' \ + '\nmodel_or_simulation_package = {}' \ + '\n{}_name = {}' \ + '\n'.format(self._get_pname(), self._filename, + self.package_type, + self.model_or_sim.type.lower(), + self.model_or_sim.type.lower(), + self.model_or_sim.name) + if self.parent_file is not None and formal: + data_str = '{}parent_file = ' \ + '{}\n\n'.format(data_str, self.parent_file._get_pname()) + else: + data_str = '{}\n'.format(data_str) + if show_data: + for block in self.blocks.values(): + if formal: + bl_repr = repr(block) + if len(bl_repr.strip()) > 0: + data_str = '{}Block {}\n--------------------\n{}' \ + '\n'.format(data_str, block.structure.name, + repr(block)) + else: + bl_str = str(block) + if len(bl_str.strip()) > 0: + data_str = '{}Block {}\n--------------------\n{}' \ + '\n'.format(data_str, block.structure.name, + str(block)) + return data_str + + def _get_pname(self): + if self.package_name is not None: + return '{}'.format(self.package_name) + else: + return '{}'.format(self._filename) + + def _get_block_header_info(self, line, path): + # init + header_variable_strs = [] + arr_clean_line = line.strip().split() + header_comment = MFComment('', path + (arr_clean_line[1],), + self._simulation_data, 0) + # break header into components + if len(arr_clean_line) < 2: + message = 'Block header does not contain a name. 
Name ' \ + 'expected in line "{}".'.format(line) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.model_name, + self._get_pname(), + self.path, + 'parsing block header', None, + inspect.stack()[0][3], + type_, value_, traceback_, message, + self._simulation_data.debug) + elif len(arr_clean_line) == 2: + return MFBlockHeader(arr_clean_line[1], header_variable_strs, + header_comment) + else: + # process text after block name + comment = False + for entry in arr_clean_line[2:]: + # if start of comment + if MFComment.is_comment(entry.strip()[0]): + comment = True + if comment: + header_comment.text = ' '.join([header_comment.text, + entry]) + else: + header_variable_strs.append(entry) + return MFBlockHeader(arr_clean_line[1], header_variable_strs, + header_comment) + + def _update_size_defs(self): + # build temporary data lookup by name + data_lookup = {} + for block in self.blocks.values(): + for dataset in block.datasets.values(): + data_lookup[dataset.structure.name] = dataset + + # loop through all data + for block in self.blocks.values(): + for dataset in block.datasets.values(): + # if data shape is 1-D + if dataset.structure.shape and \ + len(dataset.structure.shape) == 1: + # if shape name is data in this package + if dataset.structure.shape[0] in data_lookup: + size_def = data_lookup[dataset.structure.shape[0]] + size_def_name = size_def.structure.name + + if isinstance(dataset, mfdata.MFTransient): + # for transient data always use the maximum size + new_size = -1 + for key in dataset.get_active_key_list(): + try: + data = dataset.get_data(key=key[0]) + except (IOError, + OSError, + MFDataException): + # TODO: Handle case where external file + # path has been moved + data = None + if data is not None: + data_len = len(data) + if data_len > new_size: + new_size = data_len + else: + # for all other data set max to size + new_size = -1 + try: + data = dataset.get_data() + except (IOError, + OSError, + MFDataException): + # TODO: Handle case where external file + # path has been moved + data = None + if data is not None: + new_size = len(dataset.get_data()) + if size_def.get_data() != new_size >= 0: + # store current size + size_def.set_data(new_size) + + # informational message to the user + if self._simulation_data.verbosity_level.value >= \ + VerbosityLevel.normal.value: + print('INFORMATION: {} in {} changed to {} ' + 'based on size of ' + '{}'.format(size_def_name, + size_def.structure.path[:-1], + new_size, + dataset.structure.name)) + + def remove(self): + self.model_or_sim.remove_package(self) + + def build_child_packages_container(self, pkg_type, filerecord): + # get package class + package_obj = self.package_factory(pkg_type, + self.model_or_sim.model_type) + # create child package object + child_pkgs_name = 'utl{}packages'.format(pkg_type) + child_pkgs_obj = self.package_factory(child_pkgs_name, '') + child_pkgs = child_pkgs_obj(self.model_or_sim, self, pkg_type, + filerecord, None, package_obj) + setattr(self, pkg_type, child_pkgs) + self._child_package_groups[pkg_type] = child_pkgs + + def build_child_package(self, pkg_type, data, parameter_name, filerecord): + if not hasattr(self, pkg_type): + self.build_child_packages_container(pkg_type, filerecord) + if data is not None: + package_group = getattr(self, pkg_type) + # build child package file name + child_path = package_group._next_default_file_path() + # create new empty child package + package_obj = self.package_factory(pkg_type, + self.model_or_sim.model_type) + package = 
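# build_child_package routes a user-supplied dict onto the new child
# package: keys matching package attributes are applied, and whatever is
# left over is handed to the primary parameter. The partitioning step
# alone (hypothetical target object):
def partition_kwargs(target, data):
    """Apply dict entries matching target attributes; return the rest."""
    unused = {}
    for key, value in data.items():
        if isinstance(key, str) and hasattr(target, key):
            setattr(target, key, value)
        else:
            unused[key] = value
    return unused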
package_obj(self.model_or_sim, filename=child_path,
+ parent_file=self)
+ assert hasattr(package, parameter_name)
+
+ if isinstance(data, dict):
+ # evaluate and add data to package
+ unused_data = {}
+ for key, value in data.items():
+ # if key is an attribute of the child package
+ if isinstance(key, str) and hasattr(package, key):
+ # set child package attribute
+ child_data_attr = getattr(package, key)
+ if isinstance(child_data_attr, mfdatalist.MFList):
+ child_data_attr.set_data(value, autofill=True)
+ elif isinstance(child_data_attr, mfdata.MFData):
+ child_data_attr.set_data(value)
+ elif key == 'fname' or key == 'filename':
+ child_path = value
+ package._filename = value
+ else:
+ setattr(package, key, value)
+ else:
+ unused_data[key] = value
+ if unused_data:
+ setattr(package, parameter_name, unused_data)
+ else:
+ setattr(package, parameter_name, data)
+
+ # append package to list
+ package_group._init_package(package, child_path)
+
+ def build_mfdata(self, var_name, data=None):
+ if self.loading_package:
+ data = None
+ for key, block in self.structure.blocks.items():
+ if var_name in block.data_structures:
+ if block.name not in self.blocks:
+ self.blocks[block.name] = MFBlock(self._simulation_data,
+ self.dimensions, block,
+ self.path + (key,),
+ self.model_or_sim, self)
+ dataset_struct = block.data_structures[var_name]
+ var_path = self.path + (key, var_name)
+ ds = self.blocks[block.name].add_dataset(dataset_struct,
+ data, var_path)
+ self._data_list.append(ds)
+ return ds
+
+ message = 'Unable to find variable "{}" in package ' \
+ '"{}".'.format(var_name, self.package_type)
+ type_, value_, traceback_ = sys.exc_info()
+ raise MFDataException(self.model_name, self._get_pname(),
+ self.path, 'building data objects',
+ None, inspect.stack()[0][3],
+ type_, value_, traceback_, message,
+ self._simulation_data.debug)
+
+ def set_model_relative_path(self, model_ws):
+ # update blocks
+ for key, block in self.blocks.items():
+ block.set_model_relative_path(model_ws)
+ # update sub-packages
+ for package in self._packagelist:
+ package.set_model_relative_path(model_ws)
+
+ def set_all_data_external(self):
+ # set blocks
+ for key, block in self.blocks.items():
+ file_name = os.path.split(self.filename)[1]
+ block.set_all_data_external(file_name)
+ # set sub-packages
+ for package in self._packagelist:
+ package.set_all_data_external()
+
+ def load(self, strict=True):
+ # open file
+ try:
+ fd_input_file = open(self.get_file_path(), 'r')
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ message = 'File {} of type {} could not be opened' \
+ '.'.format(self.get_file_path(), self.package_type)
+ type_, value_, traceback_ = sys.exc_info()
+ raise MFDataException(self.model_name,
+ self.package_name,
+ self.path, 'loading package file',
+ None, inspect.stack()[0][3],
+ type_, value_, traceback_, message,
+ self._simulation_data.debug)
+ # re-raise any other OSError so it is not silently swallowed
+ raise
+
+ try:
+ self._load_blocks(fd_input_file, strict)
+ except ReadAsArraysException as err:
+ fd_input_file.close()
+ raise ReadAsArraysException(err)
+ # close file
+ fd_input_file.close()
+
+ if self.simulation_data.auto_set_sizes:
+ self._update_size_defs()
+
+ # return validity of file
+ return self.is_valid()
+
+ def is_valid(self):
+ # Check blocks
+ for block in self.blocks.values():
+ # Non-optional blocks must be enabled
+ if block.structure.number_non_optional_data() > 0 and \
+ not block.enabled and block.is_allowed():
+ self.last_error = 'Required block "{}" not ' \
+ 'enabled'.format(block.block_header.name)
+ return False
+ #
Enabled blocks must be valid
+ if block.enabled and not block.is_valid():
+ self.last_error = 'Invalid block ' \
+ '"{}"'.format(block.block_header.name)
+ return False
+
+ return True
+
+ def _load_blocks(self, fd_input_file, strict=True, max_blocks=sys.maxsize):
+ # init
+ self._simulation_data.mfdata[self.path + ('pkg_hdr_comments',)] = \
+ MFComment('', self.path, self._simulation_data)
+ self.post_block_comments = MFComment('', self.path,
+ self._simulation_data)
+
+ blocks_read = 0
+ found_first_block = False
+ line = ' '
+ while line != '':
+ line = fd_input_file.readline()
+ clean_line = line.strip()
+ # If comment or empty line
+ if MFComment.is_comment(clean_line, True):
+ self._store_comment(line, found_first_block)
+ elif len(clean_line) > 4 and clean_line[:5].upper() == 'BEGIN':
+ # parse block header
+ try:
+ block_header_info = self._get_block_header_info(line,
+ self.path)
+ except MFDataException as mfde:
+ message = 'An error occurred while loading block header ' \
+ 'in line "{}".'.format(line)
+ type_, value_, traceback_ = sys.exc_info()
+ raise MFDataException(self.model_name, self._get_pname(),
+ self.path, 'loading block header',
+ None, inspect.stack()[0][3],
+ type_, value_, traceback_, message,
+ self._simulation_data.debug, mfde)
+
+ # if there is more than one possible block with the same name,
+ # resolve the correct block to use
+ block_key = block_header_info.name.lower()
+ block_num = 1
+ possible_key = '{}-{}'.format(block_header_info.name.lower(),
+ block_num)
+ if possible_key in self.blocks:
+ block_key = possible_key
+ block_header_name = block_header_info.name.lower()
+ while block_key in self.blocks and \
+ not self.blocks[block_key].is_allowed():
+ block_key = '{}-{}'.format(block_header_name,
+ block_num)
+ block_num += 1
+
+ if block_key not in self.blocks:
+ # block name not recognized, load block as comments and
+ # issue a warning
+ if self.simulation_data.verbosity_level.value >= \
+ VerbosityLevel.normal.value:
+ warning_str = 'WARNING: Block "{}" is not a valid block ' \
+ 'name for file type ' \
+ '{}.'.format(block_key, self.package_type)
+ print(warning_str)
+ self._store_comment(line, found_first_block)
+ while line != '':
+ line = fd_input_file.readline()
+ self._store_comment(line, found_first_block)
+ arr_line = datautil.PyListUtil.split_data_line(line)
+ if arr_line and (len(arr_line[0]) <= 2 or
+ arr_line[0][:3].upper() == 'END'):
+ break
+ else:
+ found_first_block = True
+ self.post_block_comments = \
+ MFComment('', self.path, self._simulation_data)
+ skip_block = False
+ if self.blocks[block_key].loaded:
+ # Only blocks defined as repeating are allowed to have
+ # multiple entries
+ header_name = block_header_info.name
+ if not self.structure.blocks[header_name.lower()].\
+ repeating():
+ # warn and skip block
+ if self._simulation_data.verbosity_level.value >= \
+ VerbosityLevel.normal.value:
+ warning_str = 'WARNING: Block "{}" has ' \
+ 'multiple entries and is not ' \
+ 'intended to be a repeating ' \
+ 'block ({} package' \
+ ')'.format(header_name,
+ self.package_type)
+ print(warning_str)
+ skip_block = True
+
+ if not skip_block:
+ if self.simulation_data.verbosity_level.value >= \
+ VerbosityLevel.verbose.value:
+ print(' loading block {}...'.format(
+ self.blocks[block_key].structure.name))
+
+ self.blocks[block_key].load(block_header_info,
+ fd_input_file, strict)
+ self._simulation_data.mfdata[self.blocks[block_key].
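# _load_blocks above disambiguates repeated block headers by probing
# suffixed keys ('period-1', 'period-2', ...) until a usable slot turns
# up. The key search, reduced to a sketch over a plain dict registry:
def resolve_block_key(name, registry):
    """Return name, or the first unused 'name-N' variant."""
    if name not in registry:
        return name
    num = 1
    while '{}-{}'.format(name, num) in registry:
        num += 1
    return '{}-{}'.format(name, num)

# resolve_block_key('period', {'period': 1, 'period-1': 2}) -> 'period-2'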
+ blk_post_comment_path] = \ + self.post_block_comments + + blocks_read += 1 + if blocks_read >= max_blocks: + break + else: + if not (len(clean_line) == 0 or (len(line) > 2 and + line[:3].upper() == 'END')): + # Record file location of beginning of unresolved text + # treat unresolved text as a comment for now + self._store_comment(line, found_first_block) + + def write(self, ext_file_action=ExtFileAction.copy_relative_paths): + if self.simulation_data.auto_set_sizes: + self._update_size_defs() + + # create any folders in path + package_file_path = self.get_file_path() + package_folder = os.path.split(package_file_path)[0] + if package_folder and not os.path.isdir(package_folder): + os.makedirs(os.path.split(package_file_path)[0]) + + # open file + fd = open(package_file_path, 'w') + + # write blocks + self._write_blocks(fd, ext_file_action) + + fd.close() + + def create_package_dimensions(self): + model_dims = None + if self.container_type[0] == PackageContainerType.model: + model_dims = [modeldimensions.ModelDimensions( + self.path[0], self._simulation_data)] + else: + # this is a simulation file that does not correspond to a specific + # model. figure out which model to use and return a dimensions + # object for that model + if self.dfn_file_name[0:3] == 'exg': + exchange_rec_array = self._simulation_data.mfdata[ + ('nam', 'exchanges', 'exchanges')].get_data() + if exchange_rec_array is None: + return None + for exchange in exchange_rec_array: + if exchange[1].lower() == self._filename.lower(): + model_dims = [modeldimensions.ModelDimensions( + exchange[2], self._simulation_data), + modeldimensions.ModelDimensions( + exchange[3], self._simulation_data)] + break + elif self.parent_file is not None: + model_dims = [] + for md in self.parent_file.dimensions.model_dim: + model_name = md.model_name + model_dims.append(modeldimensions.ModelDimensions( + model_name, self._simulation_data)) + else: + model_dims = [modeldimensions.ModelDimensions( + None, self._simulation_data)] + return modeldimensions.PackageDimensions(model_dims, self.structure, + self.path) + + def _store_comment(self, line, found_first_block): + # Store comment + if found_first_block: + self.post_block_comments.text += line + else: + self._simulation_data.mfdata[self.path + + ('pkg_hdr_comments',)].text += line + + def _write_blocks(self, fd, ext_file_action): + # verify that all blocks are valid + if not self.is_valid(): + message = 'Unable to write out model file "{}" due to the ' \ + 'following error: ' \ + '{} ({})'.format(self._filename, self.last_error, + self.path) + type_, value_, traceback_ = sys.exc_info() + raise MFDataException(self.model_name, self._get_pname(), + self.path, 'writing package blocks', + None, inspect.stack()[0][3], + type_, value_, traceback_, message, + self._simulation_data.debug) + + # write initial comments + pkg_hdr_comments_path = self.path + ('pkg_hdr_comments',) + if pkg_hdr_comments_path in self._simulation_data.mfdata: + self._simulation_data.mfdata[self.path + + ('pkg_hdr_comments',)].write(fd, + False) + + # loop through blocks + block_num = 1 + for block in self.blocks.values(): + if self.simulation_data.verbosity_level.value >= \ + VerbosityLevel.verbose.value: + print(' writing block {}...'.format(block.structure.name)) + # write block + block.write(fd, ext_file_action=ext_file_action) + block_num += 1 + + def get_file_path(self): + if self.path[0] in self._simulation_data.mfpath.model_relative_path: + return os.path.join(self._simulation_data.mfpath.get_model_path( + 
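# get_file_path anchors a package file either to its model's workspace or
# to the simulation workspace. The same resolution, sketched over plain
# strings (hypothetical workspace mapping):
import os

def resolve_file_path(filename, model_name, model_paths, sim_path):
    """Join filename to the model workspace when one is registered."""
    if model_name in model_paths:
        return os.path.join(model_paths[model_name], filename)
    return os.path.join(sim_path, filename)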
self.path[0]), self._filename) + else: + return os.path.join(self._simulation_data.mfpath.get_sim_path(), + self._filename) + + def export(self, f, **kwargs): + """ + Method to export a package to netcdf or shapefile based on the + extension of the file name (.shp for shapefile, .nc for netcdf) + + Parameters + ---------- + f : str + filename + kwargs : keyword arguments + modelgrid : flopy.discretization.Grid instance + user supplied modelgrid which can be used for exporting + in lieu of the modelgrid associated with the model object + + Returns + ------- + None or Netcdf object + + """ + from flopy import export + return export.utils.package_export(f, self, **kwargs) + + def plot(self, **kwargs): + """ + Plot 2-D, 3-D, transient 2-D, and stress period list (MfList) + package input data + + Parameters + ---------- + package: flopy.pakbase.Package instance supplied for plotting + + **kwargs : dict + filename_base : str + Base file name that will be used to automatically generate file + names for output image files. Plots will be exported as image + files if file_name_base is not None. (default is None) + file_extension : str + Valid matplotlib.pyplot file extension for savefig(). Only used + if filename_base is not None. (default is 'png') + mflay : int + MODFLOW zero-based layer number to return. If None, then all + all layers will be included. (default is None) + kper : int + MODFLOW zero-based stress period number to return. (default is + zero) + key : str + MfList dictionary key. (default is None) + + Returns + ---------- + axes : list + Empty list is returned if filename_base is not None. Otherwise + a list of matplotlib.pyplot.axis are returned. + + """ + from flopy.plot.plotutil import PlotUtilities + + if not self.plotable: + raise TypeError("Simulation level packages are not plotable") + + axes = PlotUtilities._plot_package_helper(self, + **kwargs) + return axes + + +class MFChildPackages(object): + def __init__(self, model, parent, pkg_type, filerecord, package=None, + package_class=None): + self._packages = [] + self._filerecord = filerecord + if package is not None: + self._packages.append(package) + self._model = model + self._cpparent = parent + self._pkg_type = pkg_type + self._package_class = package_class + + def __getattr__(self, attr): + if '_packages' in self.__dict__ and len(self._packages) > 0 and \ + hasattr(self._packages[0], attr): + item = getattr(self._packages[0], attr) + return item + raise AttributeError(attr) + + def __getitem__(self, k): + if isinstance(k, int): + if k < len(self._packages): + return self._packages[k] + raise ValueError('Package index {} does not exist.'.format(k)) + + def __setattr__(self, key, value): + if key != '_packages' and key != '_model' and key != '_cpparent' and \ + key != '_inattr' and key != '_filerecord' and \ + key != '_package_class' and key != '_pkg_type': + if len(self._packages) == 0: + raise Exception('No {} package is currently attached to package' + ' {}. 
Use the initialize method to create a(n) ' + '{} package before attempting to access its ' + 'properties.'.format(self._pkg_type, + self._cpparent.filename, + self._pkg_type)) + package = self._packages[0] + setattr(package, key, value) + return + super(MFChildPackages, self).__setattr__(key, value) + + def __default_file_path_base(self, file_path, suffix=''): + stem = os.path.split(file_path)[1] + stem_lst = stem.split('.') + file_name = '.'.join(stem_lst[:-1]) + if len(stem_lst) > 1: + file_ext = stem_lst[-1] + return '{}.{}{}.{}'.format(file_name, file_ext, suffix, + self._pkg_type) + elif suffix != '': + return '{}.{}'.format(stem, self._pkg_type) + else: + return '{}.{}.{}'.format(stem, suffix, self._pkg_type) + + def __file_path_taken(self, possible_path): + for package in self._packages: + # Do case insensitive compare + if package.filename.lower() == possible_path.lower(): + return True + return False + + def _next_default_file_path(self): + possible_path = self.__default_file_path_base(self._cpparent.filename) + suffix = 0 + while self.__file_path_taken(possible_path): + possible_path = self.__default_file_path_base( + self._cpparent.filename, suffix) + suffix += 1 + return possible_path + + def _init_package(self, package, fname): + # clear out existing packages + self._remove_packages() + if fname is None: + # build a file name + fname = self._next_default_file_path() + package._filename = fname + # set file record variable + self._filerecord.set_data(fname, autofill=True) + # add the package to the list + self._packages.append(package) + + def _update_filename(self, old_fname, new_fname): + file_record = self._filerecord.get_data() + new_file_record_data = [] + if file_record is not None: + file_record_data = file_record[0] + for item in file_record_data: + if item.lower() == old_fname.lower(): + new_file_record_data.append((new_fname,)) + else: + new_file_record_data.append((item,)) + else: + new_file_record_data.append((new_fname,)) + self._filerecord.set_data(new_file_record_data) + + def _append_package(self, package, fname, update_frecord=True): + if fname is None: + # build a file name + fname = self._next_default_file_path() + package._filename = fname + + if update_frecord: + # set file record variable + file_record = self._filerecord.get_data() + file_record_data = file_record + new_file_record_data = [] + for item in file_record_data: + new_file_record_data.append((item[0],)) + new_file_record_data.append((fname,)) + self._filerecord.set_data(new_file_record_data) + + # add the package to the list + self._packages.append(package) + + def _remove_packages(self): + for package in self._packages: + self._model.remove_package(package) + self._packages = [] diff --git a/flopy/mf6/modflow/mfgnc.py b/flopy/mf6/modflow/mfgnc.py index d9709647bf..b03c87d996 100644 --- a/flopy/mf6/modflow/mfgnc.py +++ b/flopy/mf6/modflow/mfgnc.py @@ -1,143 +1,143 @@ -# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY -# mf6/utils/createpackages.py -from .. import mfpackage -from ..data.mfdatautil import ListTemplateGenerator - - -class ModflowGnc(mfpackage.MFPackage): - """ - ModflowGnc defines a gnc package. - - Parameters - ---------- - simulation : MFSimulation - Simulation that this package is a part of. Package is automatically - added to simulation when it is initialized. - loading_package : bool - Do not set this parameter. It is intended for debugging and internal - processing purposes only. 
- print_input : boolean - * print_input (boolean) keyword to indicate that the list of GNC - information will be written to the listing file immediately after it - is read. - print_flows : boolean - * print_flows (boolean) keyword to indicate that the list of GNC flow - rates will be printed to the listing file for every stress period - time step in which "BUDGET PRINT" is specified in Output Control. If - there is no Output Control option and "PRINT_FLOWS" is specified, - then flow rates are printed for the last time step of each stress - period. - explicit : boolean - * explicit (boolean) keyword to indicate that the ghost node correction - is applied in an explicit manner on the right-hand side of the - matrix. The explicit approach will likely require additional outer - iterations. If the keyword is not specified, then the correction will - be applied in an implicit manner on the left-hand side. The implicit - approach will likely converge better, but may require additional - memory. If the EXPLICIT keyword is not specified, then the BICGSTAB - linear acceleration option should be specified within the LINEAR - block of the Sparse Matrix Solver. - numgnc : integer - * numgnc (integer) is the number of GNC entries. - numalphaj : integer - * numalphaj (integer) is the number of contributing factors. - gncdata : [cellidn, cellidm, cellidsj, alphasj] - * cellidn ((integer, ...)) is the cellid of the cell, :math:`n`, in - which the ghost node is located. For a structured grid that uses the - DIS input file, CELLIDN is the layer, row, and column numbers of the - cell. For a grid that uses the DISV input file, CELLIDN is the layer - number and CELL2D number for the two cells. If the model uses the - unstructured discretization (DISU) input file, then CELLIDN is the - node number for the cell. This argument is an index variable, which - means that it should be treated as zero-based when working with FloPy - and Python. Flopy will automatically subtract one when loading index - variables and add one when writing index variables. - * cellidm ((integer, ...)) is the cellid of the connecting cell, - :math:`m`, to which flow occurs from the ghost node. For a structured - grid that uses the DIS input file, CELLIDM is the layer, row, and - column numbers of the cell. For a grid that uses the DISV input file, - CELLIDM is the layer number and CELL2D number for the two cells. If - the model uses the unstructured discretization (DISU) input file, - then CELLIDM is the node number for the cell. This argument is an - index variable, which means that it should be treated as zero-based - when working with FloPy and Python. Flopy will automatically subtract - one when loading index variables and add one when writing index - variables. - * cellidsj ((integer, ...)) is the array of CELLIDS for the - contributing j cells, which contribute to the interpolated head value - at the ghost node. This item contains one CELLID for each of the - contributing cells of the ghost node. Note that if the number of - actual contributing cells needed by the user is less than NUMALPHAJ - for any ghost node, then a dummy CELLID of zero(s) should be inserted - with an associated contributing factor of zero. For a structured grid - that uses the DIS input file, CELLID is the layer, row, and column - numbers of the cell. For a grid that uses the DISV input file, CELLID - is the layer number and cell2d number for the two cells. 
If the model - uses the unstructured discretization (DISU) input file, then CELLID - is the node number for the cell. This argument is an index variable, - which means that it should be treated as zero-based when working with - FloPy and Python. Flopy will automatically subtract one when loading - index variables and add one when writing index variables. - * alphasj (double) is the contributing factors for each contributing - node in CELLIDSJ. Note that if the number of actual contributing - cells is less than NUMALPHAJ for any ghost node, then dummy CELLIDS - should be inserted with an associated contributing factor of zero. - The sum of ALPHASJ should be less than one. This is because one minus - the sum of ALPHASJ is equal to the alpha term (alpha n in equation - 4-61 of the GWF Model report) that is multiplied by the head in cell - n. - filename : String - File name for this package. - pname : String - Package name for this package. - parent_file : MFPackage - Parent package file that references this package. Only needed for - utility packages (mfutl*). For example, mfutllaktab package must have - a mfgwflak package parent_file. - - """ - gncdata = ListTemplateGenerator(('gnc', 'gncdata', 'gncdata')) - package_abbr = "gnc" - _package_type = "gnc" - dfn_file_name = "gwf-gnc.dfn" - - dfn = [["block options", "name print_input", "type keyword", - "reader urword", "optional true"], - ["block options", "name print_flows", "type keyword", - "reader urword", "optional true"], - ["block options", "name explicit", "type keyword", "tagged true", - "reader urword", "optional true"], - ["block dimensions", "name numgnc", "type integer", - "reader urword", "optional false"], - ["block dimensions", "name numalphaj", "type integer", - "reader urword", "optional false"], - ["block gncdata", "name gncdata", - "type recarray cellidn cellidm cellidsj alphasj", - "shape (maxbound)", "reader urword"], - ["block gncdata", "name cellidn", "type integer", "shape", - "tagged false", "in_record true", "reader urword", - "numeric_index true"], - ["block gncdata", "name cellidm", "type integer", "shape", - "tagged false", "in_record true", "reader urword", - "numeric_index true"], - ["block gncdata", "name cellidsj", "type integer", - "shape (numalphaj)", "tagged false", "in_record true", - "reader urword", "numeric_index true"], - ["block gncdata", "name alphasj", "type double precision", - "shape (numalphaj)", "tagged false", "in_record true", - "reader urword"]] - - def __init__(self, simulation, loading_package=False, print_input=None, - print_flows=None, explicit=None, numgnc=None, numalphaj=None, - gncdata=None, filename=None, pname=None, parent_file=None): - super(ModflowGnc, self).__init__(simulation, "gnc", filename, pname, - loading_package, parent_file) - - # set up variables - self.print_input = self.build_mfdata("print_input", print_input) - self.print_flows = self.build_mfdata("print_flows", print_flows) - self.explicit = self.build_mfdata("explicit", explicit) - self.numgnc = self.build_mfdata("numgnc", numgnc) - self.numalphaj = self.build_mfdata("numalphaj", numalphaj) - self.gncdata = self.build_mfdata("gncdata", gncdata) - self._init_complete = True +# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY +# mf6/utils/createpackages.py +from .. import mfpackage +from ..data.mfdatautil import ListTemplateGenerator + + +class ModflowGnc(mfpackage.MFPackage): + """ + ModflowGnc defines a gnc package. 
+ + Parameters + ---------- + simulation : MFSimulation + Simulation that this package is a part of. Package is automatically + added to simulation when it is initialized. + loading_package : bool + Do not set this parameter. It is intended for debugging and internal + processing purposes only. + print_input : boolean + * print_input (boolean) keyword to indicate that the list of GNC + information will be written to the listing file immediately after it + is read. + print_flows : boolean + * print_flows (boolean) keyword to indicate that the list of GNC flow + rates will be printed to the listing file for every stress period + time step in which "BUDGET PRINT" is specified in Output Control. If + there is no Output Control option and "PRINT_FLOWS" is specified, + then flow rates are printed for the last time step of each stress + period. + explicit : boolean + * explicit (boolean) keyword to indicate that the ghost node correction + is applied in an explicit manner on the right-hand side of the + matrix. The explicit approach will likely require additional outer + iterations. If the keyword is not specified, then the correction will + be applied in an implicit manner on the left-hand side. The implicit + approach will likely converge better, but may require additional + memory. If the EXPLICIT keyword is not specified, then the BICGSTAB + linear acceleration option should be specified within the LINEAR + block of the Sparse Matrix Solver. + numgnc : integer + * numgnc (integer) is the number of GNC entries. + numalphaj : integer + * numalphaj (integer) is the number of contributing factors. + gncdata : [cellidn, cellidm, cellidsj, alphasj] + * cellidn ((integer, ...)) is the cellid of the cell, :math:`n`, in + which the ghost node is located. For a structured grid that uses the + DIS input file, CELLIDN is the layer, row, and column numbers of the + cell. For a grid that uses the DISV input file, CELLIDN is the layer + number and CELL2D number for the two cells. If the model uses the + unstructured discretization (DISU) input file, then CELLIDN is the + node number for the cell. This argument is an index variable, which + means that it should be treated as zero-based when working with FloPy + and Python. Flopy will automatically subtract one when loading index + variables and add one when writing index variables. + * cellidm ((integer, ...)) is the cellid of the connecting cell, + :math:`m`, to which flow occurs from the ghost node. For a structured + grid that uses the DIS input file, CELLIDM is the layer, row, and + column numbers of the cell. For a grid that uses the DISV input file, + CELLIDM is the layer number and CELL2D number for the two cells. If + the model uses the unstructured discretization (DISU) input file, + then CELLIDM is the node number for the cell. This argument is an + index variable, which means that it should be treated as zero-based + when working with FloPy and Python. Flopy will automatically subtract + one when loading index variables and add one when writing index + variables. + * cellidsj ((integer, ...)) is the array of CELLIDS for the + contributing j cells, which contribute to the interpolated head value + at the ghost node. This item contains one CELLID for each of the + contributing cells of the ghost node. Note that if the number of + actual contributing cells needed by the user is less than NUMALPHAJ + for any ghost node, then a dummy CELLID of zero(s) should be inserted + with an associated contributing factor of zero. 
For a structured grid + that uses the DIS input file, CELLID is the layer, row, and column + numbers of the cell. For a grid that uses the DISV input file, CELLID + is the layer number and cell2d number for the two cells. If the model + uses the unstructured discretization (DISU) input file, then CELLID + is the node number for the cell. This argument is an index variable, + which means that it should be treated as zero-based when working with + FloPy and Python. Flopy will automatically subtract one when loading + index variables and add one when writing index variables. + * alphasj (double) is the contributing factors for each contributing + node in CELLIDSJ. Note that if the number of actual contributing + cells is less than NUMALPHAJ for any ghost node, then dummy CELLIDS + should be inserted with an associated contributing factor of zero. + The sum of ALPHASJ should be less than one. This is because one minus + the sum of ALPHASJ is equal to the alpha term (alpha n in equation + 4-61 of the GWF Model report) that is multiplied by the head in cell + n. + filename : String + File name for this package. + pname : String + Package name for this package. + parent_file : MFPackage + Parent package file that references this package. Only needed for + utility packages (mfutl*). For example, mfutllaktab package must have + a mfgwflak package parent_file. + + """ + gncdata = ListTemplateGenerator(('gnc', 'gncdata', 'gncdata')) + package_abbr = "gnc" + _package_type = "gnc" + dfn_file_name = "gwf-gnc.dfn" + + dfn = [["block options", "name print_input", "type keyword", + "reader urword", "optional true"], + ["block options", "name print_flows", "type keyword", + "reader urword", "optional true"], + ["block options", "name explicit", "type keyword", "tagged true", + "reader urword", "optional true"], + ["block dimensions", "name numgnc", "type integer", + "reader urword", "optional false"], + ["block dimensions", "name numalphaj", "type integer", + "reader urword", "optional false"], + ["block gncdata", "name gncdata", + "type recarray cellidn cellidm cellidsj alphasj", + "shape (maxbound)", "reader urword"], + ["block gncdata", "name cellidn", "type integer", "shape", + "tagged false", "in_record true", "reader urword", + "numeric_index true"], + ["block gncdata", "name cellidm", "type integer", "shape", + "tagged false", "in_record true", "reader urword", + "numeric_index true"], + ["block gncdata", "name cellidsj", "type integer", + "shape (numalphaj)", "tagged false", "in_record true", + "reader urword", "numeric_index true"], + ["block gncdata", "name alphasj", "type double precision", + "shape (numalphaj)", "tagged false", "in_record true", + "reader urword"]] + + def __init__(self, simulation, loading_package=False, print_input=None, + print_flows=None, explicit=None, numgnc=None, numalphaj=None, + gncdata=None, filename=None, pname=None, parent_file=None): + super(ModflowGnc, self).__init__(simulation, "gnc", filename, pname, + loading_package, parent_file) + + # set up variables + self.print_input = self.build_mfdata("print_input", print_input) + self.print_flows = self.build_mfdata("print_flows", print_flows) + self.explicit = self.build_mfdata("explicit", explicit) + self.numgnc = self.build_mfdata("numgnc", numgnc) + self.numalphaj = self.build_mfdata("numalphaj", numalphaj) + self.gncdata = self.build_mfdata("gncdata", gncdata) + self._init_complete = True diff --git a/flopy/mf6/modflow/mfgwfchd.py b/flopy/mf6/modflow/mfgwfchd.py index 93c0cf8cfb..f978a7eb08 100644 --- 
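# With the generated class above, a GNC package is populated through its
# constructor. A hedged usage sketch for a structured (DIS) grid, with
# made-up zero-based cellids and factors (sim is an existing
# MFSimulation; the row layout is shown only schematically):
#
#     gnc = flopy.mf6.ModflowGnc(
#         sim,
#         print_input=True,
#         numgnc=1,
#         numalphaj=1,
#         gncdata=[((0, 0, 1), (0, 0, 2), (0, 0, 0), 0.33)],
#     )
#
# Each gncdata row carries cellidn, cellidm, the numalphaj contributing
# cellids, and their alphasj factors.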
a/flopy/mf6/modflow/mfgwfchd.py +++ b/flopy/mf6/modflow/mfgwfchd.py @@ -1,196 +1,196 @@ -# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY -# mf6/utils/createpackages.py -from .. import mfpackage -from ..data.mfdatautil import ListTemplateGenerator - - -class ModflowGwfchd(mfpackage.MFPackage): - """ - ModflowGwfchd defines a chd package within a gwf6 model. - - Parameters - ---------- - model : MFModel - Model that this package is a part of. Package is automatically - added to model when it is initialized. - loading_package : bool - Do not set this parameter. It is intended for debugging and internal - processing purposes only. - auxiliary : [string] - * auxiliary (string) defines an array of one or more auxiliary variable - names. There is no limit on the number of auxiliary variables that - can be provided on this line; however, lists of information provided - in subsequent blocks must have a column of data for each auxiliary - variable name defined here. The number of auxiliary variables - detected on this line determines the value for naux. Comments cannot - be provided anywhere on this line as they will be interpreted as - auxiliary variable names. Auxiliary variables may not be used by the - package, but they will be available for use by other parts of the - program. The program will terminate with an error if auxiliary - variables are specified on more than one line in the options block. - auxmultname : string - * auxmultname (string) name of auxiliary variable to be used as - multiplier of CHD head value. - boundnames : boolean - * boundnames (boolean) keyword to indicate that boundary names may be - provided with the list of constant-head cells. - print_input : boolean - * print_input (boolean) keyword to indicate that the list of constant- - head information will be written to the listing file immediately - after it is read. - print_flows : boolean - * print_flows (boolean) keyword to indicate that the list of constant- - head flow rates will be printed to the listing file for every stress - period time step in which "BUDGET PRINT" is specified in Output - Control. If there is no Output Control option and "PRINT_FLOWS" is - specified, then flow rates are printed for the last time step of each - stress period. - save_flows : boolean - * save_flows (boolean) keyword to indicate that constant-head flow - terms will be written to the file specified with "BUDGET FILEOUT" in - Output Control. - timeseries : {varname:data} or timeseries data - * Contains data for the ts package. Data can be stored in a dictionary - containing data for the ts package with variable names as keys and - package data as values. Data just for the timeseries variable is also - acceptable. See ts package documentation for more information. - observations : {varname:data} or continuous data - * Contains data for the obs package. Data can be stored in a dictionary - containing data for the obs package with variable names as keys and - package data as values. Data just for the observations variable is - also acceptable. See obs package documentation for more information. - maxbound : integer - * maxbound (integer) integer value specifying the maximum number of - constant-head cells that will be specified for use during any stress - period. - stress_period_data : [cellid, head, aux, boundname] - * cellid ((integer, ...)) is the cell identifier, and depends on the - type of grid that is used for the simulation. 
For a structured grid - that uses the DIS input file, CELLID is the layer, row, and column. - For a grid that uses the DISV input file, CELLID is the layer and - CELL2D number. If the model uses the unstructured discretization - (DISU) input file, CELLID is the node number for the cell. This - argument is an index variable, which means that it should be treated - as zero-based when working with FloPy and Python. Flopy will - automatically subtract one when loading index variables and add one - when writing index variables. - * head (double) is the head at the boundary. - * aux (double) represents the values of the auxiliary variables for - each constant head. The values of auxiliary variables must be present - for each constant head. The values must be specified in the order of - the auxiliary variables specified in the OPTIONS block. If the - package supports time series and the Options block includes a - TIMESERIESFILE entry (see the "Time-Variable Input" section), values - can be obtained from a time series by entering the time-series name - in place of a numeric value. - * boundname (string) name of the constant head boundary cell. BOUNDNAME - is an ASCII character variable that can contain as many as 40 - characters. If BOUNDNAME contains spaces in it, then the entire name - must be enclosed within single quotes. - filename : String - File name for this package. - pname : String - Package name for this package. - parent_file : MFPackage - Parent package file that references this package. Only needed for - utility packages (mfutl*). For example, mfutllaktab package must have - a mfgwflak package parent_file. - - """ - auxiliary = ListTemplateGenerator(('gwf6', 'chd', 'options', - 'auxiliary')) - ts_filerecord = ListTemplateGenerator(('gwf6', 'chd', 'options', - 'ts_filerecord')) - obs_filerecord = ListTemplateGenerator(('gwf6', 'chd', 'options', - 'obs_filerecord')) - stress_period_data = ListTemplateGenerator(('gwf6', 'chd', 'period', - 'stress_period_data')) - package_abbr = "gwfchd" - _package_type = "chd" - dfn_file_name = "gwf-chd.dfn" - - dfn = [["block options", "name auxiliary", "type string", - "shape (naux)", "reader urword", "optional true"], - ["block options", "name auxmultname", "type string", "shape", - "reader urword", "optional true"], - ["block options", "name boundnames", "type keyword", "shape", - "reader urword", "optional true"], - ["block options", "name print_input", "type keyword", - "reader urword", "optional true"], - ["block options", "name print_flows", "type keyword", - "reader urword", "optional true"], - ["block options", "name save_flows", "type keyword", - "reader urword", "optional true"], - ["block options", "name ts_filerecord", - "type record ts6 filein ts6_filename", "shape", "reader urword", - "tagged true", "optional true", "construct_package ts", - "construct_data timeseries", "parameter_name timeseries"], - ["block options", "name ts6", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name filein", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name ts6_filename", "type string", - "preserve_case true", "in_record true", "reader urword", - "optional false", "tagged false"], - ["block options", "name obs_filerecord", - "type record obs6 filein obs6_filename", "shape", "reader urword", - "tagged true", "optional true", "construct_package obs", - "construct_data continuous", "parameter_name observations"], - 
["block options", "name obs6", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name obs6_filename", "type string", - "preserve_case true", "in_record true", "tagged false", - "reader urword", "optional false"], - ["block dimensions", "name maxbound", "type integer", - "reader urword", "optional false"], - ["block period", "name iper", "type integer", - "block_variable True", "in_record true", "tagged false", "shape", - "valid", "reader urword", "optional false"], - ["block period", "name stress_period_data", - "type recarray cellid head aux boundname", "shape (maxbound)", - "reader urword"], - ["block period", "name cellid", "type integer", - "shape (ncelldim)", "tagged false", "in_record true", - "reader urword"], - ["block period", "name head", "type double precision", "shape", - "tagged false", "in_record true", "reader urword", - "time_series true"], - ["block period", "name aux", "type double precision", - "in_record true", "tagged false", "shape (naux)", "reader urword", - "optional true", "time_series true"], - ["block period", "name boundname", "type string", "shape", - "tagged false", "in_record true", "reader urword", - "optional true"]] - - def __init__(self, model, loading_package=False, auxiliary=None, - auxmultname=None, boundnames=None, print_input=None, - print_flows=None, save_flows=None, timeseries=None, - observations=None, maxbound=None, stress_period_data=None, - filename=None, pname=None, parent_file=None): - super(ModflowGwfchd, self).__init__(model, "chd", filename, pname, - loading_package, parent_file) - - # set up variables - self.auxiliary = self.build_mfdata("auxiliary", auxiliary) - self.auxmultname = self.build_mfdata("auxmultname", auxmultname) - self.boundnames = self.build_mfdata("boundnames", boundnames) - self.print_input = self.build_mfdata("print_input", print_input) - self.print_flows = self.build_mfdata("print_flows", print_flows) - self.save_flows = self.build_mfdata("save_flows", save_flows) - self._ts_filerecord = self.build_mfdata("ts_filerecord", - None) - self._ts_package = self.build_child_package("ts", timeseries, - "timeseries", - self._ts_filerecord) - self._obs_filerecord = self.build_mfdata("obs_filerecord", - None) - self._obs_package = self.build_child_package("obs", observations, - "continuous", - self._obs_filerecord) - self.maxbound = self.build_mfdata("maxbound", maxbound) - self.stress_period_data = self.build_mfdata("stress_period_data", - stress_period_data) - self._init_complete = True +# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY +# mf6/utils/createpackages.py +from .. import mfpackage +from ..data.mfdatautil import ListTemplateGenerator + + +class ModflowGwfchd(mfpackage.MFPackage): + """ + ModflowGwfchd defines a chd package within a gwf6 model. + + Parameters + ---------- + model : MFModel + Model that this package is a part of. Package is automatically + added to model when it is initialized. + loading_package : bool + Do not set this parameter. It is intended for debugging and internal + processing purposes only. + auxiliary : [string] + * auxiliary (string) defines an array of one or more auxiliary variable + names. There is no limit on the number of auxiliary variables that + can be provided on this line; however, lists of information provided + in subsequent blocks must have a column of data for each auxiliary + variable name defined here. The number of auxiliary variables + detected on this line determines the value for naux. 
Comments cannot + be provided anywhere on this line as they will be interpreted as + auxiliary variable names. Auxiliary variables may not be used by the + package, but they will be available for use by other parts of the + program. The program will terminate with an error if auxiliary + variables are specified on more than one line in the options block. + auxmultname : string + * auxmultname (string) name of auxiliary variable to be used as + multiplier of CHD head value. + boundnames : boolean + * boundnames (boolean) keyword to indicate that boundary names may be + provided with the list of constant-head cells. + print_input : boolean + * print_input (boolean) keyword to indicate that the list of constant- + head information will be written to the listing file immediately + after it is read. + print_flows : boolean + * print_flows (boolean) keyword to indicate that the list of constant- + head flow rates will be printed to the listing file for every stress + period time step in which "BUDGET PRINT" is specified in Output + Control. If there is no Output Control option and "PRINT_FLOWS" is + specified, then flow rates are printed for the last time step of each + stress period. + save_flows : boolean + * save_flows (boolean) keyword to indicate that constant-head flow + terms will be written to the file specified with "BUDGET FILEOUT" in + Output Control. + timeseries : {varname:data} or timeseries data + * Contains data for the ts package. Data can be stored in a dictionary + containing data for the ts package with variable names as keys and + package data as values. Data just for the timeseries variable is also + acceptable. See ts package documentation for more information. + observations : {varname:data} or continuous data + * Contains data for the obs package. Data can be stored in a dictionary + containing data for the obs package with variable names as keys and + package data as values. Data just for the observations variable is + also acceptable. See obs package documentation for more information. + maxbound : integer + * maxbound (integer) integer value specifying the maximum number of + constant-head cells that will be specified for use during any stress + period. + stress_period_data : [cellid, head, aux, boundname] + * cellid ((integer, ...)) is the cell identifier, and depends on the + type of grid that is used for the simulation. For a structured grid + that uses the DIS input file, CELLID is the layer, row, and column. + For a grid that uses the DISV input file, CELLID is the layer and + CELL2D number. If the model uses the unstructured discretization + (DISU) input file, CELLID is the node number for the cell. This + argument is an index variable, which means that it should be treated + as zero-based when working with FloPy and Python. Flopy will + automatically subtract one when loading index variables and add one + when writing index variables. + * head (double) is the head at the boundary. + * aux (double) represents the values of the auxiliary variables for + each constant head. The values of auxiliary variables must be present + for each constant head. The values must be specified in the order of + the auxiliary variables specified in the OPTIONS block. If the + package supports time series and the Options block includes a + TIMESERIESFILE entry (see the "Time-Variable Input" section), values + can be obtained from a time series by entering the time-series name + in place of a numeric value. + * boundname (string) name of the constant head boundary cell. 
BOUNDNAME + is an ASCII character variable that can contain as many as 40 + characters. If BOUNDNAME contains spaces in it, then the entire name + must be enclosed within single quotes. + filename : String + File name for this package. + pname : String + Package name for this package. + parent_file : MFPackage + Parent package file that references this package. Only needed for + utility packages (mfutl*). For example, mfutllaktab package must have + a mfgwflak package parent_file. + + """ + auxiliary = ListTemplateGenerator(('gwf6', 'chd', 'options', + 'auxiliary')) + ts_filerecord = ListTemplateGenerator(('gwf6', 'chd', 'options', + 'ts_filerecord')) + obs_filerecord = ListTemplateGenerator(('gwf6', 'chd', 'options', + 'obs_filerecord')) + stress_period_data = ListTemplateGenerator(('gwf6', 'chd', 'period', + 'stress_period_data')) + package_abbr = "gwfchd" + _package_type = "chd" + dfn_file_name = "gwf-chd.dfn" + + dfn = [["block options", "name auxiliary", "type string", + "shape (naux)", "reader urword", "optional true"], + ["block options", "name auxmultname", "type string", "shape", + "reader urword", "optional true"], + ["block options", "name boundnames", "type keyword", "shape", + "reader urword", "optional true"], + ["block options", "name print_input", "type keyword", + "reader urword", "optional true"], + ["block options", "name print_flows", "type keyword", + "reader urword", "optional true"], + ["block options", "name save_flows", "type keyword", + "reader urword", "optional true"], + ["block options", "name ts_filerecord", + "type record ts6 filein ts6_filename", "shape", "reader urword", + "tagged true", "optional true", "construct_package ts", + "construct_data timeseries", "parameter_name timeseries"], + ["block options", "name ts6", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name filein", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name ts6_filename", "type string", + "preserve_case true", "in_record true", "reader urword", + "optional false", "tagged false"], + ["block options", "name obs_filerecord", + "type record obs6 filein obs6_filename", "shape", "reader urword", + "tagged true", "optional true", "construct_package obs", + "construct_data continuous", "parameter_name observations"], + ["block options", "name obs6", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name obs6_filename", "type string", + "preserve_case true", "in_record true", "tagged false", + "reader urword", "optional false"], + ["block dimensions", "name maxbound", "type integer", + "reader urword", "optional false"], + ["block period", "name iper", "type integer", + "block_variable True", "in_record true", "tagged false", "shape", + "valid", "reader urword", "optional false"], + ["block period", "name stress_period_data", + "type recarray cellid head aux boundname", "shape (maxbound)", + "reader urword"], + ["block period", "name cellid", "type integer", + "shape (ncelldim)", "tagged false", "in_record true", + "reader urword"], + ["block period", "name head", "type double precision", "shape", + "tagged false", "in_record true", "reader urword", + "time_series true"], + ["block period", "name aux", "type double precision", + "in_record true", "tagged false", "shape (naux)", "reader urword", + "optional true", "time_series true"], + ["block period", "name boundname", "type 
string", "shape", + "tagged false", "in_record true", "reader urword", + "optional true"]] + + def __init__(self, model, loading_package=False, auxiliary=None, + auxmultname=None, boundnames=None, print_input=None, + print_flows=None, save_flows=None, timeseries=None, + observations=None, maxbound=None, stress_period_data=None, + filename=None, pname=None, parent_file=None): + super(ModflowGwfchd, self).__init__(model, "chd", filename, pname, + loading_package, parent_file) + + # set up variables + self.auxiliary = self.build_mfdata("auxiliary", auxiliary) + self.auxmultname = self.build_mfdata("auxmultname", auxmultname) + self.boundnames = self.build_mfdata("boundnames", boundnames) + self.print_input = self.build_mfdata("print_input", print_input) + self.print_flows = self.build_mfdata("print_flows", print_flows) + self.save_flows = self.build_mfdata("save_flows", save_flows) + self._ts_filerecord = self.build_mfdata("ts_filerecord", + None) + self._ts_package = self.build_child_package("ts", timeseries, + "timeseries", + self._ts_filerecord) + self._obs_filerecord = self.build_mfdata("obs_filerecord", + None) + self._obs_package = self.build_child_package("obs", observations, + "continuous", + self._obs_filerecord) + self.maxbound = self.build_mfdata("maxbound", maxbound) + self.stress_period_data = self.build_mfdata("stress_period_data", + stress_period_data) + self._init_complete = True diff --git a/flopy/mf6/modflow/mfgwfcsub.py b/flopy/mf6/modflow/mfgwfcsub.py index 900c5e1793..0831727562 100644 --- a/flopy/mf6/modflow/mfgwfcsub.py +++ b/flopy/mf6/modflow/mfgwfcsub.py @@ -1,579 +1,579 @@ -# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY -# mf6/utils/createpackages.py -from .. import mfpackage -from ..data.mfdatautil import ListTemplateGenerator, ArrayTemplateGenerator - - -class ModflowGwfcsub(mfpackage.MFPackage): - """ - ModflowGwfcsub defines a csub package within a gwf6 model. - - Parameters - ---------- - model : MFModel - Model that this package is a part of. Package is automatically - added to model when it is initialized. - loading_package : bool - Do not set this parameter. It is intended for debugging and internal - processing purposes only. - boundnames : boolean - * boundnames (boolean) keyword to indicate that boundary names may be - provided with the list of CSUB cells. - print_input : boolean - * print_input (boolean) keyword to indicate that the list of CSUB - information will be written to the listing file immediately after it - is read. - save_flows : boolean - * save_flows (boolean) keyword to indicate that cell-by-cell flow terms - will be written to the file specified with "BUDGET SAVE FILE" in - Output Control. - gammaw : double - * gammaw (double) unit weight of water. For freshwater, GAMMAW is - 9806.65 Newtons/cubic meters or 62.48 lb/cubic foot in SI and English - units, respectively. By default, GAMMAW is 9806.65 Newtons/cubic - meters. - beta : double - * beta (double) compressibility of water. Typical values of BETA are - 4.6512e-10 1/Pa or 2.2270e-8 lb/square foot in SI and English units, - respectively. By default, BETA is 4.6512e-10 1/Pa. - head_based : boolean - * head_based (boolean) keyword to indicate the head-based formulation - will be used to simulate coarse-grained aquifer materials and no- - delay and delay interbeds. Specifying HEAD_BASED also specifies the - INITIAL_PRECONSOLIDATION_HEAD option. 
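# Illustrative aside, not produced by createpackages.py: a minimal sketch of
# driving the generated ModflowGwfchd class above. Simulation name, grid
# dimensions, and heads are hypothetical; FloPy cellids are zero-based.
import flopy

sim = flopy.mf6.MFSimulation(sim_name="ex", sim_ws=".")
tdis = flopy.mf6.ModflowTdis(sim)            # single stress period by default
ims = flopy.mf6.ModflowIms(sim)
gwf = flopy.mf6.ModflowGwf(sim, modelname="ex")
dis = flopy.mf6.ModflowGwfdis(gwf, nlay=1, nrow=10, ncol=10)
ic = flopy.mf6.ModflowGwfic(gwf)
npf = flopy.mf6.ModflowGwfnpf(gwf)
# stress_period_data maps a zero-based stress-period index to a list of
# (cellid, head) rows; (layer, row, column) identifies a DIS cell.
chd = flopy.mf6.ModflowGwfchd(
    gwf,
    maxbound=2,
    stress_period_data={0: [((0, 0, 0), 10.0), ((0, 9, 9), 5.0)]},
)
sim.write_simulation()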
- initial_preconsolidation_head : boolean - * initial_preconsolidation_head (boolean) keyword to indicate that - preconsolidation heads will be specified for no-delay and delay - interbeds in the PACKAGEDATA block. If the - SPECIFIED_INITIAL_INTERBED_STATE option is specified in the OPTIONS - block, user-specified preconsolidation heads in the PACKAGEDATA block - are absolute values. Otherwise, user-specified preconsolidation heads - in the PACKAGEDATA block are relative to steady-state or initial - heads. - ndelaycells : integer - * ndelaycells (integer) number of nodes used to discretize delay - interbeds. If not specified, then a default value of 19 is assigned. - compression_indices : boolean - * compression_indices (boolean) keyword to indicate that the - recompression (CR) and compression (CC) indices are specified instead - of the elastic specific storage (SSE) and inelastic specific storage - (SSV) coefficients. If not specified, then elastic specific storage - (SSE) and inelastic specific storage (SSV) coefficients must be - specified. - update_material_properties : boolean - * update_material_properties (boolean) keyword to indicate that the - thickness and void ratio of coarse-grained and interbed sediments - (delay and no-delay) will vary during the simulation. If not - specified, the thickness and void ratio of coarse-grained and - interbed sediments will not vary during the simulation. - cell_fraction : boolean - * cell_fraction (boolean) keyword to indicate that the thickness of - interbeds will be specified in terms of the fraction of cell - thickness. If not specified, interbed thicknness must be specified. - specified_initial_interbed_state : boolean - * specified_initial_interbed_state (boolean) keyword to indicate that - absolute preconsolidation stresses (heads) and delay bed heads will - be specified for interbeds defined in the PACKAGEDATA block. The - SPECIFIED_INITIAL_INTERBED_STATE option is equivalent to specifying - the SPECIFIED_INITIAL_PRECONSOLITATION_STRESS and - SPECIFIED_INITIAL_DELAY_HEAD. If SPECIFIED_INITIAL_INTERBED_STATE is - not specified then preconsolidation stress (head) and delay bed head - values specified in the PACKAGEDATA block are relative to simulated - values of the first stress period if steady-state or initial stresses - and GWF heads if the first stress period is transient. - specified_initial_preconsolidation_stress : boolean - * specified_initial_preconsolidation_stress (boolean) keyword to - indicate that absolute preconsolidation stresses (heads) will be - specified for interbeds defined in the PACKAGEDATA block. If - SPECIFIED_INITIAL_PRECONSOLITATION_STRESS and - SPECIFIED_INITIAL_INTERBED_STATE are not specified then - preconsolidation stress (head) values specified in the PACKAGEDATA - block are relative to simulated values if the first stress period is - steady-state or initial stresses (heads) if the first stress period - is transient. - specified_initial_delay_head : boolean - * specified_initial_delay_head (boolean) keyword to indicate that - absolute initial delay bed head will be specified for interbeds - defined in the PACKAGEDATA block. If SPECIFIED_INITIAL_DELAY_HEAD and - SPECIFIED_INITIAL_INTERBED_STATE are not specified then delay bed - head values specified in the PACKAGEDATA block are relative to - simulated values if the first stress period is steady-state or - initial GWF heads if the first stress period is transient. 
- effective_stress_lag : boolean - * effective_stress_lag (boolean) keyword to indicate the effective - stress from the previous time step will be used to calculate specific - storage values. This option can 1) help with convergence in models - with thin cells and water table elevations close to land surface; 2) - is identical to the approach used in the SUBWT package for - MODFLOW-2005; and 3) is only used if the effective-stress formulation - is being used. By default, current effective stress values are used - to calculate specific storage values. - strainib_filerecord : [interbedstrain_filename] - * interbedstrain_filename (string) name of the comma-separated-values - output file to write final interbed strain information. - straincg_filerecord : [coarsestrain_filename] - * coarsestrain_filename (string) name of the comma-separated-values - output file to write final coarse-grained material strain - information. - compaction_filerecord : [compaction_filename] - * compaction_filename (string) name of the binary output file to write - compaction information. - fileout : boolean - * fileout (boolean) keyword to specify that an output filename is - expected next. - compaction_elastic_filerecord : [elastic_compaction_filename] - * elastic_compaction_filename (string) name of the binary output file - to write elastic interbed compaction information. - compaction_inelastic_filerecord : [inelastic_compaction_filename] - * inelastic_compaction_filename (string) name of the binary output file - to write inelastic interbed compaction information. - compaction_interbed_filerecord : [interbed_compaction_filename] - * interbed_compaction_filename (string) name of the binary output file - to write interbed compaction information. - compaction_coarse_filerecord : [coarse_compaction_filename] - * coarse_compaction_filename (string) name of the binary output file to - write elastic coarse-grained material compaction information. - zdisplacement_filerecord : [zdisplacement_filename] - * zdisplacement_filename (string) name of the binary output file to - write z-displacement information. - timeseries : {varname:data} or timeseries data - * Contains data for the ts package. Data can be stored in a dictionary - containing data for the ts package with variable names as keys and - package data as values. Data just for the timeseries variable is also - acceptable. See ts package documentation for more information. - observations : {varname:data} or continuous data - * Contains data for the obs package. Data can be stored in a dictionary - containing data for the obs package with variable names as keys and - package data as values. Data just for the observations variable is - also acceptable. See obs package documentation for more information. - ninterbeds : integer - * ninterbeds (integer) is the number of CSUB interbed systems. More - than 1 CSUB interbed systems can be assigned to a GWF cell; however, - only 1 GWF cell can be assigned to a single CSUB interbed system. - maxsig0 : integer - * maxsig0 (integer) is the maximum number of cells that can have a - specified stress offset. More than 1 stress offset can be assigned to - a GWF cell. By default, MAXSIG0 is 0. - cg_ske_cr : [double] - * cg_ske_cr (double) is the initial elastic coarse-grained material - specific storage or recompression index. The recompression index is - specified if COMPRESSION_INDICES is specified in the OPTIONS block. 
- Specified or calculated elastic coarse-grained material specific - storage values are not adjusted from initial values if HEAD_BASED is - specified in the OPTIONS block. - cg_theta : [double] - * cg_theta (double) is the initial porosity of coarse-grained - materials. - sgm : [double] - * sgm (double) is the specific gravity of moist or unsaturated - sediments. If not specified, then a default value of 1.7 is assigned. - sgs : [double] - * sgs (double) is the specific gravity of saturated sediments. If not - specified, then a default value of 2.0 is assigned. - packagedata : [icsubno, cellid, cdelay, pcs0, thick_frac, rnb, ssv_cc, - sse_cr, theta, kv, h0, boundname] - * icsubno (integer) integer value that defines the CSUB interbed number - associated with the specified PACKAGEDATA data on the line. CSUBNO - must be greater than zero and less than or equal to NCSUBCELLS. CSUB - information must be specified for every CSUB cell or the program will - terminate with an error. The program will also terminate with an - error if information for a CSUB interbed number is specified more - than once. This argument is an index variable, which means that it - should be treated as zero-based when working with FloPy and Python. - Flopy will automatically subtract one when loading index variables - and add one when writing index variables. - * cellid ((integer, ...)) is the cell identifier, and depends on the - type of grid that is used for the simulation. For a structured grid - that uses the DIS input file, CELLID is the layer, row, and column. - For a grid that uses the DISV input file, CELLID is the layer and - CELL2D number. If the model uses the unstructured discretization - (DISU) input file, CELLID is the node number for the cell. This - argument is an index variable, which means that it should be treated - as zero-based when working with FloPy and Python. Flopy will - automatically subtract one when loading index variables and add one - when writing index variables. - * cdelay (string) character string that defines the subsidence delay - type for the interbed. Possible subsidence package CDELAY strings - include: NODELAY--character keyword to indicate that delay will not - be simulated in the interbed. DELAY--character keyword to indicate - that delay will be simulated in the interbed. - * pcs0 (double) is the initial offset from the calculated initial - effective stress or initial preconsolidation stress in the interbed, - in units of height of a column of water. PCS0 is the initial - preconsolidation stress if SPECIFIED_INITIAL_INTERBED_STATE or - SPECIFIED_INITIAL_PRECONSOLIDATION_STRESS are specified in the - OPTIONS block. If HEAD_BASED is specified in the OPTIONS block, PCS0 - is the initial offset from the calculated initial head or initial - preconsolidation head in the CSUB interbed and the initial - preconsolidation stress is calculated from the calculated initial - effective stress or calculated initial geostatic stress, - respectively. - * thick_frac (double) is the interbed thickness or cell fraction of the - interbed. Interbed thickness is specified as a fraction of the cell - thickness if CELL_FRACTION is specified in the OPTIONS block. - * rnb (double) is the interbed material factor equivalent number of - interbeds in the interbed system represented by the interbed. RNB - must be greater than or equal to 1 if CDELAY is DELAY. Otherwise, RNB - can be any value. - * ssv_cc (double) is the initial inelastic specific storage or - compression index of the interbed. 
The compression index is specified - if COMPRESSION_INDICES is specified in the OPTIONS block. Specified - or calculated interbed inelastic specific storage values are not - adjusted from initial values if HEAD_BASED is specified in the - OPTIONS block. - * sse_cr (double) is the initial elastic coarse-grained material - specific storage or recompression index of the interbed. The - recompression index is specified if COMPRESSION_INDICES is specified - in the OPTIONS block. Specified or calculated interbed elastic - specific storage values are not adjusted from initial values if - HEAD_BASED is specified in the OPTIONS block. - * theta (double) is the initial porosity of the interbed. - * kv (double) is the vertical hydraulic conductivity of the delay - interbed. KV must be greater than 0 if CDELAY is DELAY. Otherwise, KV - can be any value. - * h0 (double) is the initial offset from the head in cell cellid or the - initial head in the delay interbed. H0 is the initial head in the - delay bed if SPECIFIED_INITIAL_INTERBED_STATE or - SPECIFIED_INITIAL_DELAY_HEAD are specified in the OPTIONS block. H0 - can be any value if CDELAY is NODELAY. - * boundname (string) name of the CSUB cell. BOUNDNAME is an ASCII - character variable that can contain as many as 40 characters. If - BOUNDNAME contains spaces in it, then the entire name must be - enclosed within single quotes. - stress_period_data : [cellid, sig0] - * cellid ((integer, ...)) is the cell identifier, and depends on the - type of grid that is used for the simulation. For a structured grid - that uses the DIS input file, CELLID is the layer, row, and column. - For a grid that uses the DISV input file, CELLID is the layer and - CELL2D number. If the model uses the unstructured discretization - (DISU) input file, CELLID is the node number for the cell. This - argument is an index variable, which means that it should be treated - as zero-based when working with FloPy and Python. Flopy will - automatically subtract one when loading index variables and add one - when writing index variables. - * sig0 (double) is the stress offset for the cell. SIG0 is added to the - calculated geostatic stress for the cell. SIG0 is specified only if - MAXSIG0 is specified to be greater than 0 in the DIMENSIONS block. - filename : String - File name for this package. - pname : String - Package name for this package. - parent_file : MFPackage - Parent package file that references this package. Only needed for - utility packages (mfutl*). For example, mfutllaktab package must have - a mfgwflak package parent_file. 
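# Illustrative aside, with hypothetical values: the head-based formulation and
# the absolute initial-state options described above are ordinary keyword
# arguments on the generated constructor; passing True writes the keyword to
# the OPTIONS block. Assumes the gwf model built in the CHD sketch earlier.
import flopy

csub = flopy.mf6.ModflowGwfcsub(
    gwf,
    head_based=True,                        # also implies INITIAL_PRECONSOLIDATION_HEAD
    specified_initial_interbed_state=True,  # PCS0 and H0 are read as absolute values
    ninterbeds=1,
    packagedata=[
        # icsubno, cellid, cdelay, pcs0, thick_frac, rnb, ssv_cc, sse_cr,
        # theta, kv, h0
        (0, (0, 4, 4), "nodelay", 0.0, 1.0, 1.0, 1e-4, 1e-5, 0.2, 999.0, 0.0),
    ],
)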
- - """ - strainib_filerecord = ListTemplateGenerator(('gwf6', 'csub', - 'options', - 'strainib_filerecord')) - straincg_filerecord = ListTemplateGenerator(('gwf6', 'csub', - 'options', - 'straincg_filerecord')) - compaction_filerecord = ListTemplateGenerator(('gwf6', 'csub', - 'options', - 'compaction_filerecord')) - compaction_elastic_filerecord = ListTemplateGenerator(( - 'gwf6', 'csub', 'options', 'compaction_elastic_filerecord')) - compaction_inelastic_filerecord = ListTemplateGenerator(( - 'gwf6', 'csub', 'options', 'compaction_inelastic_filerecord')) - compaction_interbed_filerecord = ListTemplateGenerator(( - 'gwf6', 'csub', 'options', 'compaction_interbed_filerecord')) - compaction_coarse_filerecord = ListTemplateGenerator(( - 'gwf6', 'csub', 'options', 'compaction_coarse_filerecord')) - zdisplacement_filerecord = ListTemplateGenerator(( - 'gwf6', 'csub', 'options', 'zdisplacement_filerecord')) - ts_filerecord = ListTemplateGenerator(('gwf6', 'csub', 'options', - 'ts_filerecord')) - obs_filerecord = ListTemplateGenerator(('gwf6', 'csub', 'options', - 'obs_filerecord')) - cg_ske_cr = ArrayTemplateGenerator(('gwf6', 'csub', 'griddata', - 'cg_ske_cr')) - cg_theta = ArrayTemplateGenerator(('gwf6', 'csub', 'griddata', - 'cg_theta')) - sgm = ArrayTemplateGenerator(('gwf6', 'csub', 'griddata', 'sgm')) - sgs = ArrayTemplateGenerator(('gwf6', 'csub', 'griddata', 'sgs')) - packagedata = ListTemplateGenerator(('gwf6', 'csub', 'packagedata', - 'packagedata')) - stress_period_data = ListTemplateGenerator(('gwf6', 'csub', 'period', - 'stress_period_data')) - package_abbr = "gwfcsub" - _package_type = "csub" - dfn_file_name = "gwf-csub.dfn" - - dfn = [["block options", "name boundnames", "type keyword", "shape", - "reader urword", "optional true"], - ["block options", "name print_input", "type keyword", - "reader urword", "optional true"], - ["block options", "name save_flows", "type keyword", - "reader urword", "optional true"], - ["block options", "name gammaw", "type double precision", - "reader urword", "optional true", "default_value 9806.65"], - ["block options", "name beta", "type double precision", - "reader urword", "optional true", "default_value 4.6512e-10"], - ["block options", "name head_based", "type keyword", - "reader urword", "optional true"], - ["block options", "name initial_preconsolidation_head", - "type keyword", "reader urword", "optional true"], - ["block options", "name ndelaycells", "type integer", - "reader urword", "optional true"], - ["block options", "name compression_indices", "type keyword", - "reader urword", "optional true"], - ["block options", "name update_material_properties", - "type keyword", "reader urword", "optional true"], - ["block options", "name cell_fraction", "type keyword", - "reader urword", "optional true"], - ["block options", "name specified_initial_interbed_state", - "type keyword", "reader urword", "optional true"], - ["block options", - "name specified_initial_preconsolidation_stress", "type keyword", - "reader urword", "optional true"], - ["block options", "name specified_initial_delay_head", - "type keyword", "reader urword", "optional true"], - ["block options", "name effective_stress_lag", "type keyword", - "reader urword", "optional true"], - ["block options", "name strainib_filerecord", - "type record strain_csv_interbed fileout interbedstrain_filename", - "shape", "reader urword", "tagged true", "optional true"], - ["block options", "name strain_csv_interbed", "type keyword", - "shape", "in_record true", "reader urword", "tagged 
true", - "optional false"], - ["block options", "name fileout", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name interbedstrain_filename", "type string", - "shape", "in_record true", "reader urword", "tagged false", - "optional false"], - ["block options", "name straincg_filerecord", - "type record strain_csv_coarse fileout coarsestrain_filename", - "shape", "reader urword", "tagged true", "optional true"], - ["block options", "name strain_csv_coarse", "type keyword", - "shape", "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name coarsestrain_filename", "type string", - "shape", "in_record true", "reader urword", "tagged false", - "optional false"], - ["block options", "name compaction_filerecord", - "type record compaction fileout compaction_filename", "shape", - "reader urword", "tagged true", "optional true"], - ["block options", "name compaction", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name fileout", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name compaction_filename", "type string", - "shape", "in_record true", "reader urword", "tagged false", - "optional false"], - ["block options", "name compaction_elastic_filerecord", - "type record compaction_elastic fileout elastic_compaction_filename", - "shape", "reader urword", "tagged true", "optional true"], - ["block options", "name compaction_elastic", "type keyword", - "shape", "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name elastic_compaction_filename", - "type string", "shape", "in_record true", "reader urword", - "tagged false", "optional false"], - ["block options", "name compaction_inelastic_filerecord", - "type record compaction_inelastic fileout " - "inelastic_compaction_filename", - "shape", "reader urword", "tagged true", "optional true"], - ["block options", "name compaction_inelastic", "type keyword", - "shape", "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name inelastic_compaction_filename", - "type string", "shape", "in_record true", "reader urword", - "tagged false", "optional false"], - ["block options", "name compaction_interbed_filerecord", - "type record compaction_interbed fileout " - "interbed_compaction_filename", - "shape", "reader urword", "tagged true", "optional true"], - ["block options", "name compaction_interbed", "type keyword", - "shape", "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name interbed_compaction_filename", - "type string", "shape", "in_record true", "reader urword", - "tagged false", "optional false"], - ["block options", "name compaction_coarse_filerecord", - "type record compaction_coarse fileout coarse_compaction_filename", - "shape", "reader urword", "tagged true", "optional true"], - ["block options", "name compaction_coarse", "type keyword", - "shape", "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name coarse_compaction_filename", - "type string", "shape", "in_record true", "reader urword", - "tagged false", "optional false"], - ["block options", "name zdisplacement_filerecord", - "type record zdisplacement fileout zdisplacement_filename", - "shape", "reader urword", "tagged true", "optional true"], - ["block options", "name 
zdisplacement", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name zdisplacement_filename", "type string", - "shape", "in_record true", "reader urword", "tagged false", - "optional false"], - ["block options", "name ts_filerecord", - "type record ts6 filein ts6_filename", "shape", "reader urword", - "tagged true", "optional true", "construct_package ts", - "construct_data timeseries", "parameter_name timeseries"], - ["block options", "name ts6", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name filein", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name ts6_filename", "type string", - "in_record true", "reader urword", "optional false", - "tagged false"], - ["block options", "name obs_filerecord", - "type record obs6 filein obs6_filename", "shape", "reader urword", - "tagged true", "optional true", "construct_package obs", - "construct_data continuous", "parameter_name observations"], - ["block options", "name obs6", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name obs6_filename", "type string", - "in_record true", "tagged false", "reader urword", - "optional false"], - ["block dimensions", "name ninterbeds", "type integer", - "reader urword", "optional false"], - ["block dimensions", "name maxsig0", "type integer", - "reader urword", "optional true"], - ["block griddata", "name cg_ske_cr", "type double precision", - "shape (nodes)", "valid", "reader readarray", - "default_value 1e-5"], - ["block griddata", "name cg_theta", "type double precision", - "shape (nodes)", "valid", "reader readarray", "default_value 0.2"], - ["block griddata", "name sgm", "type double precision", - "shape (nodes)", "valid", "reader readarray", "optional true"], - ["block griddata", "name sgs", "type double precision", - "shape (nodes)", "valid", "reader readarray", "optional true"], - ["block packagedata", "name packagedata", - "type recarray icsubno cellid cdelay pcs0 thick_frac rnb ssv_cc " - "sse_cr theta kv h0 boundname", - "shape (ncsubcells)", "reader urword"], - ["block packagedata", "name icsubno", "type integer", "shape", - "tagged false", "in_record true", "reader urword", - "numeric_index true"], - ["block packagedata", "name cellid", "type integer", - "shape (ncelldim)", "tagged false", "in_record true", - "reader urword"], - ["block packagedata", "name cdelay", "type string", "shape", - "tagged false", "in_record true", "reader urword"], - ["block packagedata", "name pcs0", "type double precision", - "shape", "tagged false", "in_record true", "reader urword"], - ["block packagedata", "name thick_frac", "type double precision", - "shape", "tagged false", "in_record true", "reader urword"], - ["block packagedata", "name rnb", "type double precision", - "shape", "tagged false", "in_record true", "reader urword"], - ["block packagedata", "name ssv_cc", "type double precision", - "shape", "tagged false", "in_record true", "reader urword"], - ["block packagedata", "name sse_cr", "type double precision", - "shape", "tagged false", "in_record true", "reader urword"], - ["block packagedata", "name theta", "type double precision", - "shape", "tagged false", "in_record true", "reader urword", - "default_value 0.2"], - ["block packagedata", "name kv", "type double precision", - "shape", "tagged false", "in_record true", 
"reader urword"], - ["block packagedata", "name h0", "type double precision", - "shape", "tagged false", "in_record true", "reader urword"], - ["block packagedata", "name boundname", "type string", "shape", - "tagged false", "in_record true", "reader urword", - "optional true"], - ["block period", "name iper", "type integer", - "block_variable True", "in_record true", "tagged false", "shape", - "valid", "reader urword", "optional false"], - ["block period", "name stress_period_data", - "type recarray cellid sig0", "shape (maxsig0)", "reader urword"], - ["block period", "name cellid", "type integer", - "shape (ncelldim)", "tagged false", "in_record true", - "reader urword"], - ["block period", "name sig0", "type double precision", "shape", - "tagged false", "in_record true", "reader urword", - "time_series true"]] - - def __init__(self, model, loading_package=False, boundnames=None, - print_input=None, save_flows=None, gammaw=9806.65, - beta=4.6512e-10, head_based=None, - initial_preconsolidation_head=None, ndelaycells=None, - compression_indices=None, update_material_properties=None, - cell_fraction=None, specified_initial_interbed_state=None, - specified_initial_preconsolidation_stress=None, - specified_initial_delay_head=None, effective_stress_lag=None, - strainib_filerecord=None, straincg_filerecord=None, - compaction_filerecord=None, fileout=None, - compaction_elastic_filerecord=None, - compaction_inelastic_filerecord=None, - compaction_interbed_filerecord=None, - compaction_coarse_filerecord=None, - zdisplacement_filerecord=None, timeseries=None, - observations=None, ninterbeds=None, maxsig0=None, - cg_ske_cr=1e-5, cg_theta=0.2, sgm=None, sgs=None, - packagedata=None, stress_period_data=None, filename=None, - pname=None, parent_file=None): - super(ModflowGwfcsub, self).__init__(model, "csub", filename, pname, - loading_package, parent_file) - - # set up variables - self.boundnames = self.build_mfdata("boundnames", boundnames) - self.print_input = self.build_mfdata("print_input", print_input) - self.save_flows = self.build_mfdata("save_flows", save_flows) - self.gammaw = self.build_mfdata("gammaw", gammaw) - self.beta = self.build_mfdata("beta", beta) - self.head_based = self.build_mfdata("head_based", head_based) - self.initial_preconsolidation_head = self.build_mfdata( - "initial_preconsolidation_head", initial_preconsolidation_head) - self.ndelaycells = self.build_mfdata("ndelaycells", ndelaycells) - self.compression_indices = self.build_mfdata("compression_indices", - compression_indices) - self.update_material_properties = self.build_mfdata( - "update_material_properties", update_material_properties) - self.cell_fraction = self.build_mfdata("cell_fraction", cell_fraction) - self.specified_initial_interbed_state = self.build_mfdata( - "specified_initial_interbed_state", - specified_initial_interbed_state) - self.specified_initial_preconsolidation_stress = self.build_mfdata( - "specified_initial_preconsolidation_stress", - specified_initial_preconsolidation_stress) - self.specified_initial_delay_head = self.build_mfdata( - "specified_initial_delay_head", specified_initial_delay_head) - self.effective_stress_lag = self.build_mfdata("effective_stress_lag", - effective_stress_lag) - self.strainib_filerecord = self.build_mfdata("strainib_filerecord", - strainib_filerecord) - self.straincg_filerecord = self.build_mfdata("straincg_filerecord", - straincg_filerecord) - self.compaction_filerecord = self.build_mfdata("compaction_filerecord", - compaction_filerecord) - self.fileout = 
self.build_mfdata("fileout", fileout) - self.compaction_elastic_filerecord = self.build_mfdata( - "compaction_elastic_filerecord", compaction_elastic_filerecord) - self.compaction_inelastic_filerecord = self.build_mfdata( - "compaction_inelastic_filerecord", - compaction_inelastic_filerecord) - self.compaction_interbed_filerecord = self.build_mfdata( - "compaction_interbed_filerecord", compaction_interbed_filerecord) - self.compaction_coarse_filerecord = self.build_mfdata( - "compaction_coarse_filerecord", compaction_coarse_filerecord) - self.zdisplacement_filerecord = self.build_mfdata( - "zdisplacement_filerecord", zdisplacement_filerecord) - self._ts_filerecord = self.build_mfdata("ts_filerecord", - None) - self._ts_package = self.build_child_package("ts", timeseries, - "timeseries", - self._ts_filerecord) - self._obs_filerecord = self.build_mfdata("obs_filerecord", - None) - self._obs_package = self.build_child_package("obs", observations, - "continuous", - self._obs_filerecord) - self.ninterbeds = self.build_mfdata("ninterbeds", ninterbeds) - self.maxsig0 = self.build_mfdata("maxsig0", maxsig0) - self.cg_ske_cr = self.build_mfdata("cg_ske_cr", cg_ske_cr) - self.cg_theta = self.build_mfdata("cg_theta", cg_theta) - self.sgm = self.build_mfdata("sgm", sgm) - self.sgs = self.build_mfdata("sgs", sgs) - self.packagedata = self.build_mfdata("packagedata", packagedata) - self.stress_period_data = self.build_mfdata("stress_period_data", - stress_period_data) - self._init_complete = True +# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY +# mf6/utils/createpackages.py +from .. import mfpackage +from ..data.mfdatautil import ListTemplateGenerator, ArrayTemplateGenerator + + +class ModflowGwfcsub(mfpackage.MFPackage): + """ + ModflowGwfcsub defines a csub package within a gwf6 model. + + Parameters + ---------- + model : MFModel + Model that this package is a part of. Package is automatically + added to model when it is initialized. + loading_package : bool + Do not set this parameter. It is intended for debugging and internal + processing purposes only. + boundnames : boolean + * boundnames (boolean) keyword to indicate that boundary names may be + provided with the list of CSUB cells. + print_input : boolean + * print_input (boolean) keyword to indicate that the list of CSUB + information will be written to the listing file immediately after it + is read. + save_flows : boolean + * save_flows (boolean) keyword to indicate that cell-by-cell flow terms + will be written to the file specified with "BUDGET SAVE FILE" in + Output Control. + gammaw : double + * gammaw (double) unit weight of water. For freshwater, GAMMAW is + 9806.65 Newtons/cubic meters or 62.48 lb/cubic foot in SI and English + units, respectively. By default, GAMMAW is 9806.65 Newtons/cubic + meters. + beta : double + * beta (double) compressibility of water. Typical values of BETA are + 4.6512e-10 1/Pa or 2.2270e-8 lb/square foot in SI and English units, + respectively. By default, BETA is 4.6512e-10 1/Pa. + head_based : boolean + * head_based (boolean) keyword to indicate the head-based formulation + will be used to simulate coarse-grained aquifer materials and no- + delay and delay interbeds. Specifying HEAD_BASED also specifies the + INITIAL_PRECONSOLIDATION_HEAD option. + initial_preconsolidation_head : boolean + * initial_preconsolidation_head (boolean) keyword to indicate that + preconsolidation heads will be specified for no-delay and delay + interbeds in the PACKAGEDATA block. 
If the + SPECIFIED_INITIAL_INTERBED_STATE option is specified in the OPTIONS + block, user-specified preconsolidation heads in the PACKAGEDATA block + are absolute values. Otherwise, user-specified preconsolidation heads + in the PACKAGEDATA block are relative to steady-state or initial + heads. + ndelaycells : integer + * ndelaycells (integer) number of nodes used to discretize delay + interbeds. If not specified, then a default value of 19 is assigned. + compression_indices : boolean + * compression_indices (boolean) keyword to indicate that the + recompression (CR) and compression (CC) indices are specified instead + of the elastic specific storage (SSE) and inelastic specific storage + (SSV) coefficients. If not specified, then elastic specific storage + (SSE) and inelastic specific storage (SSV) coefficients must be + specified. + update_material_properties : boolean + * update_material_properties (boolean) keyword to indicate that the + thickness and void ratio of coarse-grained and interbed sediments + (delay and no-delay) will vary during the simulation. If not + specified, the thickness and void ratio of coarse-grained and + interbed sediments will not vary during the simulation. + cell_fraction : boolean + * cell_fraction (boolean) keyword to indicate that the thickness of + interbeds will be specified in terms of the fraction of cell + thickness. If not specified, interbed thickness must be specified. + specified_initial_interbed_state : boolean + * specified_initial_interbed_state (boolean) keyword to indicate that + absolute preconsolidation stresses (heads) and delay bed heads will + be specified for interbeds defined in the PACKAGEDATA block. The + SPECIFIED_INITIAL_INTERBED_STATE option is equivalent to specifying + the SPECIFIED_INITIAL_PRECONSOLIDATION_STRESS and + SPECIFIED_INITIAL_DELAY_HEAD. If SPECIFIED_INITIAL_INTERBED_STATE is + not specified, then preconsolidation stress (head) and delay bed head + values specified in the PACKAGEDATA block are relative to simulated + values of the first stress period if steady-state or initial stresses + and GWF heads if the first stress period is transient. + specified_initial_preconsolidation_stress : boolean + * specified_initial_preconsolidation_stress (boolean) keyword to + indicate that absolute preconsolidation stresses (heads) will be + specified for interbeds defined in the PACKAGEDATA block. If + SPECIFIED_INITIAL_PRECONSOLIDATION_STRESS and + SPECIFIED_INITIAL_INTERBED_STATE are not specified, then + preconsolidation stress (head) values specified in the PACKAGEDATA + block are relative to simulated values if the first stress period is + steady-state or initial stresses (heads) if the first stress period + is transient. + specified_initial_delay_head : boolean + * specified_initial_delay_head (boolean) keyword to indicate that + absolute initial delay bed head will be specified for interbeds + defined in the PACKAGEDATA block. If SPECIFIED_INITIAL_DELAY_HEAD and + SPECIFIED_INITIAL_INTERBED_STATE are not specified, then delay bed + head values specified in the PACKAGEDATA block are relative to + simulated values if the first stress period is steady-state or + initial GWF heads if the first stress period is transient. + effective_stress_lag : boolean + * effective_stress_lag (boolean) keyword to indicate the effective + stress from the previous time step will be used to calculate specific + storage values. 
This option 1) can help with convergence in models + with thin cells and water table elevations close to land surface; 2) + is identical to the approach used in the SUBWT package for + MODFLOW-2005; and 3) is only used if the effective-stress formulation + is being used. By default, current effective stress values are used + to calculate specific storage values. + strainib_filerecord : [interbedstrain_filename] + * interbedstrain_filename (string) name of the comma-separated-values + output file to write final interbed strain information. + straincg_filerecord : [coarsestrain_filename] + * coarsestrain_filename (string) name of the comma-separated-values + output file to write final coarse-grained material strain + information. + compaction_filerecord : [compaction_filename] + * compaction_filename (string) name of the binary output file to write + compaction information. + fileout : boolean + * fileout (boolean) keyword to specify that an output filename is + expected next. + compaction_elastic_filerecord : [elastic_compaction_filename] + * elastic_compaction_filename (string) name of the binary output file + to write elastic interbed compaction information. + compaction_inelastic_filerecord : [inelastic_compaction_filename] + * inelastic_compaction_filename (string) name of the binary output file + to write inelastic interbed compaction information. + compaction_interbed_filerecord : [interbed_compaction_filename] + * interbed_compaction_filename (string) name of the binary output file + to write interbed compaction information. + compaction_coarse_filerecord : [coarse_compaction_filename] + * coarse_compaction_filename (string) name of the binary output file to + write elastic coarse-grained material compaction information. + zdisplacement_filerecord : [zdisplacement_filename] + * zdisplacement_filename (string) name of the binary output file to + write z-displacement information. + timeseries : {varname:data} or timeseries data + * Contains data for the ts package. Data can be stored in a dictionary + containing data for the ts package with variable names as keys and + package data as values. Data just for the timeseries variable is also + acceptable. See ts package documentation for more information. + observations : {varname:data} or continuous data + * Contains data for the obs package. Data can be stored in a dictionary + containing data for the obs package with variable names as keys and + package data as values. Data just for the observations variable is + also acceptable. See obs package documentation for more information. + ninterbeds : integer + * ninterbeds (integer) is the number of CSUB interbed systems. More + than one CSUB interbed system can be assigned to a GWF cell; however, + only one GWF cell can be assigned to a single CSUB interbed system. + maxsig0 : integer + * maxsig0 (integer) is the maximum number of cells that can have a + specified stress offset. More than one stress offset can be assigned + to a GWF cell. By default, MAXSIG0 is 0. + cg_ske_cr : [double] + * cg_ske_cr (double) is the initial elastic coarse-grained material + specific storage or recompression index. The recompression index is + specified if COMPRESSION_INDICES is specified in the OPTIONS block. + Specified or calculated elastic coarse-grained material specific + storage values are not adjusted from initial values if HEAD_BASED is + specified in the OPTIONS block. + cg_theta : [double] + * cg_theta (double) is the initial porosity of coarse-grained + materials. 
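# Illustrative aside, hypothetical dimensions: griddata variables such as
# cg_ske_cr, cg_theta, sgm, and sgs accept either a scalar, applied to every
# model node, or an array shaped like the model grid. Assumes the gwf model
# from the CHD sketch above (1 layer, 10 rows, 10 columns).
import numpy as np
import flopy

csub = flopy.mf6.ModflowGwfcsub(
    gwf,
    ninterbeds=0,                          # coarse-grained storage only
    cg_ske_cr=1e-5,                        # scalar: one value for all nodes
    cg_theta=np.full((1, 10, 10), 0.2),    # array: one value per cell
    sgm=1.7,
    sgs=2.0,
)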
+ sgm : [double] + * sgm (double) is the specific gravity of moist or unsaturated + sediments. If not specified, then a default value of 1.7 is assigned. + sgs : [double] + * sgs (double) is the specific gravity of saturated sediments. If not + specified, then a default value of 2.0 is assigned. + packagedata : [icsubno, cellid, cdelay, pcs0, thick_frac, rnb, ssv_cc, + sse_cr, theta, kv, h0, boundname] + * icsubno (integer) integer value that defines the CSUB interbed number + associated with the specified PACKAGEDATA data on the line. CSUBNO + must be greater than zero and less than or equal to NCSUBCELLS. CSUB + information must be specified for every CSUB cell or the program will + terminate with an error. The program will also terminate with an + error if information for a CSUB interbed number is specified more + than once. This argument is an index variable, which means that it + should be treated as zero-based when working with FloPy and Python. + Flopy will automatically subtract one when loading index variables + and add one when writing index variables. + * cellid ((integer, ...)) is the cell identifier, and depends on the + type of grid that is used for the simulation. For a structured grid + that uses the DIS input file, CELLID is the layer, row, and column. + For a grid that uses the DISV input file, CELLID is the layer and + CELL2D number. If the model uses the unstructured discretization + (DISU) input file, CELLID is the node number for the cell. This + argument is an index variable, which means that it should be treated + as zero-based when working with FloPy and Python. Flopy will + automatically subtract one when loading index variables and add one + when writing index variables. + * cdelay (string) character string that defines the subsidence delay + type for the interbed. Possible subsidence package CDELAY strings + include: NODELAY--character keyword to indicate that delay will not + be simulated in the interbed. DELAY--character keyword to indicate + that delay will be simulated in the interbed. + * pcs0 (double) is the initial offset from the calculated initial + effective stress or initial preconsolidation stress in the interbed, + in units of height of a column of water. PCS0 is the initial + preconsolidation stress if SPECIFIED_INITIAL_INTERBED_STATE or + SPECIFIED_INITIAL_PRECONSOLIDATION_STRESS are specified in the + OPTIONS block. If HEAD_BASED is specified in the OPTIONS block, PCS0 + is the initial offset from the calculated initial head or initial + preconsolidation head in the CSUB interbed and the initial + preconsolidation stress is calculated from the calculated initial + effective stress or calculated initial geostatic stress, + respectively. + * thick_frac (double) is the interbed thickness or cell fraction of the + interbed. Interbed thickness is specified as a fraction of the cell + thickness if CELL_FRACTION is specified in the OPTIONS block. + * rnb (double) is the interbed material factor equivalent number of + interbeds in the interbed system represented by the interbed. RNB + must be greater than or equal to 1 if CDELAY is DELAY. Otherwise, RNB + can be any value. + * ssv_cc (double) is the initial inelastic specific storage or + compression index of the interbed. The compression index is specified + if COMPRESSION_INDICES is specified in the OPTIONS block. Specified + or calculated interbed inelastic specific storage values are not + adjusted from initial values if HEAD_BASED is specified in the + OPTIONS block. 
+ * sse_cr (double) is the initial elastic coarse-grained material + specific storage or recompression index of the interbed. The + recompression index is specified if COMPRESSION_INDICES is specified + in the OPTIONS block. Specified or calculated interbed elastic + specific storage values are not adjusted from initial values if + HEAD_BASED is specified in the OPTIONS block. + * theta (double) is the initial porosity of the interbed. + * kv (double) is the vertical hydraulic conductivity of the delay + interbed. KV must be greater than 0 if CDELAY is DELAY. Otherwise, KV + can be any value. + * h0 (double) is the initial offset from the head in cell cellid or the + initial head in the delay interbed. H0 is the initial head in the + delay bed if SPECIFIED_INITIAL_INTERBED_STATE or + SPECIFIED_INITIAL_DELAY_HEAD are specified in the OPTIONS block. H0 + can be any value if CDELAY is NODELAY. + * boundname (string) name of the CSUB cell. BOUNDNAME is an ASCII + character variable that can contain as many as 40 characters. If + BOUNDNAME contains spaces in it, then the entire name must be + enclosed within single quotes. + stress_period_data : [cellid, sig0] + * cellid ((integer, ...)) is the cell identifier, and depends on the + type of grid that is used for the simulation. For a structured grid + that uses the DIS input file, CELLID is the layer, row, and column. + For a grid that uses the DISV input file, CELLID is the layer and + CELL2D number. If the model uses the unstructured discretization + (DISU) input file, CELLID is the node number for the cell. This + argument is an index variable, which means that it should be treated + as zero-based when working with FloPy and Python. Flopy will + automatically subtract one when loading index variables and add one + when writing index variables. + * sig0 (double) is the stress offset for the cell. SIG0 is added to the + calculated geostatic stress for the cell. SIG0 is specified only if + MAXSIG0 is specified to be greater than 0 in the DIMENSIONS block. + filename : String + File name for this package. + pname : String + Package name for this package. + parent_file : MFPackage + Parent package file that references this package. Only needed for + utility packages (mfutl*). For example, mfutllaktab package must have + a mfgwflak package parent_file. 
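# Illustrative aside, hypothetical values: one PACKAGEDATA row in the field
# order documented above (icsubno and cellid are zero-based in FloPy), plus a
# PERIOD entry supplying a SIG0 stress offset. Assumes the gwf model from the
# CHD sketch above.
import flopy

pd_row = (
    0,          # icsubno: interbed number
    (0, 4, 4),  # cellid: (layer, row, column) on a DIS grid
    "delay",    # cdelay: simulate a delay interbed
    0.0,        # pcs0: offset from (or absolute) preconsolidation stress
    0.5,        # thick_frac: thickness, or cell fraction with CELL_FRACTION
    2.0,        # rnb: equivalent number of interbeds (>= 1 when DELAY)
    1e-4,       # ssv_cc: inelastic specific storage, or CC index
    1e-5,       # sse_cr: elastic specific storage, or CR index
    0.2,        # theta: initial interbed porosity
    1e-6,       # kv: vertical K of the delay interbed (> 0 when DELAY)
    0.0,        # h0: offset from (or absolute) initial delay-bed head
)
csub = flopy.mf6.ModflowGwfcsub(
    gwf,
    ninterbeds=1,
    maxsig0=1,                                    # allow one SIG0 row per period
    packagedata=[pd_row],
    stress_period_data={0: [((0, 4, 4), 25.0)]},  # rows are (cellid, sig0)
)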
+ + """ + strainib_filerecord = ListTemplateGenerator(('gwf6', 'csub', + 'options', + 'strainib_filerecord')) + straincg_filerecord = ListTemplateGenerator(('gwf6', 'csub', + 'options', + 'straincg_filerecord')) + compaction_filerecord = ListTemplateGenerator(('gwf6', 'csub', + 'options', + 'compaction_filerecord')) + compaction_elastic_filerecord = ListTemplateGenerator(( + 'gwf6', 'csub', 'options', 'compaction_elastic_filerecord')) + compaction_inelastic_filerecord = ListTemplateGenerator(( + 'gwf6', 'csub', 'options', 'compaction_inelastic_filerecord')) + compaction_interbed_filerecord = ListTemplateGenerator(( + 'gwf6', 'csub', 'options', 'compaction_interbed_filerecord')) + compaction_coarse_filerecord = ListTemplateGenerator(( + 'gwf6', 'csub', 'options', 'compaction_coarse_filerecord')) + zdisplacement_filerecord = ListTemplateGenerator(( + 'gwf6', 'csub', 'options', 'zdisplacement_filerecord')) + ts_filerecord = ListTemplateGenerator(('gwf6', 'csub', 'options', + 'ts_filerecord')) + obs_filerecord = ListTemplateGenerator(('gwf6', 'csub', 'options', + 'obs_filerecord')) + cg_ske_cr = ArrayTemplateGenerator(('gwf6', 'csub', 'griddata', + 'cg_ske_cr')) + cg_theta = ArrayTemplateGenerator(('gwf6', 'csub', 'griddata', + 'cg_theta')) + sgm = ArrayTemplateGenerator(('gwf6', 'csub', 'griddata', 'sgm')) + sgs = ArrayTemplateGenerator(('gwf6', 'csub', 'griddata', 'sgs')) + packagedata = ListTemplateGenerator(('gwf6', 'csub', 'packagedata', + 'packagedata')) + stress_period_data = ListTemplateGenerator(('gwf6', 'csub', 'period', + 'stress_period_data')) + package_abbr = "gwfcsub" + _package_type = "csub" + dfn_file_name = "gwf-csub.dfn" + + dfn = [["block options", "name boundnames", "type keyword", "shape", + "reader urword", "optional true"], + ["block options", "name print_input", "type keyword", + "reader urword", "optional true"], + ["block options", "name save_flows", "type keyword", + "reader urword", "optional true"], + ["block options", "name gammaw", "type double precision", + "reader urword", "optional true", "default_value 9806.65"], + ["block options", "name beta", "type double precision", + "reader urword", "optional true", "default_value 4.6512e-10"], + ["block options", "name head_based", "type keyword", + "reader urword", "optional true"], + ["block options", "name initial_preconsolidation_head", + "type keyword", "reader urword", "optional true"], + ["block options", "name ndelaycells", "type integer", + "reader urword", "optional true"], + ["block options", "name compression_indices", "type keyword", + "reader urword", "optional true"], + ["block options", "name update_material_properties", + "type keyword", "reader urword", "optional true"], + ["block options", "name cell_fraction", "type keyword", + "reader urword", "optional true"], + ["block options", "name specified_initial_interbed_state", + "type keyword", "reader urword", "optional true"], + ["block options", + "name specified_initial_preconsolidation_stress", "type keyword", + "reader urword", "optional true"], + ["block options", "name specified_initial_delay_head", + "type keyword", "reader urword", "optional true"], + ["block options", "name effective_stress_lag", "type keyword", + "reader urword", "optional true"], + ["block options", "name strainib_filerecord", + "type record strain_csv_interbed fileout interbedstrain_filename", + "shape", "reader urword", "tagged true", "optional true"], + ["block options", "name strain_csv_interbed", "type keyword", + "shape", "in_record true", "reader urword", "tagged 
true", + "optional false"], + ["block options", "name fileout", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name interbedstrain_filename", "type string", + "shape", "in_record true", "reader urword", "tagged false", + "optional false"], + ["block options", "name straincg_filerecord", + "type record strain_csv_coarse fileout coarsestrain_filename", + "shape", "reader urword", "tagged true", "optional true"], + ["block options", "name strain_csv_coarse", "type keyword", + "shape", "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name coarsestrain_filename", "type string", + "shape", "in_record true", "reader urword", "tagged false", + "optional false"], + ["block options", "name compaction_filerecord", + "type record compaction fileout compaction_filename", "shape", + "reader urword", "tagged true", "optional true"], + ["block options", "name compaction", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name fileout", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name compaction_filename", "type string", + "shape", "in_record true", "reader urword", "tagged false", + "optional false"], + ["block options", "name compaction_elastic_filerecord", + "type record compaction_elastic fileout elastic_compaction_filename", + "shape", "reader urword", "tagged true", "optional true"], + ["block options", "name compaction_elastic", "type keyword", + "shape", "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name elastic_compaction_filename", + "type string", "shape", "in_record true", "reader urword", + "tagged false", "optional false"], + ["block options", "name compaction_inelastic_filerecord", + "type record compaction_inelastic fileout " + "inelastic_compaction_filename", + "shape", "reader urword", "tagged true", "optional true"], + ["block options", "name compaction_inelastic", "type keyword", + "shape", "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name inelastic_compaction_filename", + "type string", "shape", "in_record true", "reader urword", + "tagged false", "optional false"], + ["block options", "name compaction_interbed_filerecord", + "type record compaction_interbed fileout " + "interbed_compaction_filename", + "shape", "reader urword", "tagged true", "optional true"], + ["block options", "name compaction_interbed", "type keyword", + "shape", "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name interbed_compaction_filename", + "type string", "shape", "in_record true", "reader urword", + "tagged false", "optional false"], + ["block options", "name compaction_coarse_filerecord", + "type record compaction_coarse fileout coarse_compaction_filename", + "shape", "reader urword", "tagged true", "optional true"], + ["block options", "name compaction_coarse", "type keyword", + "shape", "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name coarse_compaction_filename", + "type string", "shape", "in_record true", "reader urword", + "tagged false", "optional false"], + ["block options", "name zdisplacement_filerecord", + "type record zdisplacement fileout zdisplacement_filename", + "shape", "reader urword", "tagged true", "optional true"], + ["block options", "name 
zdisplacement", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name zdisplacement_filename", "type string", + "shape", "in_record true", "reader urword", "tagged false", + "optional false"], + ["block options", "name ts_filerecord", + "type record ts6 filein ts6_filename", "shape", "reader urword", + "tagged true", "optional true", "construct_package ts", + "construct_data timeseries", "parameter_name timeseries"], + ["block options", "name ts6", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name filein", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name ts6_filename", "type string", + "in_record true", "reader urword", "optional false", + "tagged false"], + ["block options", "name obs_filerecord", + "type record obs6 filein obs6_filename", "shape", "reader urword", + "tagged true", "optional true", "construct_package obs", + "construct_data continuous", "parameter_name observations"], + ["block options", "name obs6", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name obs6_filename", "type string", + "in_record true", "tagged false", "reader urword", + "optional false"], + ["block dimensions", "name ninterbeds", "type integer", + "reader urword", "optional false"], + ["block dimensions", "name maxsig0", "type integer", + "reader urword", "optional true"], + ["block griddata", "name cg_ske_cr", "type double precision", + "shape (nodes)", "valid", "reader readarray", + "default_value 1e-5"], + ["block griddata", "name cg_theta", "type double precision", + "shape (nodes)", "valid", "reader readarray", "default_value 0.2"], + ["block griddata", "name sgm", "type double precision", + "shape (nodes)", "valid", "reader readarray", "optional true"], + ["block griddata", "name sgs", "type double precision", + "shape (nodes)", "valid", "reader readarray", "optional true"], + ["block packagedata", "name packagedata", + "type recarray icsubno cellid cdelay pcs0 thick_frac rnb ssv_cc " + "sse_cr theta kv h0 boundname", + "shape (ncsubcells)", "reader urword"], + ["block packagedata", "name icsubno", "type integer", "shape", + "tagged false", "in_record true", "reader urword", + "numeric_index true"], + ["block packagedata", "name cellid", "type integer", + "shape (ncelldim)", "tagged false", "in_record true", + "reader urword"], + ["block packagedata", "name cdelay", "type string", "shape", + "tagged false", "in_record true", "reader urword"], + ["block packagedata", "name pcs0", "type double precision", + "shape", "tagged false", "in_record true", "reader urword"], + ["block packagedata", "name thick_frac", "type double precision", + "shape", "tagged false", "in_record true", "reader urword"], + ["block packagedata", "name rnb", "type double precision", + "shape", "tagged false", "in_record true", "reader urword"], + ["block packagedata", "name ssv_cc", "type double precision", + "shape", "tagged false", "in_record true", "reader urword"], + ["block packagedata", "name sse_cr", "type double precision", + "shape", "tagged false", "in_record true", "reader urword"], + ["block packagedata", "name theta", "type double precision", + "shape", "tagged false", "in_record true", "reader urword", + "default_value 0.2"], + ["block packagedata", "name kv", "type double precision", + "shape", "tagged false", "in_record true", 
"reader urword"], + ["block packagedata", "name h0", "type double precision", + "shape", "tagged false", "in_record true", "reader urword"], + ["block packagedata", "name boundname", "type string", "shape", + "tagged false", "in_record true", "reader urword", + "optional true"], + ["block period", "name iper", "type integer", + "block_variable True", "in_record true", "tagged false", "shape", + "valid", "reader urword", "optional false"], + ["block period", "name stress_period_data", + "type recarray cellid sig0", "shape (maxsig0)", "reader urword"], + ["block period", "name cellid", "type integer", + "shape (ncelldim)", "tagged false", "in_record true", + "reader urword"], + ["block period", "name sig0", "type double precision", "shape", + "tagged false", "in_record true", "reader urword", + "time_series true"]] + + def __init__(self, model, loading_package=False, boundnames=None, + print_input=None, save_flows=None, gammaw=9806.65, + beta=4.6512e-10, head_based=None, + initial_preconsolidation_head=None, ndelaycells=None, + compression_indices=None, update_material_properties=None, + cell_fraction=None, specified_initial_interbed_state=None, + specified_initial_preconsolidation_stress=None, + specified_initial_delay_head=None, effective_stress_lag=None, + strainib_filerecord=None, straincg_filerecord=None, + compaction_filerecord=None, fileout=None, + compaction_elastic_filerecord=None, + compaction_inelastic_filerecord=None, + compaction_interbed_filerecord=None, + compaction_coarse_filerecord=None, + zdisplacement_filerecord=None, timeseries=None, + observations=None, ninterbeds=None, maxsig0=None, + cg_ske_cr=1e-5, cg_theta=0.2, sgm=None, sgs=None, + packagedata=None, stress_period_data=None, filename=None, + pname=None, parent_file=None): + super(ModflowGwfcsub, self).__init__(model, "csub", filename, pname, + loading_package, parent_file) + + # set up variables + self.boundnames = self.build_mfdata("boundnames", boundnames) + self.print_input = self.build_mfdata("print_input", print_input) + self.save_flows = self.build_mfdata("save_flows", save_flows) + self.gammaw = self.build_mfdata("gammaw", gammaw) + self.beta = self.build_mfdata("beta", beta) + self.head_based = self.build_mfdata("head_based", head_based) + self.initial_preconsolidation_head = self.build_mfdata( + "initial_preconsolidation_head", initial_preconsolidation_head) + self.ndelaycells = self.build_mfdata("ndelaycells", ndelaycells) + self.compression_indices = self.build_mfdata("compression_indices", + compression_indices) + self.update_material_properties = self.build_mfdata( + "update_material_properties", update_material_properties) + self.cell_fraction = self.build_mfdata("cell_fraction", cell_fraction) + self.specified_initial_interbed_state = self.build_mfdata( + "specified_initial_interbed_state", + specified_initial_interbed_state) + self.specified_initial_preconsolidation_stress = self.build_mfdata( + "specified_initial_preconsolidation_stress", + specified_initial_preconsolidation_stress) + self.specified_initial_delay_head = self.build_mfdata( + "specified_initial_delay_head", specified_initial_delay_head) + self.effective_stress_lag = self.build_mfdata("effective_stress_lag", + effective_stress_lag) + self.strainib_filerecord = self.build_mfdata("strainib_filerecord", + strainib_filerecord) + self.straincg_filerecord = self.build_mfdata("straincg_filerecord", + straincg_filerecord) + self.compaction_filerecord = self.build_mfdata("compaction_filerecord", + compaction_filerecord) + self.fileout = 
self.build_mfdata("fileout", fileout) + self.compaction_elastic_filerecord = self.build_mfdata( + "compaction_elastic_filerecord", compaction_elastic_filerecord) + self.compaction_inelastic_filerecord = self.build_mfdata( + "compaction_inelastic_filerecord", + compaction_inelastic_filerecord) + self.compaction_interbed_filerecord = self.build_mfdata( + "compaction_interbed_filerecord", compaction_interbed_filerecord) + self.compaction_coarse_filerecord = self.build_mfdata( + "compaction_coarse_filerecord", compaction_coarse_filerecord) + self.zdisplacement_filerecord = self.build_mfdata( + "zdisplacement_filerecord", zdisplacement_filerecord) + self._ts_filerecord = self.build_mfdata("ts_filerecord", + None) + self._ts_package = self.build_child_package("ts", timeseries, + "timeseries", + self._ts_filerecord) + self._obs_filerecord = self.build_mfdata("obs_filerecord", + None) + self._obs_package = self.build_child_package("obs", observations, + "continuous", + self._obs_filerecord) + self.ninterbeds = self.build_mfdata("ninterbeds", ninterbeds) + self.maxsig0 = self.build_mfdata("maxsig0", maxsig0) + self.cg_ske_cr = self.build_mfdata("cg_ske_cr", cg_ske_cr) + self.cg_theta = self.build_mfdata("cg_theta", cg_theta) + self.sgm = self.build_mfdata("sgm", sgm) + self.sgs = self.build_mfdata("sgs", sgs) + self.packagedata = self.build_mfdata("packagedata", packagedata) + self.stress_period_data = self.build_mfdata("stress_period_data", + stress_period_data) + self._init_complete = True diff --git a/flopy/mf6/modflow/mfgwfdisu.py b/flopy/mf6/modflow/mfgwfdisu.py index 597c2b8a91..5e009f57e1 100644 --- a/flopy/mf6/modflow/mfgwfdisu.py +++ b/flopy/mf6/modflow/mfgwfdisu.py @@ -1,297 +1,297 @@ -# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY -# mf6/utils/createpackages.py -from .. import mfpackage -from ..data.mfdatautil import ArrayTemplateGenerator, ListTemplateGenerator - - -class ModflowGwfdisu(mfpackage.MFPackage): - """ - ModflowGwfdisu defines a disu package within a gwf6 model. - - Parameters - ---------- - model : MFModel - Model that this package is a part of. Package is automatically - added to model when it is initialized. - loading_package : bool - Do not set this parameter. It is intended for debugging and internal - processing purposes only. - length_units : string - * length_units (string) is the length units used for this model. Values - can be "FEET", "METERS", or "CENTIMETERS". If not specified, the - default is "UNKNOWN". - nogrb : boolean - * nogrb (boolean) keyword to deactivate writing of the binary grid - file. - xorigin : double - * xorigin (double) x-position of the origin used for model grid - vertices. This value should be provided in a real-world coordinate - system. A default value of zero is assigned if not specified. The - value for XORIGIN does not affect the model simulation, but it is - written to the binary grid file so that postprocessors can locate the - grid in space. - yorigin : double - * yorigin (double) y-position of the origin used for model grid - vertices. This value should be provided in a real-world coordinate - system. If not specified, then a default value equal to zero is used. - The value for YORIGIN does not affect the model simulation, but it is - written to the binary grid file so that postprocessors can locate the - grid in space. - angrot : double - * angrot (double) counter-clockwise rotation angle (in degrees) of the - model grid coordinate system relative to a real-world coordinate - system. 
If not specified, then a default value of 0.0 is assigned. - The value for ANGROT does not affect the model simulation, but it is - written to the binary grid file so that postprocessors can locate the - grid in space. - nodes : integer - * nodes (integer) is the number of cells in the model grid. - nja : integer - * nja (integer) is the sum of the number of connections and NODES. When - calculating the total number of connections, the connection between - cell n and cell m is considered to be different from the connection - between cell m and cell n. Thus, NJA is equal to the total number of - connections, including n to m and m to n, and the total number of - cells. - nvert : integer - * nvert (integer) is the total number of (x, y) vertex pairs used to - define the plan-view shape of each cell in the model grid. If NVERT - is not specified or is specified as zero, then the VERTICES and - CELL2D blocks below are not read. NVERT and the accompanying VERTICES - and CELL2D blocks should be specified for most simulations. If the - XT3D or SAVE_SPECIFIC_DISCHARGE options are specified in the NPF - Package, then this information is required. - top : [double] - * top (double) is the top elevation for each cell in the model grid. - bot : [double] - * bot (double) is the bottom elevation for each cell. - area : [double] - * area (double) is the cell surface area (in plan view). - idomain : [integer] - * idomain (integer) is an optional array that characterizes the - existence status of a cell. If the IDOMAIN array is not specified, - then all model cells exist within the solution. If the IDOMAIN value - for a cell is 0, the cell does not exist in the simulation. Input and - output values will be read and written for the cell, but internal to - the program, the cell is excluded from the solution. If the IDOMAIN - value for a cell is 1, the cell exists in the simulation. IDOMAIN - values of -1 cannot be specified for the DISU Package. - iac : [integer] - * iac (integer) is the number of connections (plus 1) for each cell. - The sum of all the entries in IAC must be equal to NJA. - ja : [integer] - * ja (integer) is a list of cell number (n) followed by its connecting - cell numbers (m) for each of the m cells connected to cell n. The - number of values to provide for cell n is IAC(n). This list is - sequentially provided for the first to the last cell. The first value - in the list must be cell n itself, and the remaining cells must be - listed in an increasing order (sorted from lowest number to highest). - Note that the cell and its connections are only supplied for the GWF - cells and their connections to the other GWF cells. Also note that - the JA list input may be divided such that every node and its - connectivity list can be on a separate line for ease in readability - of the file. To further ease readability of the file, the node number - of the cell whose connectivity is subsequently listed, may be - expressed as a negative number, the sign of which is subsequently - converted to positive by the code. This argument is an index - variable, which means that it should be treated as zero-based when - working with FloPy and Python. Flopy will automatically subtract one - when loading index variables and add one when writing index - variables. - ihc : [integer] - * ihc (integer) is an index array indicating the direction between node - n and all of its m connections. If IHC = 0 then cell n and cell m are - connected in the vertical direction. 
Cell n overlies cell m if the - cell number for n is less than m; cell m overlies cell n if the cell - number for m is less than n. If IHC = 1 then cell n and cell m are - connected in the horizontal direction. If IHC = 2 then cell n and - cell m are connected in the horizontal direction, and the connection - is vertically staggered. A vertically staggered connection is one in - which a cell is horizontally connected to more than one cell in a - horizontal connection. - cl12 : [double] - * cl12 (double) is the array containing connection lengths between the - center of cell n and the shared face with each adjacent m cell. - hwva : [double] - * hwva (double) is a symmetric array of size NJA. For horizontal - connections, entries in HWVA are the horizontal width perpendicular - to flow. For vertical connections, entries in HWVA are the vertical - area for flow. Thus, values in the HWVA array contain dimensions of - both length and area. Entries in the HWVA array have a one-to-one - correspondence with the connections specified in the JA array. - Likewise, there is a one-to-one correspondence between entries in the - HWVA array and entries in the IHC array, which specifies the - connection type (horizontal or vertical). Entries in the HWVA array - must be symmetric; the program will terminate with an error if the - value for HWVA for an n to m connection does not equal the value for - HWVA for the corresponding n to m connection. - angldegx : [double] - * angldegx (double) is the angle (in degrees) between the horizontal - x-axis and the outward normal to the face between a cell and its - connecting cells. The angle varies between zero and 360.0 degrees, - where zero degrees points in the positive x-axis direction, and 90 - degrees points in the positive y-axis direction. ANGLDEGX is only - needed if horizontal anisotropy is specified in the NPF Package, if - the XT3D option is used in the NPF Package, or if the - SAVE_SPECIFIC_DISCHARGE option is specifed in the NPF Package. - ANGLDEGX does not need to be specified if these conditions are not - met. ANGLDEGX is of size NJA; values specified for vertical - connections and for the diagonal position are not used. Note that - ANGLDEGX is read in degrees, which is different from MODFLOW-USG, - which reads a similar variable (ANGLEX) in radians. - vertices : [iv, xv, yv] - * iv (integer) is the vertex number. Records in the VERTICES block must - be listed in consecutive order from 1 to NVERT. This argument is an - index variable, which means that it should be treated as zero-based - when working with FloPy and Python. Flopy will automatically subtract - one when loading index variables and add one when writing index - variables. - * xv (double) is the x-coordinate for the vertex. - * yv (double) is the y-coordinate for the vertex. - cell2d : [icell2d, xc, yc, ncvert, icvert] - * icell2d (integer) is the cell2d number. Records in the CELL2D block - must be listed in consecutive order from 1 to NODES. This argument is - an index variable, which means that it should be treated as zero- - based when working with FloPy and Python. Flopy will automatically - subtract one when loading index variables and add one when writing - index variables. - * xc (double) is the x-coordinate for the cell center. - * yc (double) is the y-coordinate for the cell center. - * ncvert (integer) is the number of vertices required to define the - cell. There may be a different number of vertices for each cell. 
- * icvert (integer) is an array of integer values containing vertex - numbers (in the VERTICES block) used to define the cell. Vertices - must be listed in clockwise order. - filename : String - File name for this package. - pname : String - Package name for this package. - parent_file : MFPackage - Parent package file that references this package. Only needed for - utility packages (mfutl*). For example, mfutllaktab package must have - a mfgwflak package parent_file. - - """ - top = ArrayTemplateGenerator(('gwf6', 'disu', 'griddata', 'top')) - bot = ArrayTemplateGenerator(('gwf6', 'disu', 'griddata', 'bot')) - area = ArrayTemplateGenerator(('gwf6', 'disu', 'griddata', 'area')) - idomain = ArrayTemplateGenerator(('gwf6', 'disu', 'griddata', - 'idomain')) - iac = ArrayTemplateGenerator(('gwf6', 'disu', 'connectiondata', - 'iac')) - ja = ArrayTemplateGenerator(('gwf6', 'disu', 'connectiondata', - 'ja')) - ihc = ArrayTemplateGenerator(('gwf6', 'disu', 'connectiondata', - 'ihc')) - cl12 = ArrayTemplateGenerator(('gwf6', 'disu', 'connectiondata', - 'cl12')) - hwva = ArrayTemplateGenerator(('gwf6', 'disu', 'connectiondata', - 'hwva')) - angldegx = ArrayTemplateGenerator(('gwf6', 'disu', 'connectiondata', - 'angldegx')) - vertices = ListTemplateGenerator(('gwf6', 'disu', 'vertices', - 'vertices')) - cell2d = ListTemplateGenerator(('gwf6', 'disu', 'cell2d', 'cell2d')) - package_abbr = "gwfdisu" - _package_type = "disu" - dfn_file_name = "gwf-disu.dfn" - - dfn = [["block options", "name length_units", "type string", - "reader urword", "optional true"], - ["block options", "name nogrb", "type keyword", "reader urword", - "optional true"], - ["block options", "name xorigin", "type double precision", - "reader urword", "optional true"], - ["block options", "name yorigin", "type double precision", - "reader urword", "optional true"], - ["block options", "name angrot", "type double precision", - "reader urword", "optional true"], - ["block dimensions", "name nodes", "type integer", - "reader urword", "optional false"], - ["block dimensions", "name nja", "type integer", "reader urword", - "optional false"], - ["block dimensions", "name nvert", "type integer", - "reader urword", "optional true"], - ["block griddata", "name top", "type double precision", - "shape (nodes)", "reader readarray"], - ["block griddata", "name bot", "type double precision", - "shape (nodes)", "reader readarray"], - ["block griddata", "name area", "type double precision", - "shape (nodes)", "reader readarray"], - ["block griddata", "name idomain", "type integer", - "shape (nodes)", "reader readarray", "layered true", - "optional true"], - ["block connectiondata", "name iac", "type integer", - "shape (nodes)", "reader readarray"], - ["block connectiondata", "name ja", "type integer", - "shape (nja)", "reader readarray", "numeric_index true", - "jagged_array iac"], - ["block connectiondata", "name ihc", "type integer", - "shape (nja)", "reader readarray", "jagged_array iac"], - ["block connectiondata", "name cl12", "type double precision", - "shape (nja)", "reader readarray", "jagged_array iac"], - ["block connectiondata", "name hwva", "type double precision", - "shape (nja)", "reader readarray", "jagged_array iac"], - ["block connectiondata", "name angldegx", - "type double precision", "optional true", "shape (nja)", - "reader readarray", "jagged_array iac"], - ["block vertices", "name vertices", "type recarray iv xv yv", - "reader urword", "optional false"], - ["block vertices", "name iv", "type integer", "in_record true", - 
"tagged false", "reader urword", "optional false", - "numeric_index true"], - ["block vertices", "name xv", "type double precision", - "in_record true", "tagged false", "reader urword", - "optional false"], - ["block vertices", "name yv", "type double precision", - "in_record true", "tagged false", "reader urword", - "optional false"], - ["block cell2d", "name cell2d", - "type recarray icell2d xc yc ncvert icvert", "reader urword", - "optional false"], - ["block cell2d", "name icell2d", "type integer", - "in_record true", "tagged false", "reader urword", - "optional false", "numeric_index true"], - ["block cell2d", "name xc", "type double precision", - "in_record true", "tagged false", "reader urword", - "optional false"], - ["block cell2d", "name yc", "type double precision", - "in_record true", "tagged false", "reader urword", - "optional false"], - ["block cell2d", "name ncvert", "type integer", "in_record true", - "tagged false", "reader urword", "optional false"], - ["block cell2d", "name icvert", "type integer", "shape (ncvert)", - "in_record true", "tagged false", "reader urword", - "optional false"]] - - def __init__(self, model, loading_package=False, length_units=None, - nogrb=None, xorigin=None, yorigin=None, angrot=None, - nodes=None, nja=None, nvert=None, top=None, bot=None, - area=None, idomain=None, iac=None, ja=None, ihc=None, - cl12=None, hwva=None, angldegx=None, vertices=None, - cell2d=None, filename=None, pname=None, parent_file=None): - super(ModflowGwfdisu, self).__init__(model, "disu", filename, pname, - loading_package, parent_file) - - # set up variables - self.length_units = self.build_mfdata("length_units", length_units) - self.nogrb = self.build_mfdata("nogrb", nogrb) - self.xorigin = self.build_mfdata("xorigin", xorigin) - self.yorigin = self.build_mfdata("yorigin", yorigin) - self.angrot = self.build_mfdata("angrot", angrot) - self.nodes = self.build_mfdata("nodes", nodes) - self.nja = self.build_mfdata("nja", nja) - self.nvert = self.build_mfdata("nvert", nvert) - self.top = self.build_mfdata("top", top) - self.bot = self.build_mfdata("bot", bot) - self.area = self.build_mfdata("area", area) - self.idomain = self.build_mfdata("idomain", idomain) - self.iac = self.build_mfdata("iac", iac) - self.ja = self.build_mfdata("ja", ja) - self.ihc = self.build_mfdata("ihc", ihc) - self.cl12 = self.build_mfdata("cl12", cl12) - self.hwva = self.build_mfdata("hwva", hwva) - self.angldegx = self.build_mfdata("angldegx", angldegx) - self.vertices = self.build_mfdata("vertices", vertices) - self.cell2d = self.build_mfdata("cell2d", cell2d) - self._init_complete = True +# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY +# mf6/utils/createpackages.py +from .. import mfpackage +from ..data.mfdatautil import ArrayTemplateGenerator, ListTemplateGenerator + + +class ModflowGwfdisu(mfpackage.MFPackage): + """ + ModflowGwfdisu defines a disu package within a gwf6 model. + + Parameters + ---------- + model : MFModel + Model that this package is a part of. Package is automatically + added to model when it is initialized. + loading_package : bool + Do not set this parameter. It is intended for debugging and internal + processing purposes only. + length_units : string + * length_units (string) is the length units used for this model. Values + can be "FEET", "METERS", or "CENTIMETERS". If not specified, the + default is "UNKNOWN". + nogrb : boolean + * nogrb (boolean) keyword to deactivate writing of the binary grid + file. 
+ xorigin : double + * xorigin (double) x-position of the origin used for model grid + vertices. This value should be provided in a real-world coordinate + system. A default value of zero is assigned if not specified. The + value for XORIGIN does not affect the model simulation, but it is + written to the binary grid file so that postprocessors can locate the + grid in space. + yorigin : double + * yorigin (double) y-position of the origin used for model grid + vertices. This value should be provided in a real-world coordinate + system. If not specified, then a default value equal to zero is used. + The value for YORIGIN does not affect the model simulation, but it is + written to the binary grid file so that postprocessors can locate the + grid in space. + angrot : double + * angrot (double) counter-clockwise rotation angle (in degrees) of the + model grid coordinate system relative to a real-world coordinate + system. If not specified, then a default value of 0.0 is assigned. + The value for ANGROT does not affect the model simulation, but it is + written to the binary grid file so that postprocessors can locate the + grid in space. + nodes : integer + * nodes (integer) is the number of cells in the model grid. + nja : integer + * nja (integer) is the sum of the number of connections and NODES. When + calculating the total number of connections, the connection between + cell n and cell m is considered to be different from the connection + between cell m and cell n. Thus, NJA is equal to the total number of + connections, including n to m and m to n, and the total number of + cells. + nvert : integer + * nvert (integer) is the total number of (x, y) vertex pairs used to + define the plan-view shape of each cell in the model grid. If NVERT + is not specified or is specified as zero, then the VERTICES and + CELL2D blocks below are not read. NVERT and the accompanying VERTICES + and CELL2D blocks should be specified for most simulations. If the + XT3D or SAVE_SPECIFIC_DISCHARGE options are specified in the NPF + Package, then this information is required. + top : [double] + * top (double) is the top elevation for each cell in the model grid. + bot : [double] + * bot (double) is the bottom elevation for each cell. + area : [double] + * area (double) is the cell surface area (in plan view). + idomain : [integer] + * idomain (integer) is an optional array that characterizes the + existence status of a cell. If the IDOMAIN array is not specified, + then all model cells exist within the solution. If the IDOMAIN value + for a cell is 0, the cell does not exist in the simulation. Input and + output values will be read and written for the cell, but internal to + the program, the cell is excluded from the solution. If the IDOMAIN + value for a cell is 1, the cell exists in the simulation. IDOMAIN + values of -1 cannot be specified for the DISU Package. + iac : [integer] + * iac (integer) is the number of connections (plus 1) for each cell. + The sum of all the entries in IAC must be equal to NJA. + ja : [integer] + * ja (integer) is a list of cell number (n) followed by its connecting + cell numbers (m) for each of the m cells connected to cell n. The + number of values to provide for cell n is IAC(n). This list is + sequentially provided for the first to the last cell. The first value + in the list must be cell n itself, and the remaining cells must be + listed in an increasing order (sorted from lowest number to highest). 
+ Note that the cell and its connections are only supplied for the GWF + cells and their connections to the other GWF cells. Also note that + the JA list input may be divided such that every node and its + connectivity list can be on a separate line for ease in readability + of the file. To further ease readability of the file, the node number + of the cell whose connectivity is subsequently listed, may be + expressed as a negative number, the sign of which is subsequently + converted to positive by the code. This argument is an index + variable, which means that it should be treated as zero-based when + working with FloPy and Python. Flopy will automatically subtract one + when loading index variables and add one when writing index + variables. + ihc : [integer] + * ihc (integer) is an index array indicating the direction between node + n and all of its m connections. If IHC = 0 then cell n and cell m are + connected in the vertical direction. Cell n overlies cell m if the + cell number for n is less than m; cell m overlies cell n if the cell + number for m is less than n. If IHC = 1 then cell n and cell m are + connected in the horizontal direction. If IHC = 2 then cell n and + cell m are connected in the horizontal direction, and the connection + is vertically staggered. A vertically staggered connection is one in + which a cell is horizontally connected to more than one cell in a + horizontal connection. + cl12 : [double] + * cl12 (double) is the array containing connection lengths between the + center of cell n and the shared face with each adjacent m cell. + hwva : [double] + * hwva (double) is a symmetric array of size NJA. For horizontal + connections, entries in HWVA are the horizontal width perpendicular + to flow. For vertical connections, entries in HWVA are the vertical + area for flow. Thus, values in the HWVA array contain dimensions of + both length and area. Entries in the HWVA array have a one-to-one + correspondence with the connections specified in the JA array. + Likewise, there is a one-to-one correspondence between entries in the + HWVA array and entries in the IHC array, which specifies the + connection type (horizontal or vertical). Entries in the HWVA array + must be symmetric; the program will terminate with an error if the + value for HWVA for an n to m connection does not equal the value for + HWVA for the corresponding m to n connection. + angldegx : [double] + * angldegx (double) is the angle (in degrees) between the horizontal + x-axis and the outward normal to the face between a cell and its + connecting cells. The angle varies between zero and 360.0 degrees, + where zero degrees points in the positive x-axis direction, and 90 + degrees points in the positive y-axis direction. ANGLDEGX is only + needed if horizontal anisotropy is specified in the NPF Package, if + the XT3D option is used in the NPF Package, or if the + SAVE_SPECIFIC_DISCHARGE option is specified in the NPF Package. + ANGLDEGX does not need to be specified if these conditions are not + met. ANGLDEGX is of size NJA; values specified for vertical + connections and for the diagonal position are not used. Note that + ANGLDEGX is read in degrees, which is different from MODFLOW-USG, + which reads a similar variable (ANGLEX) in radians. + vertices : [iv, xv, yv] + * iv (integer) is the vertex number. Records in the VERTICES block must + be listed in consecutive order from 1 to NVERT. 
This argument is an + index variable, which means that it should be treated as zero-based + when working with FloPy and Python. Flopy will automatically subtract + one when loading index variables and add one when writing index + variables. + * xv (double) is the x-coordinate for the vertex. + * yv (double) is the y-coordinate for the vertex. + cell2d : [icell2d, xc, yc, ncvert, icvert] + * icell2d (integer) is the cell2d number. Records in the CELL2D block + must be listed in consecutive order from 1 to NODES. This argument is + an index variable, which means that it should be treated as zero- + based when working with FloPy and Python. Flopy will automatically + subtract one when loading index variables and add one when writing + index variables. + * xc (double) is the x-coordinate for the cell center. + * yc (double) is the y-coordinate for the cell center. + * ncvert (integer) is the number of vertices required to define the + cell. There may be a different number of vertices for each cell. + * icvert (integer) is an array of integer values containing vertex + numbers (in the VERTICES block) used to define the cell. Vertices + must be listed in clockwise order. + filename : String + File name for this package. + pname : String + Package name for this package. + parent_file : MFPackage + Parent package file that references this package. Only needed for + utility packages (mfutl*). For example, mfutllaktab package must have + a mfgwflak package parent_file. + + """ + top = ArrayTemplateGenerator(('gwf6', 'disu', 'griddata', 'top')) + bot = ArrayTemplateGenerator(('gwf6', 'disu', 'griddata', 'bot')) + area = ArrayTemplateGenerator(('gwf6', 'disu', 'griddata', 'area')) + idomain = ArrayTemplateGenerator(('gwf6', 'disu', 'griddata', + 'idomain')) + iac = ArrayTemplateGenerator(('gwf6', 'disu', 'connectiondata', + 'iac')) + ja = ArrayTemplateGenerator(('gwf6', 'disu', 'connectiondata', + 'ja')) + ihc = ArrayTemplateGenerator(('gwf6', 'disu', 'connectiondata', + 'ihc')) + cl12 = ArrayTemplateGenerator(('gwf6', 'disu', 'connectiondata', + 'cl12')) + hwva = ArrayTemplateGenerator(('gwf6', 'disu', 'connectiondata', + 'hwva')) + angldegx = ArrayTemplateGenerator(('gwf6', 'disu', 'connectiondata', + 'angldegx')) + vertices = ListTemplateGenerator(('gwf6', 'disu', 'vertices', + 'vertices')) + cell2d = ListTemplateGenerator(('gwf6', 'disu', 'cell2d', 'cell2d')) + package_abbr = "gwfdisu" + _package_type = "disu" + dfn_file_name = "gwf-disu.dfn" + + dfn = [["block options", "name length_units", "type string", + "reader urword", "optional true"], + ["block options", "name nogrb", "type keyword", "reader urword", + "optional true"], + ["block options", "name xorigin", "type double precision", + "reader urword", "optional true"], + ["block options", "name yorigin", "type double precision", + "reader urword", "optional true"], + ["block options", "name angrot", "type double precision", + "reader urword", "optional true"], + ["block dimensions", "name nodes", "type integer", + "reader urword", "optional false"], + ["block dimensions", "name nja", "type integer", "reader urword", + "optional false"], + ["block dimensions", "name nvert", "type integer", + "reader urword", "optional true"], + ["block griddata", "name top", "type double precision", + "shape (nodes)", "reader readarray"], + ["block griddata", "name bot", "type double precision", + "shape (nodes)", "reader readarray"], + ["block griddata", "name area", "type double precision", + "shape (nodes)", "reader readarray"], + ["block griddata", "name 
idomain", "type integer", + "shape (nodes)", "reader readarray", "layered true", + "optional true"], + ["block connectiondata", "name iac", "type integer", + "shape (nodes)", "reader readarray"], + ["block connectiondata", "name ja", "type integer", + "shape (nja)", "reader readarray", "numeric_index true", + "jagged_array iac"], + ["block connectiondata", "name ihc", "type integer", + "shape (nja)", "reader readarray", "jagged_array iac"], + ["block connectiondata", "name cl12", "type double precision", + "shape (nja)", "reader readarray", "jagged_array iac"], + ["block connectiondata", "name hwva", "type double precision", + "shape (nja)", "reader readarray", "jagged_array iac"], + ["block connectiondata", "name angldegx", + "type double precision", "optional true", "shape (nja)", + "reader readarray", "jagged_array iac"], + ["block vertices", "name vertices", "type recarray iv xv yv", + "reader urword", "optional false"], + ["block vertices", "name iv", "type integer", "in_record true", + "tagged false", "reader urword", "optional false", + "numeric_index true"], + ["block vertices", "name xv", "type double precision", + "in_record true", "tagged false", "reader urword", + "optional false"], + ["block vertices", "name yv", "type double precision", + "in_record true", "tagged false", "reader urword", + "optional false"], + ["block cell2d", "name cell2d", + "type recarray icell2d xc yc ncvert icvert", "reader urword", + "optional false"], + ["block cell2d", "name icell2d", "type integer", + "in_record true", "tagged false", "reader urword", + "optional false", "numeric_index true"], + ["block cell2d", "name xc", "type double precision", + "in_record true", "tagged false", "reader urword", + "optional false"], + ["block cell2d", "name yc", "type double precision", + "in_record true", "tagged false", "reader urword", + "optional false"], + ["block cell2d", "name ncvert", "type integer", "in_record true", + "tagged false", "reader urword", "optional false"], + ["block cell2d", "name icvert", "type integer", "shape (ncvert)", + "in_record true", "tagged false", "reader urword", + "optional false"]] + + def __init__(self, model, loading_package=False, length_units=None, + nogrb=None, xorigin=None, yorigin=None, angrot=None, + nodes=None, nja=None, nvert=None, top=None, bot=None, + area=None, idomain=None, iac=None, ja=None, ihc=None, + cl12=None, hwva=None, angldegx=None, vertices=None, + cell2d=None, filename=None, pname=None, parent_file=None): + super(ModflowGwfdisu, self).__init__(model, "disu", filename, pname, + loading_package, parent_file) + + # set up variables + self.length_units = self.build_mfdata("length_units", length_units) + self.nogrb = self.build_mfdata("nogrb", nogrb) + self.xorigin = self.build_mfdata("xorigin", xorigin) + self.yorigin = self.build_mfdata("yorigin", yorigin) + self.angrot = self.build_mfdata("angrot", angrot) + self.nodes = self.build_mfdata("nodes", nodes) + self.nja = self.build_mfdata("nja", nja) + self.nvert = self.build_mfdata("nvert", nvert) + self.top = self.build_mfdata("top", top) + self.bot = self.build_mfdata("bot", bot) + self.area = self.build_mfdata("area", area) + self.idomain = self.build_mfdata("idomain", idomain) + self.iac = self.build_mfdata("iac", iac) + self.ja = self.build_mfdata("ja", ja) + self.ihc = self.build_mfdata("ihc", ihc) + self.cl12 = self.build_mfdata("cl12", cl12) + self.hwva = self.build_mfdata("hwva", hwva) + self.angldegx = self.build_mfdata("angldegx", angldegx) + self.vertices = self.build_mfdata("vertices", 
vertices) + self.cell2d = self.build_mfdata("cell2d", cell2d) + self._init_complete = True diff --git a/flopy/mf6/modflow/mfgwfdisv.py b/flopy/mf6/modflow/mfgwfdisv.py index d37f0d25a8..70f44b8388 100644 --- a/flopy/mf6/modflow/mfgwfdisv.py +++ b/flopy/mf6/modflow/mfgwfdisv.py @@ -1,194 +1,194 @@ -# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY -# mf6/utils/createpackages.py -from .. import mfpackage -from ..data.mfdatautil import ArrayTemplateGenerator, ListTemplateGenerator - - -class ModflowGwfdisv(mfpackage.MFPackage): - """ - ModflowGwfdisv defines a disv package within a gwf6 model. - - Parameters - ---------- - model : MFModel - Model that this package is a part of. Package is automatically - added to model when it is initialized. - loading_package : bool - Do not set this parameter. It is intended for debugging and internal - processing purposes only. - length_units : string - * length_units (string) is the length units used for this model. Values - can be "FEET", "METERS", or "CENTIMETERS". If not specified, the - default is "UNKNOWN". - nogrb : boolean - * nogrb (boolean) keyword to deactivate writing of the binary grid - file. - xorigin : double - * xorigin (double) x-position of the origin used for model grid - vertices. This value should be provided in a real-world coordinate - system. A default value of zero is assigned if not specified. The - value for XORIGIN does not affect the model simulation, but it is - written to the binary grid file so that postprocessors can locate the - grid in space. - yorigin : double - * yorigin (double) y-position of the origin used for model grid - vertices. This value should be provided in a real-world coordinate - system. If not specified, then a default value equal to zero is used. - The value for YORIGIN does not affect the model simulation, but it is - written to the binary grid file so that postprocessors can locate the - grid in space. - angrot : double - * angrot (double) counter-clockwise rotation angle (in degrees) of the - model grid coordinate system relative to a real-world coordinate - system. If not specified, then a default value of 0.0 is assigned. - The value for ANGROT does not affect the model simulation, but it is - written to the binary grid file so that postprocessors can locate the - grid in space. - nlay : integer - * nlay (integer) is the number of layers in the model grid. - ncpl : integer - * ncpl (integer) is the number of cells per layer. This is a constant - value for the grid and it applies to all layers. - nvert : integer - * nvert (integer) is the total number of (x, y) vertex pairs used to - characterize the horizontal configuration of the model grid. - top : [double] - * top (double) is the top elevation for each cell in the top model - layer. - botm : [double] - * botm (double) is the bottom elevation for each cell. - idomain : [integer] - * idomain (integer) is an optional array that characterizes the - existence status of a cell. If the IDOMAIN array is not specified, - then all model cells exist within the solution. If the IDOMAIN value - for a cell is 0, the cell does not exist in the simulation. Input and - output values will be read and written for the cell, but internal to - the program, the cell is excluded from the solution. If the IDOMAIN - value for a cell is 1, the cell exists in the simulation. If the - IDOMAIN value for a cell is -1, the cell does not exist in the - simulation. Furthermore, the first existing cell above will be - connected to the first existing cell below. 
This type of cell is - referred to as a "vertical pass through" cell. - vertices : [iv, xv, yv] - * iv (integer) is the vertex number. Records in the VERTICES block must - be listed in consecutive order from 1 to NVERT. This argument is an - index variable, which means that it should be treated as zero-based - when working with FloPy and Python. Flopy will automatically subtract - one when loading index variables and add one when writing index - variables. - * xv (double) is the x-coordinate for the vertex. - * yv (double) is the y-coordinate for the vertex. - cell2d : [icell2d, xc, yc, ncvert, icvert] - * icell2d (integer) is the CELL2D number. Records in the CELL2D block - must be listed in consecutive order from the first to the last. This - argument is an index variable, which means that it should be treated - as zero-based when working with FloPy and Python. Flopy will - automatically subtract one when loading index variables and add one - when writing index variables. - * xc (double) is the x-coordinate for the cell center. - * yc (double) is the y-coordinate for the cell center. - * ncvert (integer) is the number of vertices required to define the - cell. There may be a different number of vertices for each cell. - * icvert (integer) is an array of integer values containing vertex - numbers (in the VERTICES block) used to define the cell. Vertices - must be listed in clockwise order. Cells that are connected must - share vertices. This argument is an index variable, which means that - it should be treated as zero-based when working with FloPy and - Python. Flopy will automatically subtract one when loading index - variables and add one when writing index variables. - filename : String - File name for this package. - pname : String - Package name for this package. - parent_file : MFPackage - Parent package file that references this package. Only needed for - utility packages (mfutl*). For example, mfutllaktab package must have - a mfgwflak package parent_file. 
- - """ - top = ArrayTemplateGenerator(('gwf6', 'disv', 'griddata', 'top')) - botm = ArrayTemplateGenerator(('gwf6', 'disv', 'griddata', 'botm')) - idomain = ArrayTemplateGenerator(('gwf6', 'disv', 'griddata', - 'idomain')) - vertices = ListTemplateGenerator(('gwf6', 'disv', 'vertices', - 'vertices')) - cell2d = ListTemplateGenerator(('gwf6', 'disv', 'cell2d', 'cell2d')) - package_abbr = "gwfdisv" - _package_type = "disv" - dfn_file_name = "gwf-disv.dfn" - - dfn = [["block options", "name length_units", "type string", - "reader urword", "optional true"], - ["block options", "name nogrb", "type keyword", "reader urword", - "optional true"], - ["block options", "name xorigin", "type double precision", - "reader urword", "optional true"], - ["block options", "name yorigin", "type double precision", - "reader urword", "optional true"], - ["block options", "name angrot", "type double precision", - "reader urword", "optional true"], - ["block dimensions", "name nlay", "type integer", - "reader urword", "optional false"], - ["block dimensions", "name ncpl", "type integer", - "reader urword", "optional false"], - ["block dimensions", "name nvert", "type integer", - "reader urword", "optional false"], - ["block griddata", "name top", "type double precision", - "shape (ncpl)", "reader readarray"], - ["block griddata", "name botm", "type double precision", - "shape (nlay, ncpl)", "reader readarray", "layered true"], - ["block griddata", "name idomain", "type integer", - "shape (nlay, ncpl)", "reader readarray", "layered true", - "optional true"], - ["block vertices", "name vertices", "type recarray iv xv yv", - "reader urword", "optional false"], - ["block vertices", "name iv", "type integer", "in_record true", - "tagged false", "reader urword", "optional false", - "numeric_index true"], - ["block vertices", "name xv", "type double precision", - "in_record true", "tagged false", "reader urword", - "optional false"], - ["block vertices", "name yv", "type double precision", - "in_record true", "tagged false", "reader urword", - "optional false"], - ["block cell2d", "name cell2d", - "type recarray icell2d xc yc ncvert icvert", "reader urword", - "optional false"], - ["block cell2d", "name icell2d", "type integer", - "in_record true", "tagged false", "reader urword", - "optional false", "numeric_index true"], - ["block cell2d", "name xc", "type double precision", - "in_record true", "tagged false", "reader urword", - "optional false"], - ["block cell2d", "name yc", "type double precision", - "in_record true", "tagged false", "reader urword", - "optional false"], - ["block cell2d", "name ncvert", "type integer", "in_record true", - "tagged false", "reader urword", "optional false"], - ["block cell2d", "name icvert", "type integer", "shape (ncvert)", - "in_record true", "tagged false", "reader urword", - "optional false", "numeric_index true"]] - - def __init__(self, model, loading_package=False, length_units=None, - nogrb=None, xorigin=None, yorigin=None, angrot=None, - nlay=None, ncpl=None, nvert=None, top=None, botm=None, - idomain=None, vertices=None, cell2d=None, filename=None, - pname=None, parent_file=None): - super(ModflowGwfdisv, self).__init__(model, "disv", filename, pname, - loading_package, parent_file) - - # set up variables - self.length_units = self.build_mfdata("length_units", length_units) - self.nogrb = self.build_mfdata("nogrb", nogrb) - self.xorigin = self.build_mfdata("xorigin", xorigin) - self.yorigin = self.build_mfdata("yorigin", yorigin) - self.angrot = self.build_mfdata("angrot", 
angrot) - self.nlay = self.build_mfdata("nlay", nlay) - self.ncpl = self.build_mfdata("ncpl", ncpl) - self.nvert = self.build_mfdata("nvert", nvert) - self.top = self.build_mfdata("top", top) - self.botm = self.build_mfdata("botm", botm) - self.idomain = self.build_mfdata("idomain", idomain) - self.vertices = self.build_mfdata("vertices", vertices) - self.cell2d = self.build_mfdata("cell2d", cell2d) - self._init_complete = True +# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY +# mf6/utils/createpackages.py +from .. import mfpackage +from ..data.mfdatautil import ArrayTemplateGenerator, ListTemplateGenerator + + +class ModflowGwfdisv(mfpackage.MFPackage): + """ + ModflowGwfdisv defines a disv package within a gwf6 model. + + Parameters + ---------- + model : MFModel + Model that this package is a part of. Package is automatically + added to model when it is initialized. + loading_package : bool + Do not set this parameter. It is intended for debugging and internal + processing purposes only. + length_units : string + * length_units (string) is the length units used for this model. Values + can be "FEET", "METERS", or "CENTIMETERS". If not specified, the + default is "UNKNOWN". + nogrb : boolean + * nogrb (boolean) keyword to deactivate writing of the binary grid + file. + xorigin : double + * xorigin (double) x-position of the origin used for model grid + vertices. This value should be provided in a real-world coordinate + system. A default value of zero is assigned if not specified. The + value for XORIGIN does not affect the model simulation, but it is + written to the binary grid file so that postprocessors can locate the + grid in space. + yorigin : double + * yorigin (double) y-position of the origin used for model grid + vertices. This value should be provided in a real-world coordinate + system. If not specified, then a default value equal to zero is used. + The value for YORIGIN does not affect the model simulation, but it is + written to the binary grid file so that postprocessors can locate the + grid in space. + angrot : double + * angrot (double) counter-clockwise rotation angle (in degrees) of the + model grid coordinate system relative to a real-world coordinate + system. If not specified, then a default value of 0.0 is assigned. + The value for ANGROT does not affect the model simulation, but it is + written to the binary grid file so that postprocessors can locate the + grid in space. + nlay : integer + * nlay (integer) is the number of layers in the model grid. + ncpl : integer + * ncpl (integer) is the number of cells per layer. This is a constant + value for the grid and it applies to all layers. + nvert : integer + * nvert (integer) is the total number of (x, y) vertex pairs used to + characterize the horizontal configuration of the model grid. + top : [double] + * top (double) is the top elevation for each cell in the top model + layer. + botm : [double] + * botm (double) is the bottom elevation for each cell. + idomain : [integer] + * idomain (integer) is an optional array that characterizes the + existence status of a cell. If the IDOMAIN array is not specified, + then all model cells exist within the solution. If the IDOMAIN value + for a cell is 0, the cell does not exist in the simulation. Input and + output values will be read and written for the cell, but internal to + the program, the cell is excluded from the solution. If the IDOMAIN + value for a cell is 1, the cell exists in the simulation. 
If the + IDOMAIN value for a cell is -1, the cell does not exist in the + simulation. Furthermore, the first existing cell above will be + connected to the first existing cell below. This type of cell is + referred to as a "vertical pass through" cell. + vertices : [iv, xv, yv] + * iv (integer) is the vertex number. Records in the VERTICES block must + be listed in consecutive order from 1 to NVERT. This argument is an + index variable, which means that it should be treated as zero-based + when working with FloPy and Python. Flopy will automatically subtract + one when loading index variables and add one when writing index + variables. + * xv (double) is the x-coordinate for the vertex. + * yv (double) is the y-coordinate for the vertex. + cell2d : [icell2d, xc, yc, ncvert, icvert] + * icell2d (integer) is the CELL2D number. Records in the CELL2D block + must be listed in consecutive order from the first to the last. This + argument is an index variable, which means that it should be treated + as zero-based when working with FloPy and Python. Flopy will + automatically subtract one when loading index variables and add one + when writing index variables. + * xc (double) is the x-coordinate for the cell center. + * yc (double) is the y-coordinate for the cell center. + * ncvert (integer) is the number of vertices required to define the + cell. There may be a different number of vertices for each cell. + * icvert (integer) is an array of integer values containing vertex + numbers (in the VERTICES block) used to define the cell. Vertices + must be listed in clockwise order. Cells that are connected must + share vertices. This argument is an index variable, which means that + it should be treated as zero-based when working with FloPy and + Python. Flopy will automatically subtract one when loading index + variables and add one when writing index variables. + filename : String + File name for this package. + pname : String + Package name for this package. + parent_file : MFPackage + Parent package file that references this package. Only needed for + utility packages (mfutl*). For example, mfutllaktab package must have + a mfgwflak package parent_file. 
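
The IDOMAIN codes above (1 = active, 0 = removed from the solution, -1 = vertical pass-through) can be made concrete with a small array; the layer and cell counts below are illustrative only, not taken from this patch:

    import numpy as np

    nlay, ncpl = 3, 4                           # hypothetical 3-layer DISV grid
    idomain = np.ones((nlay, ncpl), dtype=int)  # 1: cell exists in the simulation
    idomain[1, 2] = -1  # pass-through: layers 0 and 2 connect vertically at cell 2
    idomain[1, 3] = 0   # cell is excluded from the solution entirely
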
+ + """ + top = ArrayTemplateGenerator(('gwf6', 'disv', 'griddata', 'top')) + botm = ArrayTemplateGenerator(('gwf6', 'disv', 'griddata', 'botm')) + idomain = ArrayTemplateGenerator(('gwf6', 'disv', 'griddata', + 'idomain')) + vertices = ListTemplateGenerator(('gwf6', 'disv', 'vertices', + 'vertices')) + cell2d = ListTemplateGenerator(('gwf6', 'disv', 'cell2d', 'cell2d')) + package_abbr = "gwfdisv" + _package_type = "disv" + dfn_file_name = "gwf-disv.dfn" + + dfn = [["block options", "name length_units", "type string", + "reader urword", "optional true"], + ["block options", "name nogrb", "type keyword", "reader urword", + "optional true"], + ["block options", "name xorigin", "type double precision", + "reader urword", "optional true"], + ["block options", "name yorigin", "type double precision", + "reader urword", "optional true"], + ["block options", "name angrot", "type double precision", + "reader urword", "optional true"], + ["block dimensions", "name nlay", "type integer", + "reader urword", "optional false"], + ["block dimensions", "name ncpl", "type integer", + "reader urword", "optional false"], + ["block dimensions", "name nvert", "type integer", + "reader urword", "optional false"], + ["block griddata", "name top", "type double precision", + "shape (ncpl)", "reader readarray"], + ["block griddata", "name botm", "type double precision", + "shape (nlay, ncpl)", "reader readarray", "layered true"], + ["block griddata", "name idomain", "type integer", + "shape (nlay, ncpl)", "reader readarray", "layered true", + "optional true"], + ["block vertices", "name vertices", "type recarray iv xv yv", + "reader urword", "optional false"], + ["block vertices", "name iv", "type integer", "in_record true", + "tagged false", "reader urword", "optional false", + "numeric_index true"], + ["block vertices", "name xv", "type double precision", + "in_record true", "tagged false", "reader urword", + "optional false"], + ["block vertices", "name yv", "type double precision", + "in_record true", "tagged false", "reader urword", + "optional false"], + ["block cell2d", "name cell2d", + "type recarray icell2d xc yc ncvert icvert", "reader urword", + "optional false"], + ["block cell2d", "name icell2d", "type integer", + "in_record true", "tagged false", "reader urword", + "optional false", "numeric_index true"], + ["block cell2d", "name xc", "type double precision", + "in_record true", "tagged false", "reader urword", + "optional false"], + ["block cell2d", "name yc", "type double precision", + "in_record true", "tagged false", "reader urword", + "optional false"], + ["block cell2d", "name ncvert", "type integer", "in_record true", + "tagged false", "reader urword", "optional false"], + ["block cell2d", "name icvert", "type integer", "shape (ncvert)", + "in_record true", "tagged false", "reader urword", + "optional false", "numeric_index true"]] + + def __init__(self, model, loading_package=False, length_units=None, + nogrb=None, xorigin=None, yorigin=None, angrot=None, + nlay=None, ncpl=None, nvert=None, top=None, botm=None, + idomain=None, vertices=None, cell2d=None, filename=None, + pname=None, parent_file=None): + super(ModflowGwfdisv, self).__init__(model, "disv", filename, pname, + loading_package, parent_file) + + # set up variables + self.length_units = self.build_mfdata("length_units", length_units) + self.nogrb = self.build_mfdata("nogrb", nogrb) + self.xorigin = self.build_mfdata("xorigin", xorigin) + self.yorigin = self.build_mfdata("yorigin", yorigin) + self.angrot = self.build_mfdata("angrot", 
angrot) + self.nlay = self.build_mfdata("nlay", nlay) + self.ncpl = self.build_mfdata("ncpl", ncpl) + self.nvert = self.build_mfdata("nvert", nvert) + self.top = self.build_mfdata("top", top) + self.botm = self.build_mfdata("botm", botm) + self.idomain = self.build_mfdata("idomain", idomain) + self.vertices = self.build_mfdata("vertices", vertices) + self.cell2d = self.build_mfdata("cell2d", cell2d) + self._init_complete = True diff --git a/flopy/mf6/modflow/mfgwfdrn.py b/flopy/mf6/modflow/mfgwfdrn.py index 079d3db167..8b9c899dd6 100644 --- a/flopy/mf6/modflow/mfgwfdrn.py +++ b/flopy/mf6/modflow/mfgwfdrn.py @@ -1,215 +1,215 @@ -# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY -# mf6/utils/createpackages.py -from .. import mfpackage -from ..data.mfdatautil import ListTemplateGenerator - - -class ModflowGwfdrn(mfpackage.MFPackage): - """ - ModflowGwfdrn defines a drn package within a gwf6 model. - - Parameters - ---------- - model : MFModel - Model that this package is a part of. Package is automatically - added to model when it is initialized. - loading_package : bool - Do not set this parameter. It is intended for debugging and internal - processing purposes only. - auxiliary : [string] - * auxiliary (string) defines an array of one or more auxiliary variable - names. There is no limit on the number of auxiliary variables that - can be provided on this line; however, lists of information provided - in subsequent blocks must have a column of data for each auxiliary - variable name defined here. The number of auxiliary variables - detected on this line determines the value for naux. Comments cannot - be provided anywhere on this line as they will be interpreted as - auxiliary variable names. Auxiliary variables may not be used by the - package, but they will be available for use by other parts of the - program. The program will terminate with an error if auxiliary - variables are specified on more than one line in the options block. - auxmultname : string - * auxmultname (string) name of auxiliary variable to be used as - multiplier of drain conductance. - boundnames : boolean - * boundnames (boolean) keyword to indicate that boundary names may be - provided with the list of drain cells. - print_input : boolean - * print_input (boolean) keyword to indicate that the list of drain - information will be written to the listing file immediately after it - is read. - print_flows : boolean - * print_flows (boolean) keyword to indicate that the list of drain flow - rates will be printed to the listing file for every stress period - time step in which "BUDGET PRINT" is specified in Output Control. If - there is no Output Control option and "PRINT_FLOWS" is specified, - then flow rates are printed for the last time step of each stress - period. - save_flows : boolean - * save_flows (boolean) keyword to indicate that drain flow terms will - be written to the file specified with "BUDGET FILEOUT" in Output - Control. - timeseries : {varname:data} or timeseries data - * Contains data for the ts package. Data can be stored in a dictionary - containing data for the ts package with variable names as keys and - package data as values. Data just for the timeseries variable is also - acceptable. See ts package documentation for more information. - observations : {varname:data} or continuous data - * Contains data for the obs package. Data can be stored in a dictionary - containing data for the obs package with variable names as keys and - package data as values. 
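
A minimal sketch of the ModflowGwfdisv constructor documented above, assuming a one-layer grid of two triangular cells; the names, coordinates, and elevations are hypothetical, and a complete simulation would also need TDIS, IMS, and flow packages:

    import flopy

    sim = flopy.mf6.MFSimulation(sim_name="demo", sim_ws=".")
    gwf = flopy.mf6.ModflowGwf(sim, modelname="demo")

    # Two triangles sharing an edge; iv/icell2d/icvert are zero-based in FloPy,
    # and each icvert list walks the cell's vertices in clockwise order.
    vertices = [(0, 0.0, 0.0), (1, 1.0, 0.0), (2, 0.0, 1.0), (3, 1.0, 1.0)]
    cell2d = [(0, 0.33, 0.33, 3, [0, 2, 1]),
              (1, 0.67, 0.67, 3, [1, 2, 3])]
    disv = flopy.mf6.ModflowGwfdisv(gwf, nlay=1, ncpl=2, nvert=4,
                                    top=10.0, botm=0.0,
                                    vertices=vertices, cell2d=cell2d)
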
Data just for the observations variable is - also acceptable. See obs package documentation for more information. - mover : boolean - * mover (boolean) keyword to indicate that this instance of the Drain - Package can be used with the Water Mover (MVR) Package. When the - MOVER option is specified, additional memory is allocated within the - package to store the available, provided, and received water. - maxbound : integer - * maxbound (integer) integer value specifying the maximum number of - drains cells that will be specified for use during any stress period. - stress_period_data : [cellid, elev, cond, aux, boundname] - * cellid ((integer, ...)) is the cell identifier, and depends on the - type of grid that is used for the simulation. For a structured grid - that uses the DIS input file, CELLID is the layer, row, and column. - For a grid that uses the DISV input file, CELLID is the layer and - CELL2D number. If the model uses the unstructured discretization - (DISU) input file, CELLID is the node number for the cell. This - argument is an index variable, which means that it should be treated - as zero-based when working with FloPy and Python. Flopy will - automatically subtract one when loading index variables and add one - when writing index variables. - * elev (double) is the elevation of the drain. If the Options block - includes a TIMESERIESFILE entry (see the "Time-Variable Input" - section), values can be obtained from a time series by entering the - time-series name in place of a numeric value. - * cond (double) is the hydraulic conductance of the interface between - the aquifer and the drain. If the Options block includes a - TIMESERIESFILE entry (see the "Time-Variable Input" section), values - can be obtained from a time series by entering the time-series name - in place of a numeric value. - * aux (double) represents the values of the auxiliary variables for - each drain. The values of auxiliary variables must be present for - each drain. The values must be specified in the order of the - auxiliary variables specified in the OPTIONS block. If the package - supports time series and the Options block includes a TIMESERIESFILE - entry (see the "Time-Variable Input" section), values can be obtained - from a time series by entering the time-series name in place of a - numeric value. - * boundname (string) name of the drain cell. BOUNDNAME is an ASCII - character variable that can contain as many as 40 characters. If - BOUNDNAME contains spaces in it, then the entire name must be - enclosed within single quotes. - filename : String - File name for this package. - pname : String - Package name for this package. - parent_file : MFPackage - Parent package file that references this package. Only needed for - utility packages (mfutl*). For example, mfutllaktab package must have - a mfgwflak package parent_file. 
- - """ - auxiliary = ListTemplateGenerator(('gwf6', 'drn', 'options', - 'auxiliary')) - ts_filerecord = ListTemplateGenerator(('gwf6', 'drn', 'options', - 'ts_filerecord')) - obs_filerecord = ListTemplateGenerator(('gwf6', 'drn', 'options', - 'obs_filerecord')) - stress_period_data = ListTemplateGenerator(('gwf6', 'drn', 'period', - 'stress_period_data')) - package_abbr = "gwfdrn" - _package_type = "drn" - dfn_file_name = "gwf-drn.dfn" - - dfn = [["block options", "name auxiliary", "type string", - "shape (naux)", "reader urword", "optional true"], - ["block options", "name auxmultname", "type string", "shape", - "reader urword", "optional true"], - ["block options", "name boundnames", "type keyword", "shape", - "reader urword", "optional true"], - ["block options", "name print_input", "type keyword", - "reader urword", "optional true"], - ["block options", "name print_flows", "type keyword", - "reader urword", "optional true"], - ["block options", "name save_flows", "type keyword", - "reader urword", "optional true"], - ["block options", "name ts_filerecord", - "type record ts6 filein ts6_filename", "shape", "reader urword", - "tagged true", "optional true", "construct_package ts", - "construct_data timeseries", "parameter_name timeseries"], - ["block options", "name ts6", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name filein", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name ts6_filename", "type string", - "preserve_case true", "in_record true", "reader urword", - "optional false", "tagged false"], - ["block options", "name obs_filerecord", - "type record obs6 filein obs6_filename", "shape", "reader urword", - "tagged true", "optional true", "construct_package obs", - "construct_data continuous", "parameter_name observations"], - ["block options", "name obs6", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name obs6_filename", "type string", - "preserve_case true", "in_record true", "tagged false", - "reader urword", "optional false"], - ["block options", "name mover", "type keyword", "tagged true", - "reader urword", "optional true"], - ["block dimensions", "name maxbound", "type integer", - "reader urword", "optional false"], - ["block period", "name iper", "type integer", - "block_variable True", "in_record true", "tagged false", "shape", - "valid", "reader urword", "optional false"], - ["block period", "name stress_period_data", - "type recarray cellid elev cond aux boundname", - "shape (maxbound)", "reader urword"], - ["block period", "name cellid", "type integer", - "shape (ncelldim)", "tagged false", "in_record true", - "reader urword"], - ["block period", "name elev", "type double precision", "shape", - "tagged false", "in_record true", "reader urword", - "time_series true"], - ["block period", "name cond", "type double precision", "shape", - "tagged false", "in_record true", "reader urword", - "time_series true"], - ["block period", "name aux", "type double precision", - "in_record true", "tagged false", "shape (naux)", "reader urword", - "optional true", "time_series true"], - ["block period", "name boundname", "type string", "shape", - "tagged false", "in_record true", "reader urword", - "optional true"]] - - def __init__(self, model, loading_package=False, auxiliary=None, - auxmultname=None, boundnames=None, print_input=None, - print_flows=None, 
save_flows=None, timeseries=None, - observations=None, mover=None, maxbound=None, - stress_period_data=None, filename=None, pname=None, - parent_file=None): - super(ModflowGwfdrn, self).__init__(model, "drn", filename, pname, - loading_package, parent_file) - - # set up variables - self.auxiliary = self.build_mfdata("auxiliary", auxiliary) - self.auxmultname = self.build_mfdata("auxmultname", auxmultname) - self.boundnames = self.build_mfdata("boundnames", boundnames) - self.print_input = self.build_mfdata("print_input", print_input) - self.print_flows = self.build_mfdata("print_flows", print_flows) - self.save_flows = self.build_mfdata("save_flows", save_flows) - self._ts_filerecord = self.build_mfdata("ts_filerecord", - None) - self._ts_package = self.build_child_package("ts", timeseries, - "timeseries", - self._ts_filerecord) - self._obs_filerecord = self.build_mfdata("obs_filerecord", - None) - self._obs_package = self.build_child_package("obs", observations, - "continuous", - self._obs_filerecord) - self.mover = self.build_mfdata("mover", mover) - self.maxbound = self.build_mfdata("maxbound", maxbound) - self.stress_period_data = self.build_mfdata("stress_period_data", - stress_period_data) - self._init_complete = True +# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY +# mf6/utils/createpackages.py +from .. import mfpackage +from ..data.mfdatautil import ListTemplateGenerator + + +class ModflowGwfdrn(mfpackage.MFPackage): + """ + ModflowGwfdrn defines a drn package within a gwf6 model. + + Parameters + ---------- + model : MFModel + Model that this package is a part of. Package is automatically + added to model when it is initialized. + loading_package : bool + Do not set this parameter. It is intended for debugging and internal + processing purposes only. + auxiliary : [string] + * auxiliary (string) defines an array of one or more auxiliary variable + names. There is no limit on the number of auxiliary variables that + can be provided on this line; however, lists of information provided + in subsequent blocks must have a column of data for each auxiliary + variable name defined here. The number of auxiliary variables + detected on this line determines the value for naux. Comments cannot + be provided anywhere on this line as they will be interpreted as + auxiliary variable names. Auxiliary variables may not be used by the + package, but they will be available for use by other parts of the + program. The program will terminate with an error if auxiliary + variables are specified on more than one line in the options block. + auxmultname : string + * auxmultname (string) name of auxiliary variable to be used as + multiplier of drain conductance. + boundnames : boolean + * boundnames (boolean) keyword to indicate that boundary names may be + provided with the list of drain cells. + print_input : boolean + * print_input (boolean) keyword to indicate that the list of drain + information will be written to the listing file immediately after it + is read. + print_flows : boolean + * print_flows (boolean) keyword to indicate that the list of drain flow + rates will be printed to the listing file for every stress period + time step in which "BUDGET PRINT" is specified in Output Control. If + there is no Output Control option and "PRINT_FLOWS" is specified, + then flow rates are printed for the last time step of each stress + period. 
+    save_flows : boolean
+        * save_flows (boolean) keyword to indicate that drain flow terms will
+          be written to the file specified with "BUDGET FILEOUT" in Output
+          Control.
+    timeseries : {varname:data} or timeseries data
+        * Contains data for the ts package. Data can be stored in a dictionary
+          containing data for the ts package with variable names as keys and
+          package data as values. Data just for the timeseries variable is also
+          acceptable. See ts package documentation for more information.
+    observations : {varname:data} or continuous data
+        * Contains data for the obs package. Data can be stored in a dictionary
+          containing data for the obs package with variable names as keys and
+          package data as values. Data just for the observations variable is
+          also acceptable. See obs package documentation for more information.
+    mover : boolean
+        * mover (boolean) keyword to indicate that this instance of the Drain
+          Package can be used with the Water Mover (MVR) Package. When the
+          MOVER option is specified, additional memory is allocated within the
+          package to store the available, provided, and received water.
+    maxbound : integer
+        * maxbound (integer) integer value specifying the maximum number of
+          drain cells that will be specified for use during any stress period.
+    stress_period_data : [cellid, elev, cond, aux, boundname]
+        * cellid ((integer, ...)) is the cell identifier, and depends on the
+          type of grid that is used for the simulation. For a structured grid
+          that uses the DIS input file, CELLID is the layer, row, and column.
+          For a grid that uses the DISV input file, CELLID is the layer and
+          CELL2D number. If the model uses the unstructured discretization
+          (DISU) input file, CELLID is the node number for the cell. This
+          argument is an index variable, which means that it should be treated
+          as zero-based when working with FloPy and Python. Flopy will
+          automatically subtract one when loading index variables and add one
+          when writing index variables.
+        * elev (double) is the elevation of the drain. If the Options block
+          includes a TIMESERIESFILE entry (see the "Time-Variable Input"
+          section), values can be obtained from a time series by entering the
+          time-series name in place of a numeric value.
+        * cond (double) is the hydraulic conductance of the interface between
+          the aquifer and the drain. If the Options block includes a
+          TIMESERIESFILE entry (see the "Time-Variable Input" section), values
+          can be obtained from a time series by entering the time-series name
+          in place of a numeric value.
+        * aux (double) represents the values of the auxiliary variables for
+          each drain. The values of auxiliary variables must be present for
+          each drain. The values must be specified in the order of the
+          auxiliary variables specified in the OPTIONS block. If the package
+          supports time series and the Options block includes a TIMESERIESFILE
+          entry (see the "Time-Variable Input" section), values can be obtained
+          from a time series by entering the time-series name in place of a
+          numeric value.
+        * boundname (string) name of the drain cell. BOUNDNAME is an ASCII
+          character variable that can contain as many as 40 characters. If
+          BOUNDNAME contains spaces in it, then the entire name must be
+          enclosed within single quotes.
+    filename : String
+        File name for this package.
+    pname : String
+        Package name for this package.
+    parent_file : MFPackage
+        Parent package file that references this package. Only needed for
+        utility packages (mfutl*).
For example, mfutllaktab package must have + a mfgwflak package parent_file. + + """ + auxiliary = ListTemplateGenerator(('gwf6', 'drn', 'options', + 'auxiliary')) + ts_filerecord = ListTemplateGenerator(('gwf6', 'drn', 'options', + 'ts_filerecord')) + obs_filerecord = ListTemplateGenerator(('gwf6', 'drn', 'options', + 'obs_filerecord')) + stress_period_data = ListTemplateGenerator(('gwf6', 'drn', 'period', + 'stress_period_data')) + package_abbr = "gwfdrn" + _package_type = "drn" + dfn_file_name = "gwf-drn.dfn" + + dfn = [["block options", "name auxiliary", "type string", + "shape (naux)", "reader urword", "optional true"], + ["block options", "name auxmultname", "type string", "shape", + "reader urword", "optional true"], + ["block options", "name boundnames", "type keyword", "shape", + "reader urword", "optional true"], + ["block options", "name print_input", "type keyword", + "reader urword", "optional true"], + ["block options", "name print_flows", "type keyword", + "reader urword", "optional true"], + ["block options", "name save_flows", "type keyword", + "reader urword", "optional true"], + ["block options", "name ts_filerecord", + "type record ts6 filein ts6_filename", "shape", "reader urword", + "tagged true", "optional true", "construct_package ts", + "construct_data timeseries", "parameter_name timeseries"], + ["block options", "name ts6", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name filein", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name ts6_filename", "type string", + "preserve_case true", "in_record true", "reader urword", + "optional false", "tagged false"], + ["block options", "name obs_filerecord", + "type record obs6 filein obs6_filename", "shape", "reader urword", + "tagged true", "optional true", "construct_package obs", + "construct_data continuous", "parameter_name observations"], + ["block options", "name obs6", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name obs6_filename", "type string", + "preserve_case true", "in_record true", "tagged false", + "reader urword", "optional false"], + ["block options", "name mover", "type keyword", "tagged true", + "reader urword", "optional true"], + ["block dimensions", "name maxbound", "type integer", + "reader urword", "optional false"], + ["block period", "name iper", "type integer", + "block_variable True", "in_record true", "tagged false", "shape", + "valid", "reader urword", "optional false"], + ["block period", "name stress_period_data", + "type recarray cellid elev cond aux boundname", + "shape (maxbound)", "reader urword"], + ["block period", "name cellid", "type integer", + "shape (ncelldim)", "tagged false", "in_record true", + "reader urword"], + ["block period", "name elev", "type double precision", "shape", + "tagged false", "in_record true", "reader urword", + "time_series true"], + ["block period", "name cond", "type double precision", "shape", + "tagged false", "in_record true", "reader urword", + "time_series true"], + ["block period", "name aux", "type double precision", + "in_record true", "tagged false", "shape (naux)", "reader urword", + "optional true", "time_series true"], + ["block period", "name boundname", "type string", "shape", + "tagged false", "in_record true", "reader urword", + "optional true"]] + + def __init__(self, model, loading_package=False, auxiliary=None, + 
auxmultname=None, boundnames=None, print_input=None, + print_flows=None, save_flows=None, timeseries=None, + observations=None, mover=None, maxbound=None, + stress_period_data=None, filename=None, pname=None, + parent_file=None): + super(ModflowGwfdrn, self).__init__(model, "drn", filename, pname, + loading_package, parent_file) + + # set up variables + self.auxiliary = self.build_mfdata("auxiliary", auxiliary) + self.auxmultname = self.build_mfdata("auxmultname", auxmultname) + self.boundnames = self.build_mfdata("boundnames", boundnames) + self.print_input = self.build_mfdata("print_input", print_input) + self.print_flows = self.build_mfdata("print_flows", print_flows) + self.save_flows = self.build_mfdata("save_flows", save_flows) + self._ts_filerecord = self.build_mfdata("ts_filerecord", + None) + self._ts_package = self.build_child_package("ts", timeseries, + "timeseries", + self._ts_filerecord) + self._obs_filerecord = self.build_mfdata("obs_filerecord", + None) + self._obs_package = self.build_child_package("obs", observations, + "continuous", + self._obs_filerecord) + self.mover = self.build_mfdata("mover", mover) + self.maxbound = self.build_mfdata("maxbound", maxbound) + self.stress_period_data = self.build_mfdata("stress_period_data", + stress_period_data) + self._init_complete = True diff --git a/flopy/mf6/modflow/mfgwfevt.py b/flopy/mf6/modflow/mfgwfevt.py index 5b278dde2a..179bd85b8b 100644 --- a/flopy/mf6/modflow/mfgwfevt.py +++ b/flopy/mf6/modflow/mfgwfevt.py @@ -1,254 +1,254 @@ -# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY -# mf6/utils/createpackages.py -from .. import mfpackage -from ..data.mfdatautil import ListTemplateGenerator - - -class ModflowGwfevt(mfpackage.MFPackage): - """ - ModflowGwfevt defines a evt package within a gwf6 model. - - Parameters - ---------- - model : MFModel - Model that this package is a part of. Package is automatically - added to model when it is initialized. - loading_package : bool - Do not set this parameter. It is intended for debugging and internal - processing purposes only. - fixed_cell : boolean - * fixed_cell (boolean) indicates that evapotranspiration will not be - reassigned to a cell underlying the cell specified in the list if the - specified cell is inactive. - auxiliary : [string] - * auxiliary (string) defines an array of one or more auxiliary variable - names. There is no limit on the number of auxiliary variables that - can be provided on this line; however, lists of information provided - in subsequent blocks must have a column of data for each auxiliary - variable name defined here. The number of auxiliary variables - detected on this line determines the value for naux. Comments cannot - be provided anywhere on this line as they will be interpreted as - auxiliary variable names. Auxiliary variables may not be used by the - package, but they will be available for use by other parts of the - program. The program will terminate with an error if auxiliary - variables are specified on more than one line in the options block. - auxmultname : string - * auxmultname (string) name of auxiliary variable to be used as - multiplier of evapotranspiration rate. - boundnames : boolean - * boundnames (boolean) keyword to indicate that boundary names may be - provided with the list of evapotranspiration cells. - print_input : boolean - * print_input (boolean) keyword to indicate that the list of - evapotranspiration information will be written to the listing file - immediately after it is read. 
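
A minimal usage sketch for the ModflowGwfdrn constructor defined above; gwf is assumed to be an existing flopy.mf6.ModflowGwf model on a structured (DIS) grid, and the location, elevation, and conductance are hypothetical:

    # One drain in layer 1, row 1, column 1 (cellid is zero-based in FloPy).
    drn = flopy.mf6.ModflowGwfdrn(
        gwf,
        maxbound=1,
        stress_period_data=[[(0, 0, 0), 9.5, 100.0]],  # cellid, elev, cond
    )
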
- print_flows : boolean - * print_flows (boolean) keyword to indicate that the list of - evapotranspiration flow rates will be printed to the listing file for - every stress period time step in which "BUDGET PRINT" is specified in - Output Control. If there is no Output Control option and - "PRINT_FLOWS" is specified, then flow rates are printed for the last - time step of each stress period. - save_flows : boolean - * save_flows (boolean) keyword to indicate that evapotranspiration flow - terms will be written to the file specified with "BUDGET FILEOUT" in - Output Control. - timeseries : {varname:data} or timeseries data - * Contains data for the ts package. Data can be stored in a dictionary - containing data for the ts package with variable names as keys and - package data as values. Data just for the timeseries variable is also - acceptable. See ts package documentation for more information. - observations : {varname:data} or continuous data - * Contains data for the obs package. Data can be stored in a dictionary - containing data for the obs package with variable names as keys and - package data as values. Data just for the observations variable is - also acceptable. See obs package documentation for more information. - surf_rate_specified : boolean - * surf_rate_specified (boolean) indicates that the proportion of the - evapotranspiration rate at the ET surface will be specified as PETM0 - in list input. - maxbound : integer - * maxbound (integer) integer value specifying the maximum number of - evapotranspiration cells cells that will be specified for use during - any stress period. - nseg : integer - * nseg (integer) number of ET segments. Default is one. When NSEG is - greater than 1, PXDP and PETM arrays must be specified NSEG - 1 times - each, in order from the uppermost segment down. PXDP defines the - extinction-depth proportion at the bottom of a segment. PETM defines - the proportion of the maximum ET flux rate at the bottom of a - segment. - stress_period_data : [cellid, surface, rate, depth, pxdp, petm, petm0, aux, - boundname] - * cellid ((integer, ...)) is the cell identifier, and depends on the - type of grid that is used for the simulation. For a structured grid - that uses the DIS input file, CELLID is the layer, row, and column. - For a grid that uses the DISV input file, CELLID is the layer and - CELL2D number. If the model uses the unstructured discretization - (DISU) input file, CELLID is the node number for the cell. This - argument is an index variable, which means that it should be treated - as zero-based when working with FloPy and Python. Flopy will - automatically subtract one when loading index variables and add one - when writing index variables. - * surface (double) is the elevation of the ET surface (:math:`L`). A - time-series name may be specified. - * rate (double) is the maximum ET flux rate (:math:`LT^{-1}`). A time- - series name may be specified. - * depth (double) is the ET extinction depth (:math:`L`). A time-series - name may be specified. - * pxdp (double) is the proportion of the ET extinction depth at the - bottom of a segment (dimensionless). A time-series name may be - specified. - * petm (double) is the proportion of the maximum ET flux rate at the - bottom of a segment (dimensionless). A time-series name may be - specified. - * petm0 (double) is the proportion of the maximum ET flux rate that - will apply when head is at or above the ET surface (dimensionless). - PETM0 is read only when the SURF_RATE_SPECIFIED option is used. 
A - time-series name may be specified. - * aux (double) represents the values of the auxiliary variables for - each evapotranspiration. The values of auxiliary variables must be - present for each evapotranspiration. The values must be specified in - the order of the auxiliary variables specified in the OPTIONS block. - If the package supports time series and the Options block includes a - TIMESERIESFILE entry (see the "Time-Variable Input" section), values - can be obtained from a time series by entering the time-series name - in place of a numeric value. - * boundname (string) name of the evapotranspiration cell. BOUNDNAME is - an ASCII character variable that can contain as many as 40 - characters. If BOUNDNAME contains spaces in it, then the entire name - must be enclosed within single quotes. - filename : String - File name for this package. - pname : String - Package name for this package. - parent_file : MFPackage - Parent package file that references this package. Only needed for - utility packages (mfutl*). For example, mfutllaktab package must have - a mfgwflak package parent_file. - - """ - auxiliary = ListTemplateGenerator(('gwf6', 'evt', 'options', - 'auxiliary')) - ts_filerecord = ListTemplateGenerator(('gwf6', 'evt', 'options', - 'ts_filerecord')) - obs_filerecord = ListTemplateGenerator(('gwf6', 'evt', 'options', - 'obs_filerecord')) - stress_period_data = ListTemplateGenerator(('gwf6', 'evt', 'period', - 'stress_period_data')) - package_abbr = "gwfevt" - _package_type = "evt" - dfn_file_name = "gwf-evt.dfn" - - dfn = [["block options", "name fixed_cell", "type keyword", "shape", - "reader urword", "optional true"], - ["block options", "name auxiliary", "type string", - "shape (naux)", "reader urword", "optional true"], - ["block options", "name auxmultname", "type string", "shape", - "reader urword", "optional true"], - ["block options", "name boundnames", "type keyword", "shape", - "reader urword", "optional true"], - ["block options", "name print_input", "type keyword", - "reader urword", "optional true"], - ["block options", "name print_flows", "type keyword", - "reader urword", "optional true"], - ["block options", "name save_flows", "type keyword", - "reader urword", "optional true"], - ["block options", "name ts_filerecord", - "type record ts6 filein ts6_filename", "shape", "reader urword", - "tagged true", "optional true", "construct_package ts", - "construct_data timeseries", "parameter_name timeseries"], - ["block options", "name ts6", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name filein", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name ts6_filename", "type string", - "preserve_case true", "in_record true", "reader urword", - "optional false", "tagged false"], - ["block options", "name obs_filerecord", - "type record obs6 filein obs6_filename", "shape", "reader urword", - "tagged true", "optional true", "construct_package obs", - "construct_data continuous", "parameter_name observations"], - ["block options", "name obs6", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name obs6_filename", "type string", - "preserve_case true", "in_record true", "tagged false", - "reader urword", "optional false"], - ["block options", "name surf_rate_specified", "type keyword", - "reader urword", "optional true"], - ["block dimensions", "name maxbound", "type 
integer", - "reader urword", "optional false"], - ["block dimensions", "name nseg", "type integer", - "reader urword", "optional false"], - ["block period", "name iper", "type integer", - "block_variable True", "in_record true", "tagged false", "shape", - "valid", "reader urword", "optional false"], - ["block period", "name stress_period_data", - "type recarray cellid surface rate depth pxdp petm petm0 aux " - "boundname", - "shape (maxbound)", "reader urword"], - ["block period", "name cellid", "type integer", - "shape (ncelldim)", "tagged false", "in_record true", - "reader urword"], - ["block period", "name surface", "type double precision", - "shape", "tagged false", "in_record true", "reader urword", - "time_series true"], - ["block period", "name rate", "type double precision", "shape", - "tagged false", "in_record true", "reader urword", - "time_series true"], - ["block period", "name depth", "type double precision", "shape", - "tagged false", "in_record true", "reader urword", - "time_series true"], - ["block period", "name pxdp", "type double precision", - "shape (nseg-1)", "tagged false", "in_record true", - "reader urword", "time_series true"], - ["block period", "name petm", "type double precision", - "shape (nseg-1)", "tagged false", "in_record true", - "reader urword", "time_series true"], - ["block period", "name petm0", "type double precision", "shape", - "tagged false", "in_record true", "reader urword", - "optional true", "time_series true"], - ["block period", "name aux", "type double precision", - "in_record true", "tagged false", "shape (naux)", "reader urword", - "optional true", "time_series true"], - ["block period", "name boundname", "type string", "shape", - "tagged false", "in_record true", "reader urword", - "optional true"]] - - def __init__(self, model, loading_package=False, fixed_cell=None, - auxiliary=None, auxmultname=None, boundnames=None, - print_input=None, print_flows=None, save_flows=None, - timeseries=None, observations=None, surf_rate_specified=None, - maxbound=None, nseg=None, stress_period_data=None, - filename=None, pname=None, parent_file=None): - super(ModflowGwfevt, self).__init__(model, "evt", filename, pname, - loading_package, parent_file) - - # set up variables - self.fixed_cell = self.build_mfdata("fixed_cell", fixed_cell) - self.auxiliary = self.build_mfdata("auxiliary", auxiliary) - self.auxmultname = self.build_mfdata("auxmultname", auxmultname) - self.boundnames = self.build_mfdata("boundnames", boundnames) - self.print_input = self.build_mfdata("print_input", print_input) - self.print_flows = self.build_mfdata("print_flows", print_flows) - self.save_flows = self.build_mfdata("save_flows", save_flows) - self._ts_filerecord = self.build_mfdata("ts_filerecord", - None) - self._ts_package = self.build_child_package("ts", timeseries, - "timeseries", - self._ts_filerecord) - self._obs_filerecord = self.build_mfdata("obs_filerecord", - None) - self._obs_package = self.build_child_package("obs", observations, - "continuous", - self._obs_filerecord) - self.surf_rate_specified = self.build_mfdata("surf_rate_specified", - surf_rate_specified) - self.maxbound = self.build_mfdata("maxbound", maxbound) - self.nseg = self.build_mfdata("nseg", nseg) - self.stress_period_data = self.build_mfdata("stress_period_data", - stress_period_data) - self._init_complete = True +# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY +# mf6/utils/createpackages.py +from .. 
import mfpackage
+from ..data.mfdatautil import ListTemplateGenerator
+
+
+class ModflowGwfevt(mfpackage.MFPackage):
+    """
+    ModflowGwfevt defines an evt package within a gwf6 model.
+
+    Parameters
+    ----------
+    model : MFModel
+        Model that this package is a part of. Package is automatically
+        added to model when it is initialized.
+    loading_package : bool
+        Do not set this parameter. It is intended for debugging and internal
+        processing purposes only.
+    fixed_cell : boolean
+        * fixed_cell (boolean) indicates that evapotranspiration will not be
+          reassigned to a cell underlying the cell specified in the list if the
+          specified cell is inactive.
+    auxiliary : [string]
+        * auxiliary (string) defines an array of one or more auxiliary variable
+          names. There is no limit on the number of auxiliary variables that
+          can be provided on this line; however, lists of information provided
+          in subsequent blocks must have a column of data for each auxiliary
+          variable name defined here. The number of auxiliary variables
+          detected on this line determines the value for naux. Comments cannot
+          be provided anywhere on this line as they will be interpreted as
+          auxiliary variable names. Auxiliary variables may not be used by the
+          package, but they will be available for use by other parts of the
+          program. The program will terminate with an error if auxiliary
+          variables are specified on more than one line in the options block.
+    auxmultname : string
+        * auxmultname (string) name of auxiliary variable to be used as
+          multiplier of evapotranspiration rate.
+    boundnames : boolean
+        * boundnames (boolean) keyword to indicate that boundary names may be
+          provided with the list of evapotranspiration cells.
+    print_input : boolean
+        * print_input (boolean) keyword to indicate that the list of
+          evapotranspiration information will be written to the listing file
+          immediately after it is read.
+    print_flows : boolean
+        * print_flows (boolean) keyword to indicate that the list of
+          evapotranspiration flow rates will be printed to the listing file for
+          every stress period time step in which "BUDGET PRINT" is specified in
+          Output Control. If there is no Output Control option and
+          "PRINT_FLOWS" is specified, then flow rates are printed for the last
+          time step of each stress period.
+    save_flows : boolean
+        * save_flows (boolean) keyword to indicate that evapotranspiration flow
+          terms will be written to the file specified with "BUDGET FILEOUT" in
+          Output Control.
+    timeseries : {varname:data} or timeseries data
+        * Contains data for the ts package. Data can be stored in a dictionary
+          containing data for the ts package with variable names as keys and
+          package data as values. Data just for the timeseries variable is also
+          acceptable. See ts package documentation for more information.
+    observations : {varname:data} or continuous data
+        * Contains data for the obs package. Data can be stored in a dictionary
+          containing data for the obs package with variable names as keys and
+          package data as values. Data just for the observations variable is
+          also acceptable. See obs package documentation for more information.
+    surf_rate_specified : boolean
+        * surf_rate_specified (boolean) indicates that the proportion of the
+          evapotranspiration rate at the ET surface will be specified as PETM0
+          in list input.
+    maxbound : integer
+        * maxbound (integer) integer value specifying the maximum number of
+          evapotranspiration cells that will be specified for use during
+          any stress period.
+ nseg : integer + * nseg (integer) number of ET segments. Default is one. When NSEG is + greater than 1, PXDP and PETM arrays must be specified NSEG - 1 times + each, in order from the uppermost segment down. PXDP defines the + extinction-depth proportion at the bottom of a segment. PETM defines + the proportion of the maximum ET flux rate at the bottom of a + segment. + stress_period_data : [cellid, surface, rate, depth, pxdp, petm, petm0, aux, + boundname] + * cellid ((integer, ...)) is the cell identifier, and depends on the + type of grid that is used for the simulation. For a structured grid + that uses the DIS input file, CELLID is the layer, row, and column. + For a grid that uses the DISV input file, CELLID is the layer and + CELL2D number. If the model uses the unstructured discretization + (DISU) input file, CELLID is the node number for the cell. This + argument is an index variable, which means that it should be treated + as zero-based when working with FloPy and Python. Flopy will + automatically subtract one when loading index variables and add one + when writing index variables. + * surface (double) is the elevation of the ET surface (:math:`L`). A + time-series name may be specified. + * rate (double) is the maximum ET flux rate (:math:`LT^{-1}`). A time- + series name may be specified. + * depth (double) is the ET extinction depth (:math:`L`). A time-series + name may be specified. + * pxdp (double) is the proportion of the ET extinction depth at the + bottom of a segment (dimensionless). A time-series name may be + specified. + * petm (double) is the proportion of the maximum ET flux rate at the + bottom of a segment (dimensionless). A time-series name may be + specified. + * petm0 (double) is the proportion of the maximum ET flux rate that + will apply when head is at or above the ET surface (dimensionless). + PETM0 is read only when the SURF_RATE_SPECIFIED option is used. A + time-series name may be specified. + * aux (double) represents the values of the auxiliary variables for + each evapotranspiration. The values of auxiliary variables must be + present for each evapotranspiration. The values must be specified in + the order of the auxiliary variables specified in the OPTIONS block. + If the package supports time series and the Options block includes a + TIMESERIESFILE entry (see the "Time-Variable Input" section), values + can be obtained from a time series by entering the time-series name + in place of a numeric value. + * boundname (string) name of the evapotranspiration cell. BOUNDNAME is + an ASCII character variable that can contain as many as 40 + characters. If BOUNDNAME contains spaces in it, then the entire name + must be enclosed within single quotes. + filename : String + File name for this package. + pname : String + Package name for this package. + parent_file : MFPackage + Parent package file that references this package. Only needed for + utility packages (mfutl*). For example, mfutllaktab package must have + a mfgwflak package parent_file. 
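
To make the NSEG accounting above concrete: with NSEG = 3, each stress period record carries NSEG - 1 = 2 PXDP values and 2 PETM values between DEPTH and the optional PETM0. One plausible flattened record with hypothetical values (this layout is an assumption, not taken from this patch):

    #      cellid     surface  rate    depth  pxdp 1-2   petm 1-2
    rec = [(0, 0, 0), 10.0,    4.0e-3, 5.0,   0.3, 0.7,  0.8, 0.4]
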
+ + """ + auxiliary = ListTemplateGenerator(('gwf6', 'evt', 'options', + 'auxiliary')) + ts_filerecord = ListTemplateGenerator(('gwf6', 'evt', 'options', + 'ts_filerecord')) + obs_filerecord = ListTemplateGenerator(('gwf6', 'evt', 'options', + 'obs_filerecord')) + stress_period_data = ListTemplateGenerator(('gwf6', 'evt', 'period', + 'stress_period_data')) + package_abbr = "gwfevt" + _package_type = "evt" + dfn_file_name = "gwf-evt.dfn" + + dfn = [["block options", "name fixed_cell", "type keyword", "shape", + "reader urword", "optional true"], + ["block options", "name auxiliary", "type string", + "shape (naux)", "reader urword", "optional true"], + ["block options", "name auxmultname", "type string", "shape", + "reader urword", "optional true"], + ["block options", "name boundnames", "type keyword", "shape", + "reader urword", "optional true"], + ["block options", "name print_input", "type keyword", + "reader urword", "optional true"], + ["block options", "name print_flows", "type keyword", + "reader urword", "optional true"], + ["block options", "name save_flows", "type keyword", + "reader urword", "optional true"], + ["block options", "name ts_filerecord", + "type record ts6 filein ts6_filename", "shape", "reader urword", + "tagged true", "optional true", "construct_package ts", + "construct_data timeseries", "parameter_name timeseries"], + ["block options", "name ts6", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name filein", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name ts6_filename", "type string", + "preserve_case true", "in_record true", "reader urword", + "optional false", "tagged false"], + ["block options", "name obs_filerecord", + "type record obs6 filein obs6_filename", "shape", "reader urword", + "tagged true", "optional true", "construct_package obs", + "construct_data continuous", "parameter_name observations"], + ["block options", "name obs6", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name obs6_filename", "type string", + "preserve_case true", "in_record true", "tagged false", + "reader urword", "optional false"], + ["block options", "name surf_rate_specified", "type keyword", + "reader urword", "optional true"], + ["block dimensions", "name maxbound", "type integer", + "reader urword", "optional false"], + ["block dimensions", "name nseg", "type integer", + "reader urword", "optional false"], + ["block period", "name iper", "type integer", + "block_variable True", "in_record true", "tagged false", "shape", + "valid", "reader urword", "optional false"], + ["block period", "name stress_period_data", + "type recarray cellid surface rate depth pxdp petm petm0 aux " + "boundname", + "shape (maxbound)", "reader urword"], + ["block period", "name cellid", "type integer", + "shape (ncelldim)", "tagged false", "in_record true", + "reader urword"], + ["block period", "name surface", "type double precision", + "shape", "tagged false", "in_record true", "reader urword", + "time_series true"], + ["block period", "name rate", "type double precision", "shape", + "tagged false", "in_record true", "reader urword", + "time_series true"], + ["block period", "name depth", "type double precision", "shape", + "tagged false", "in_record true", "reader urword", + "time_series true"], + ["block period", "name pxdp", "type double precision", + "shape (nseg-1)", "tagged 
false", "in_record true", + "reader urword", "time_series true"], + ["block period", "name petm", "type double precision", + "shape (nseg-1)", "tagged false", "in_record true", + "reader urword", "time_series true"], + ["block period", "name petm0", "type double precision", "shape", + "tagged false", "in_record true", "reader urword", + "optional true", "time_series true"], + ["block period", "name aux", "type double precision", + "in_record true", "tagged false", "shape (naux)", "reader urword", + "optional true", "time_series true"], + ["block period", "name boundname", "type string", "shape", + "tagged false", "in_record true", "reader urword", + "optional true"]] + + def __init__(self, model, loading_package=False, fixed_cell=None, + auxiliary=None, auxmultname=None, boundnames=None, + print_input=None, print_flows=None, save_flows=None, + timeseries=None, observations=None, surf_rate_specified=None, + maxbound=None, nseg=None, stress_period_data=None, + filename=None, pname=None, parent_file=None): + super(ModflowGwfevt, self).__init__(model, "evt", filename, pname, + loading_package, parent_file) + + # set up variables + self.fixed_cell = self.build_mfdata("fixed_cell", fixed_cell) + self.auxiliary = self.build_mfdata("auxiliary", auxiliary) + self.auxmultname = self.build_mfdata("auxmultname", auxmultname) + self.boundnames = self.build_mfdata("boundnames", boundnames) + self.print_input = self.build_mfdata("print_input", print_input) + self.print_flows = self.build_mfdata("print_flows", print_flows) + self.save_flows = self.build_mfdata("save_flows", save_flows) + self._ts_filerecord = self.build_mfdata("ts_filerecord", + None) + self._ts_package = self.build_child_package("ts", timeseries, + "timeseries", + self._ts_filerecord) + self._obs_filerecord = self.build_mfdata("obs_filerecord", + None) + self._obs_package = self.build_child_package("obs", observations, + "continuous", + self._obs_filerecord) + self.surf_rate_specified = self.build_mfdata("surf_rate_specified", + surf_rate_specified) + self.maxbound = self.build_mfdata("maxbound", maxbound) + self.nseg = self.build_mfdata("nseg", nseg) + self.stress_period_data = self.build_mfdata("stress_period_data", + stress_period_data) + self._init_complete = True diff --git a/flopy/mf6/modflow/mfgwfghb.py b/flopy/mf6/modflow/mfgwfghb.py index 4822e2d503..a320954fdd 100644 --- a/flopy/mf6/modflow/mfgwfghb.py +++ b/flopy/mf6/modflow/mfgwfghb.py @@ -1,217 +1,217 @@ -# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY -# mf6/utils/createpackages.py -from .. import mfpackage -from ..data.mfdatautil import ListTemplateGenerator - - -class ModflowGwfghb(mfpackage.MFPackage): - """ - ModflowGwfghb defines a ghb package within a gwf6 model. - - Parameters - ---------- - model : MFModel - Model that this package is a part of. Package is automatically - added to model when it is initialized. - loading_package : bool - Do not set this parameter. It is intended for debugging and internal - processing purposes only. - auxiliary : [string] - * auxiliary (string) defines an array of one or more auxiliary variable - names. There is no limit on the number of auxiliary variables that - can be provided on this line; however, lists of information provided - in subsequent blocks must have a column of data for each auxiliary - variable name defined here. The number of auxiliary variables - detected on this line determines the value for naux. 
Comments cannot - be provided anywhere on this line as they will be interpreted as - auxiliary variable names. Auxiliary variables may not be used by the - package, but they will be available for use by other parts of the - program. The program will terminate with an error if auxiliary - variables are specified on more than one line in the options block. - auxmultname : string - * auxmultname (string) name of auxiliary variable to be used as - multiplier of general-head boundary conductance. - boundnames : boolean - * boundnames (boolean) keyword to indicate that boundary names may be - provided with the list of general-head boundary cells. - print_input : boolean - * print_input (boolean) keyword to indicate that the list of general- - head boundary information will be written to the listing file - immediately after it is read. - print_flows : boolean - * print_flows (boolean) keyword to indicate that the list of general- - head boundary flow rates will be printed to the listing file for - every stress period time step in which "BUDGET PRINT" is specified in - Output Control. If there is no Output Control option and - "PRINT_FLOWS" is specified, then flow rates are printed for the last - time step of each stress period. - save_flows : boolean - * save_flows (boolean) keyword to indicate that general-head boundary - flow terms will be written to the file specified with "BUDGET - FILEOUT" in Output Control. - timeseries : {varname:data} or timeseries data - * Contains data for the ts package. Data can be stored in a dictionary - containing data for the ts package with variable names as keys and - package data as values. Data just for the timeseries variable is also - acceptable. See ts package documentation for more information. - observations : {varname:data} or continuous data - * Contains data for the obs package. Data can be stored in a dictionary - containing data for the obs package with variable names as keys and - package data as values. Data just for the observations variable is - also acceptable. See obs package documentation for more information. - mover : boolean - * mover (boolean) keyword to indicate that this instance of the - General-Head Boundary Package can be used with the Water Mover (MVR) - Package. When the MOVER option is specified, additional memory is - allocated within the package to store the available, provided, and - received water. - maxbound : integer - * maxbound (integer) integer value specifying the maximum number of - general-head boundary cells that will be specified for use during any - stress period. - stress_period_data : [cellid, bhead, cond, aux, boundname] - * cellid ((integer, ...)) is the cell identifier, and depends on the - type of grid that is used for the simulation. For a structured grid - that uses the DIS input file, CELLID is the layer, row, and column. - For a grid that uses the DISV input file, CELLID is the layer and - CELL2D number. If the model uses the unstructured discretization - (DISU) input file, CELLID is the node number for the cell. This - argument is an index variable, which means that it should be treated - as zero-based when working with FloPy and Python. Flopy will - automatically subtract one when loading index variables and add one - when writing index variables. - * bhead (double) is the boundary head. If the Options block includes a - TIMESERIESFILE entry (see the "Time-Variable Input" section), values - can be obtained from a time series by entering the time-series name - in place of a numeric value. 
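
The time-series substitution described above looks like the following in FloPy: pass the series name in place of the numeric BHEAD, then define the series through the TS subpackage. The ts.initialize call and all values are a hedged sketch based on the ts package documentation, not on this patch:

    ghb = flopy.mf6.ModflowGwfghb(
        gwf,  # an existing flopy.mf6.ModflowGwf model (assumed)
        maxbound=1,
        stress_period_data=[[(0, 0, 0), "tide", 50.0]],  # bhead from series "tide"
    )
    ghb.ts.initialize(
        filename="tide.ts",
        timeseries=[(0.0, 1.0), (10.0, 2.0)],
        time_series_namerecord="tide",
        interpolation_methodrecord="linear",
    )
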
- * cond (double) is the hydraulic conductance of the interface between - the aquifer cell and the boundary. If the Options block includes a - TIMESERIESFILE entry (see the "Time-Variable Input" section), values - can be obtained from a time series by entering the time-series name - in place of a numeric value. - * aux (double) represents the values of the auxiliary variables for - each general-head boundary. The values of auxiliary variables must be - present for each general-head boundary. The values must be specified - in the order of the auxiliary variables specified in the OPTIONS - block. If the package supports time series and the Options block - includes a TIMESERIESFILE entry (see the "Time-Variable Input" - section), values can be obtained from a time series by entering the - time-series name in place of a numeric value. - * boundname (string) name of the general-head boundary cell. BOUNDNAME - is an ASCII character variable that can contain as many as 40 - characters. If BOUNDNAME contains spaces in it, then the entire name - must be enclosed within single quotes. - filename : String - File name for this package. - pname : String - Package name for this package. - parent_file : MFPackage - Parent package file that references this package. Only needed for - utility packages (mfutl*). For example, mfutllaktab package must have - a mfgwflak package parent_file. - - """ - auxiliary = ListTemplateGenerator(('gwf6', 'ghb', 'options', - 'auxiliary')) - ts_filerecord = ListTemplateGenerator(('gwf6', 'ghb', 'options', - 'ts_filerecord')) - obs_filerecord = ListTemplateGenerator(('gwf6', 'ghb', 'options', - 'obs_filerecord')) - stress_period_data = ListTemplateGenerator(('gwf6', 'ghb', 'period', - 'stress_period_data')) - package_abbr = "gwfghb" - _package_type = "ghb" - dfn_file_name = "gwf-ghb.dfn" - - dfn = [["block options", "name auxiliary", "type string", - "shape (naux)", "reader urword", "optional true"], - ["block options", "name auxmultname", "type string", "shape", - "reader urword", "optional true"], - ["block options", "name boundnames", "type keyword", "shape", - "reader urword", "optional true"], - ["block options", "name print_input", "type keyword", - "reader urword", "optional true"], - ["block options", "name print_flows", "type keyword", - "reader urword", "optional true"], - ["block options", "name save_flows", "type keyword", - "reader urword", "optional true"], - ["block options", "name ts_filerecord", - "type record ts6 filein ts6_filename", "shape", "reader urword", - "tagged true", "optional true", "construct_package ts", - "construct_data timeseries", "parameter_name timeseries"], - ["block options", "name ts6", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name filein", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name ts6_filename", "type string", - "preserve_case true", "in_record true", "reader urword", - "optional false", "tagged false"], - ["block options", "name obs_filerecord", - "type record obs6 filein obs6_filename", "shape", "reader urword", - "tagged true", "optional true", "construct_package obs", - "construct_data continuous", "parameter_name observations"], - ["block options", "name obs6", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name obs6_filename", "type string", - "preserve_case true", "in_record true", "tagged false", - 
"reader urword", "optional false"], - ["block options", "name mover", "type keyword", "tagged true", - "reader urword", "optional true"], - ["block dimensions", "name maxbound", "type integer", - "reader urword", "optional false"], - ["block period", "name iper", "type integer", - "block_variable True", "in_record true", "tagged false", "shape", - "valid", "reader urword", "optional false"], - ["block period", "name stress_period_data", - "type recarray cellid bhead cond aux boundname", - "shape (maxbound)", "reader urword"], - ["block period", "name cellid", "type integer", - "shape (ncelldim)", "tagged false", "in_record true", - "reader urword"], - ["block period", "name bhead", "type double precision", "shape", - "tagged false", "in_record true", "reader urword", - "time_series true"], - ["block period", "name cond", "type double precision", "shape", - "tagged false", "in_record true", "reader urword", - "time_series true"], - ["block period", "name aux", "type double precision", - "in_record true", "tagged false", "shape (naux)", "reader urword", - "optional true", "time_series true"], - ["block period", "name boundname", "type string", "shape", - "tagged false", "in_record true", "reader urword", - "optional true"]] - - def __init__(self, model, loading_package=False, auxiliary=None, - auxmultname=None, boundnames=None, print_input=None, - print_flows=None, save_flows=None, timeseries=None, - observations=None, mover=None, maxbound=None, - stress_period_data=None, filename=None, pname=None, - parent_file=None): - super(ModflowGwfghb, self).__init__(model, "ghb", filename, pname, - loading_package, parent_file) - - # set up variables - self.auxiliary = self.build_mfdata("auxiliary", auxiliary) - self.auxmultname = self.build_mfdata("auxmultname", auxmultname) - self.boundnames = self.build_mfdata("boundnames", boundnames) - self.print_input = self.build_mfdata("print_input", print_input) - self.print_flows = self.build_mfdata("print_flows", print_flows) - self.save_flows = self.build_mfdata("save_flows", save_flows) - self._ts_filerecord = self.build_mfdata("ts_filerecord", - None) - self._ts_package = self.build_child_package("ts", timeseries, - "timeseries", - self._ts_filerecord) - self._obs_filerecord = self.build_mfdata("obs_filerecord", - None) - self._obs_package = self.build_child_package("obs", observations, - "continuous", - self._obs_filerecord) - self.mover = self.build_mfdata("mover", mover) - self.maxbound = self.build_mfdata("maxbound", maxbound) - self.stress_period_data = self.build_mfdata("stress_period_data", - stress_period_data) - self._init_complete = True +# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY +# mf6/utils/createpackages.py +from .. import mfpackage +from ..data.mfdatautil import ListTemplateGenerator + + +class ModflowGwfghb(mfpackage.MFPackage): + """ + ModflowGwfghb defines a ghb package within a gwf6 model. + + Parameters + ---------- + model : MFModel + Model that this package is a part of. Package is automatically + added to model when it is initialized. + loading_package : bool + Do not set this parameter. It is intended for debugging and internal + processing purposes only. + auxiliary : [string] + * auxiliary (string) defines an array of one or more auxiliary variable + names. There is no limit on the number of auxiliary variables that + can be provided on this line; however, lists of information provided + in subsequent blocks must have a column of data for each auxiliary + variable name defined here. 
The number of auxiliary variables + detected on this line determines the value for naux. Comments cannot + be provided anywhere on this line as they will be interpreted as + auxiliary variable names. Auxiliary variables may not be used by the + package, but they will be available for use by other parts of the + program. The program will terminate with an error if auxiliary + variables are specified on more than one line in the options block. + auxmultname : string + * auxmultname (string) name of auxiliary variable to be used as + multiplier of general-head boundary conductance. + boundnames : boolean + * boundnames (boolean) keyword to indicate that boundary names may be + provided with the list of general-head boundary cells. + print_input : boolean + * print_input (boolean) keyword to indicate that the list of general- + head boundary information will be written to the listing file + immediately after it is read. + print_flows : boolean + * print_flows (boolean) keyword to indicate that the list of general- + head boundary flow rates will be printed to the listing file for + every stress period time step in which "BUDGET PRINT" is specified in + Output Control. If there is no Output Control option and + "PRINT_FLOWS" is specified, then flow rates are printed for the last + time step of each stress period. + save_flows : boolean + * save_flows (boolean) keyword to indicate that general-head boundary + flow terms will be written to the file specified with "BUDGET + FILEOUT" in Output Control. + timeseries : {varname:data} or timeseries data + * Contains data for the ts package. Data can be stored in a dictionary + containing data for the ts package with variable names as keys and + package data as values. Data just for the timeseries variable is also + acceptable. See ts package documentation for more information. + observations : {varname:data} or continuous data + * Contains data for the obs package. Data can be stored in a dictionary + containing data for the obs package with variable names as keys and + package data as values. Data just for the observations variable is + also acceptable. See obs package documentation for more information. + mover : boolean + * mover (boolean) keyword to indicate that this instance of the + General-Head Boundary Package can be used with the Water Mover (MVR) + Package. When the MOVER option is specified, additional memory is + allocated within the package to store the available, provided, and + received water. + maxbound : integer + * maxbound (integer) integer value specifying the maximum number of + general-head boundary cells that will be specified for use during any + stress period. + stress_period_data : [cellid, bhead, cond, aux, boundname] + * cellid ((integer, ...)) is the cell identifier, and depends on the + type of grid that is used for the simulation. For a structured grid + that uses the DIS input file, CELLID is the layer, row, and column. + For a grid that uses the DISV input file, CELLID is the layer and + CELL2D number. If the model uses the unstructured discretization + (DISU) input file, CELLID is the node number for the cell. This + argument is an index variable, which means that it should be treated + as zero-based when working with FloPy and Python. Flopy will + automatically subtract one when loading index variables and add one + when writing index variables. + * bhead (double) is the boundary head. 
If the Options block includes a + TIMESERIESFILE entry (see the "Time-Variable Input" section), values + can be obtained from a time series by entering the time-series name + in place of a numeric value. + * cond (double) is the hydraulic conductance of the interface between + the aquifer cell and the boundary. If the Options block includes a + TIMESERIESFILE entry (see the "Time-Variable Input" section), values + can be obtained from a time series by entering the time-series name + in place of a numeric value. + * aux (double) represents the values of the auxiliary variables for + each general-head boundary. The values of auxiliary variables must be + present for each general-head boundary. The values must be specified + in the order of the auxiliary variables specified in the OPTIONS + block. If the package supports time series and the Options block + includes a TIMESERIESFILE entry (see the "Time-Variable Input" + section), values can be obtained from a time series by entering the + time-series name in place of a numeric value. + * boundname (string) name of the general-head boundary cell. BOUNDNAME + is an ASCII character variable that can contain as many as 40 + characters. If BOUNDNAME contains spaces in it, then the entire name + must be enclosed within single quotes. + filename : String + File name for this package. + pname : String + Package name for this package. + parent_file : MFPackage + Parent package file that references this package. Only needed for + utility packages (mfutl*). For example, mfutllaktab package must have + a mfgwflak package parent_file. + + """ + auxiliary = ListTemplateGenerator(('gwf6', 'ghb', 'options', + 'auxiliary')) + ts_filerecord = ListTemplateGenerator(('gwf6', 'ghb', 'options', + 'ts_filerecord')) + obs_filerecord = ListTemplateGenerator(('gwf6', 'ghb', 'options', + 'obs_filerecord')) + stress_period_data = ListTemplateGenerator(('gwf6', 'ghb', 'period', + 'stress_period_data')) + package_abbr = "gwfghb" + _package_type = "ghb" + dfn_file_name = "gwf-ghb.dfn" + + dfn = [["block options", "name auxiliary", "type string", + "shape (naux)", "reader urword", "optional true"], + ["block options", "name auxmultname", "type string", "shape", + "reader urword", "optional true"], + ["block options", "name boundnames", "type keyword", "shape", + "reader urword", "optional true"], + ["block options", "name print_input", "type keyword", + "reader urword", "optional true"], + ["block options", "name print_flows", "type keyword", + "reader urword", "optional true"], + ["block options", "name save_flows", "type keyword", + "reader urword", "optional true"], + ["block options", "name ts_filerecord", + "type record ts6 filein ts6_filename", "shape", "reader urword", + "tagged true", "optional true", "construct_package ts", + "construct_data timeseries", "parameter_name timeseries"], + ["block options", "name ts6", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name filein", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name ts6_filename", "type string", + "preserve_case true", "in_record true", "reader urword", + "optional false", "tagged false"], + ["block options", "name obs_filerecord", + "type record obs6 filein obs6_filename", "shape", "reader urword", + "tagged true", "optional true", "construct_package obs", + "construct_data continuous", "parameter_name observations"], + ["block options", "name obs6", "type 
keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name obs6_filename", "type string", + "preserve_case true", "in_record true", "tagged false", + "reader urword", "optional false"], + ["block options", "name mover", "type keyword", "tagged true", + "reader urword", "optional true"], + ["block dimensions", "name maxbound", "type integer", + "reader urword", "optional false"], + ["block period", "name iper", "type integer", + "block_variable True", "in_record true", "tagged false", "shape", + "valid", "reader urword", "optional false"], + ["block period", "name stress_period_data", + "type recarray cellid bhead cond aux boundname", + "shape (maxbound)", "reader urword"], + ["block period", "name cellid", "type integer", + "shape (ncelldim)", "tagged false", "in_record true", + "reader urword"], + ["block period", "name bhead", "type double precision", "shape", + "tagged false", "in_record true", "reader urword", + "time_series true"], + ["block period", "name cond", "type double precision", "shape", + "tagged false", "in_record true", "reader urword", + "time_series true"], + ["block period", "name aux", "type double precision", + "in_record true", "tagged false", "shape (naux)", "reader urword", + "optional true", "time_series true"], + ["block period", "name boundname", "type string", "shape", + "tagged false", "in_record true", "reader urword", + "optional true"]] + + def __init__(self, model, loading_package=False, auxiliary=None, + auxmultname=None, boundnames=None, print_input=None, + print_flows=None, save_flows=None, timeseries=None, + observations=None, mover=None, maxbound=None, + stress_period_data=None, filename=None, pname=None, + parent_file=None): + super(ModflowGwfghb, self).__init__(model, "ghb", filename, pname, + loading_package, parent_file) + + # set up variables + self.auxiliary = self.build_mfdata("auxiliary", auxiliary) + self.auxmultname = self.build_mfdata("auxmultname", auxmultname) + self.boundnames = self.build_mfdata("boundnames", boundnames) + self.print_input = self.build_mfdata("print_input", print_input) + self.print_flows = self.build_mfdata("print_flows", print_flows) + self.save_flows = self.build_mfdata("save_flows", save_flows) + self._ts_filerecord = self.build_mfdata("ts_filerecord", + None) + self._ts_package = self.build_child_package("ts", timeseries, + "timeseries", + self._ts_filerecord) + self._obs_filerecord = self.build_mfdata("obs_filerecord", + None) + self._obs_package = self.build_child_package("obs", observations, + "continuous", + self._obs_filerecord) + self.mover = self.build_mfdata("mover", mover) + self.maxbound = self.build_mfdata("maxbound", maxbound) + self.stress_period_data = self.build_mfdata("stress_period_data", + stress_period_data) + self._init_complete = True diff --git a/flopy/mf6/modflow/mfgwfgnc.py b/flopy/mf6/modflow/mfgwfgnc.py index e81f805402..e08e90b3a5 100644 --- a/flopy/mf6/modflow/mfgwfgnc.py +++ b/flopy/mf6/modflow/mfgwfgnc.py @@ -1,144 +1,144 @@ -# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY -# mf6/utils/createpackages.py -from .. import mfpackage -from ..data.mfdatautil import ListTemplateGenerator - - -class ModflowGwfgnc(mfpackage.MFPackage): - """ - ModflowGwfgnc defines a gnc package within a gwf6 model. - - Parameters - ---------- - model : MFModel - Model that this package is a part of. Package is automatically - added to model when it is initialized. - loading_package : bool - Do not set this parameter. 
It is intended for debugging and internal - processing purposes only. - print_input : boolean - * print_input (boolean) keyword to indicate that the list of GNC - information will be written to the listing file immediately after it - is read. - print_flows : boolean - * print_flows (boolean) keyword to indicate that the list of GNC flow - rates will be printed to the listing file for every stress period - time step in which "BUDGET PRINT" is specified in Output Control. If - there is no Output Control option and "PRINT_FLOWS" is specified, - then flow rates are printed for the last time step of each stress - period. - explicit : boolean - * explicit (boolean) keyword to indicate that the ghost node correction - is applied in an explicit manner on the right-hand side of the - matrix. The explicit approach will likely require additional outer - iterations. If the keyword is not specified, then the correction will - be applied in an implicit manner on the left-hand side. The implicit - approach will likely converge better, but may require additional - memory. If the EXPLICIT keyword is not specified, then the BICGSTAB - linear acceleration option should be specified within the LINEAR - block of the Sparse Matrix Solver. - numgnc : integer - * numgnc (integer) is the number of GNC entries. - numalphaj : integer - * numalphaj (integer) is the number of contributing factors. - gncdata : [cellidn, cellidm, cellidsj, alphasj] - * cellidn ((integer, ...)) is the cellid of the cell, :math:`n`, in - which the ghost node is located. For a structured grid that uses the - DIS input file, CELLIDN is the layer, row, and column numbers of the - cell. For a grid that uses the DISV input file, CELLIDN is the layer - number and CELL2D number for the two cells. If the model uses the - unstructured discretization (DISU) input file, then CELLIDN is the - node number for the cell. This argument is an index variable, which - means that it should be treated as zero-based when working with FloPy - and Python. Flopy will automatically subtract one when loading index - variables and add one when writing index variables. - * cellidm ((integer, ...)) is the cellid of the connecting cell, - :math:`m`, to which flow occurs from the ghost node. For a structured - grid that uses the DIS input file, CELLIDM is the layer, row, and - column numbers of the cell. For a grid that uses the DISV input file, - CELLIDM is the layer number and CELL2D number for the two cells. If - the model uses the unstructured discretization (DISU) input file, - then CELLIDM is the node number for the cell. This argument is an - index variable, which means that it should be treated as zero-based - when working with FloPy and Python. Flopy will automatically subtract - one when loading index variables and add one when writing index - variables. - * cellidsj ((integer, ...)) is the array of CELLIDS for the - contributing j cells, which contribute to the interpolated head value - at the ghost node. This item contains one CELLID for each of the - contributing cells of the ghost node. Note that if the number of - actual contributing cells needed by the user is less than NUMALPHAJ - for any ghost node, then a dummy CELLID of zero(s) should be inserted - with an associated contributing factor of zero. For a structured grid - that uses the DIS input file, CELLID is the layer, row, and column - numbers of the cell. For a grid that uses the DISV input file, CELLID - is the layer number and cell2d number for the two cells. 
If the model - uses the unstructured discretization (DISU) input file, then CELLID - is the node number for the cell. This argument is an index variable, - which means that it should be treated as zero-based when working with - FloPy and Python. Flopy will automatically subtract one when loading - index variables and add one when writing index variables. - * alphasj (double) is the contributing factors for each contributing - node in CELLIDSJ. Note that if the number of actual contributing - cells is less than NUMALPHAJ for any ghost node, then dummy CELLIDS - should be inserted with an associated contributing factor of zero. - The sum of ALPHASJ should be less than one. This is because one minus - the sum of ALPHASJ is equal to the alpha term (alpha n in equation - 4-61 of the GWF Model report) that is multiplied by the head in cell - n. - filename : String - File name for this package. - pname : String - Package name for this package. - parent_file : MFPackage - Parent package file that references this package. Only needed for - utility packages (mfutl*). For example, mfutllaktab package must have - a mfgwflak package parent_file. - - """ - gncdata = ListTemplateGenerator(('gwf6', 'gnc', 'gncdata', - 'gncdata')) - package_abbr = "gwfgnc" - _package_type = "gnc" - dfn_file_name = "gwf-gnc.dfn" - - dfn = [["block options", "name print_input", "type keyword", - "reader urword", "optional true"], - ["block options", "name print_flows", "type keyword", - "reader urword", "optional true"], - ["block options", "name explicit", "type keyword", "tagged true", - "reader urword", "optional true"], - ["block dimensions", "name numgnc", "type integer", - "reader urword", "optional false"], - ["block dimensions", "name numalphaj", "type integer", - "reader urword", "optional false"], - ["block gncdata", "name gncdata", - "type recarray cellidn cellidm cellidsj alphasj", - "shape (maxbound)", "reader urword"], - ["block gncdata", "name cellidn", "type integer", "shape", - "tagged false", "in_record true", "reader urword", - "numeric_index true"], - ["block gncdata", "name cellidm", "type integer", "shape", - "tagged false", "in_record true", "reader urword", - "numeric_index true"], - ["block gncdata", "name cellidsj", "type integer", - "shape (numalphaj)", "tagged false", "in_record true", - "reader urword", "numeric_index true"], - ["block gncdata", "name alphasj", "type double precision", - "shape (numalphaj)", "tagged false", "in_record true", - "reader urword"]] - - def __init__(self, model, loading_package=False, print_input=None, - print_flows=None, explicit=None, numgnc=None, numalphaj=None, - gncdata=None, filename=None, pname=None, parent_file=None): - super(ModflowGwfgnc, self).__init__(model, "gnc", filename, pname, - loading_package, parent_file) - - # set up variables - self.print_input = self.build_mfdata("print_input", print_input) - self.print_flows = self.build_mfdata("print_flows", print_flows) - self.explicit = self.build_mfdata("explicit", explicit) - self.numgnc = self.build_mfdata("numgnc", numgnc) - self.numalphaj = self.build_mfdata("numalphaj", numalphaj) - self.gncdata = self.build_mfdata("gncdata", gncdata) - self._init_complete = True +# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY +# mf6/utils/createpackages.py +from .. import mfpackage +from ..data.mfdatautil import ListTemplateGenerator + + +class ModflowGwfgnc(mfpackage.MFPackage): + """ + ModflowGwfgnc defines a gnc package within a gwf6 model. 
+ + Parameters + ---------- + model : MFModel + Model that this package is a part of. Package is automatically + added to model when it is initialized. + loading_package : bool + Do not set this parameter. It is intended for debugging and internal + processing purposes only. + print_input : boolean + * print_input (boolean) keyword to indicate that the list of GNC + information will be written to the listing file immediately after it + is read. + print_flows : boolean + * print_flows (boolean) keyword to indicate that the list of GNC flow + rates will be printed to the listing file for every stress period + time step in which "BUDGET PRINT" is specified in Output Control. If + there is no Output Control option and "PRINT_FLOWS" is specified, + then flow rates are printed for the last time step of each stress + period. + explicit : boolean + * explicit (boolean) keyword to indicate that the ghost node correction + is applied in an explicit manner on the right-hand side of the + matrix. The explicit approach will likely require additional outer + iterations. If the keyword is not specified, then the correction will + be applied in an implicit manner on the left-hand side. The implicit + approach will likely converge better, but may require additional + memory. If the EXPLICIT keyword is not specified, then the BICGSTAB + linear acceleration option should be specified within the LINEAR + block of the Sparse Matrix Solver. + numgnc : integer + * numgnc (integer) is the number of GNC entries. + numalphaj : integer + * numalphaj (integer) is the number of contributing factors. + gncdata : [cellidn, cellidm, cellidsj, alphasj] + * cellidn ((integer, ...)) is the cellid of the cell, :math:`n`, in + which the ghost node is located. For a structured grid that uses the + DIS input file, CELLIDN is the layer, row, and column numbers of the + cell. For a grid that uses the DISV input file, CELLIDN is the layer + number and CELL2D number for the two cells. If the model uses the + unstructured discretization (DISU) input file, then CELLIDN is the + node number for the cell. This argument is an index variable, which + means that it should be treated as zero-based when working with FloPy + and Python. Flopy will automatically subtract one when loading index + variables and add one when writing index variables. + * cellidm ((integer, ...)) is the cellid of the connecting cell, + :math:`m`, to which flow occurs from the ghost node. For a structured + grid that uses the DIS input file, CELLIDM is the layer, row, and + column numbers of the cell. For a grid that uses the DISV input file, + CELLIDM is the layer number and CELL2D number for the two cells. If + the model uses the unstructured discretization (DISU) input file, + then CELLIDM is the node number for the cell. This argument is an + index variable, which means that it should be treated as zero-based + when working with FloPy and Python. Flopy will automatically subtract + one when loading index variables and add one when writing index + variables. + * cellidsj ((integer, ...)) is the array of CELLIDS for the + contributing j cells, which contribute to the interpolated head value + at the ghost node. This item contains one CELLID for each of the + contributing cells of the ghost node. Note that if the number of + actual contributing cells needed by the user is less than NUMALPHAJ + for any ghost node, then a dummy CELLID of zero(s) should be inserted + with an associated contributing factor of zero. 
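A minimal sketch of one GNCDATA record may help here (DIS grid, NUMALPHAJ = 1; gwf and all values are hypothetical, and cellids are zero-based on the FloPy side). The padding case described above simply extends the cellidsj and alphasj entries out to NUMALPHAJ values each.

    gnc = flopy.mf6.ModflowGwfgnc(
        gwf, numgnc=1, numalphaj=1,
        gncdata=[[(0, 1, 1), (0, 1, 2), (0, 1, 0), 0.33]])
    # record: cellidn, cellidm, cellidsj (one per NUMALPHAJ), alphasj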
For a structured grid + that uses the DIS input file, CELLID is the layer, row, and column + numbers of the cell. For a grid that uses the DISV input file, CELLID + is the layer number and cell2d number for the two cells. If the model + uses the unstructured discretization (DISU) input file, then CELLID + is the node number for the cell. This argument is an index variable, + which means that it should be treated as zero-based when working with + FloPy and Python. Flopy will automatically subtract one when loading + index variables and add one when writing index variables. + * alphasj (double) are the contributing factors for each contributing + node in CELLIDSJ. Note that if the number of actual contributing + cells is less than NUMALPHAJ for any ghost node, then dummy CELLIDS + should be inserted with an associated contributing factor of zero. + The sum of ALPHASJ should be less than one. This is because one minus + the sum of ALPHASJ is equal to the alpha term (alpha n in equation + 4-61 of the GWF Model report) that is multiplied by the head in cell + n. + filename : String + File name for this package. + pname : String + Package name for this package. + parent_file : MFPackage + Parent package file that references this package. Only needed for + utility packages (mfutl*). For example, mfutllaktab package must have + a mfgwflak package parent_file. + + """ + gncdata = ListTemplateGenerator(('gwf6', 'gnc', 'gncdata', + 'gncdata')) + package_abbr = "gwfgnc" + _package_type = "gnc" + dfn_file_name = "gwf-gnc.dfn" + + dfn = [["block options", "name print_input", "type keyword", + "reader urword", "optional true"], + ["block options", "name print_flows", "type keyword", + "reader urword", "optional true"], + ["block options", "name explicit", "type keyword", "tagged true", + "reader urword", "optional true"], + ["block dimensions", "name numgnc", "type integer", + "reader urword", "optional false"], + ["block dimensions", "name numalphaj", "type integer", + "reader urword", "optional false"], + ["block gncdata", "name gncdata", + "type recarray cellidn cellidm cellidsj alphasj", + "shape (maxbound)", "reader urword"], + ["block gncdata", "name cellidn", "type integer", "shape", + "tagged false", "in_record true", "reader urword", + "numeric_index true"], + ["block gncdata", "name cellidm", "type integer", "shape", + "tagged false", "in_record true", "reader urword", + "numeric_index true"], + ["block gncdata", "name cellidsj", "type integer", + "shape (numalphaj)", "tagged false", "in_record true", + "reader urword", "numeric_index true"], + ["block gncdata", "name alphasj", "type double precision", + "shape (numalphaj)", "tagged false", "in_record true", + "reader urword"]] + + def __init__(self, model, loading_package=False, print_input=None, + print_flows=None, explicit=None, numgnc=None, numalphaj=None, + gncdata=None, filename=None, pname=None, parent_file=None): + super(ModflowGwfgnc, self).__init__(model, "gnc", filename, pname, + loading_package, parent_file) + + # set up variables + self.print_input = self.build_mfdata("print_input", print_input) + self.print_flows = self.build_mfdata("print_flows", print_flows) + self.explicit = self.build_mfdata("explicit", explicit) + self.numgnc = self.build_mfdata("numgnc", numgnc) + self.numalphaj = self.build_mfdata("numalphaj", numalphaj) + self.gncdata = self.build_mfdata("gncdata", gncdata) + self._init_complete = True diff --git a/flopy/mf6/modflow/mfgwfgwf.py b/flopy/mf6/modflow/mfgwfgwf.py index 4b10a2c2f2..779cefcb55 100644 ---
a/flopy/mf6/modflow/mfgwfgwf.py +++ b/flopy/mf6/modflow/mfgwfgwf.py @@ -1,280 +1,280 @@ -# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY -# mf6/utils/createpackages.py -from .. import mfpackage -from ..data.mfdatautil import ListTemplateGenerator - - -class ModflowGwfgwf(mfpackage.MFPackage): - """ - ModflowGwfgwf defines a gwfgwf package. - - Parameters - ---------- - simulation : MFSimulation - Simulation that this package is a part of. Package is automatically - added to simulation when it is initialized. - loading_package : bool - Do not set this parameter. It is intended for debugging and internal - processing purposes only. - exgtype : - * is the exchange type (GWF-GWF or GWF-GWT). - exgmnamea : - * is the name of the first model that is part of this exchange. - exgmnameb : - * is the name of the second model that is part of this exchange. - auxiliary : [string] - * auxiliary (string) an array of auxiliary variable names. There is no - limit on the number of auxiliary variables that can be provided. Most - auxiliary variables will not be used by the GWF-GWF Exchange, but - they will be available for use by other parts of the program. If an - auxiliary variable with the name "ANGLDEGX" is found, then this - information will be used as the angle (provided in degrees) between - the connection face normal and the x axis, where a value of zero - indicates that a normal vector points directly along the positive x - axis. The connection face normal is a normal vector on the cell face - shared between the cell in model 1 and the cell in model 2 pointing - away from the model 1 cell. Additional information on "ANGLDEGX" is - provided in the description of the DISU Package. If an auxiliary - variable with the name "CDIST" is found, then this information will - be used as the straight-line connection distance, including the - vertical component, between the two cell centers. Both ANGLDEGX and - CDIST are required if specific discharge is calculated for either of - the groundwater models. - print_input : boolean - * print_input (boolean) keyword to indicate that the list of exchange - entries will be echoed to the listing file immediately after it is - read. - print_flows : boolean - * print_flows (boolean) keyword to indicate that the list of exchange - flow rates will be printed to the listing file for every stress - period in which "SAVE BUDGET" is specified in Output Control. - save_flows : boolean - * save_flows (boolean) keyword to indicate that cell-by-cell flow terms - will be written to the budget file for each model provided that the - Output Control for the models are set up with the "BUDGET SAVE FILE" - option. - cell_averaging : string - * cell_averaging (string) is a keyword and text keyword to indicate the - method that will be used for calculating the conductance for - horizontal cell connections. The text value for CELL_AVERAGING can be - "HARMONIC", "LOGARITHMIC", or "AMT-LMK", which means "arithmetic-mean - thickness and logarithmic-mean hydraulic conductivity". If the user - does not specify a value for CELL_AVERAGING, then the harmonic-mean - method will be used. - cvoptions : [dewatered] - * dewatered (string) If the DEWATERED keyword is specified, then the - vertical conductance is calculated using only the saturated thickness - and properties of the overlying cell if the head in the underlying - cell is below its top. 
- newton : boolean - * newton (boolean) keyword that activates the Newton-Raphson - formulation for groundwater flow between connected, convertible - groundwater cells. Cells will not dry when this option is used. - gnc_filerecord : [gnc6_filename] - * gnc6_filename (string) is the file name for ghost node correction - input file. Information for the ghost nodes are provided in the file - provided with these keywords. The format for specifying the ghost - nodes is the same as described for the GNC Package of the GWF Model. - This includes specifying OPTIONS, DIMENSIONS, and GNCDATA blocks. The - order of the ghost nodes must follow the same order as the order of - the cells in the EXCHANGEDATA block. For the GNCDATA, noden and all - of the nodej values are assumed to be located in model 1, and nodem - is assumed to be in model 2. - mvr_filerecord : [mvr6_filename] - * mvr6_filename (string) is the file name of the water mover input file - to apply to this exchange. Information for the water mover are - provided in the file provided with these keywords. The format for - specifying the water mover information is the same as described for - the Water Mover (MVR) Package of the GWF Model, with two exceptions. - First, in the PACKAGES block, the model name must be included as a - separate string before each package. Second, the appropriate model - name must be included before package name 1 and package name 2 in the - BEGIN PERIOD block. This allows providers and receivers to be located - in both models listed as part of this exchange. - observations : {varname:data} or continuous data - * Contains data for the obs package. Data can be stored in a dictionary - containing data for the obs package with variable names as keys and - package data as values. Data just for the observations variable is - also acceptable. See obs package documentation for more information. - nexg : integer - * nexg (integer) keyword and integer value specifying the number of - GWF-GWF exchanges. - exchangedata : [cellidm1, cellidm2, ihc, cl1, cl2, hwva, aux] - * cellidm1 ((integer, ...)) is the cellid of the cell in model 1 as - specified in the simulation name file. For a structured grid that - uses the DIS input file, CELLIDM1 is the layer, row, and column - numbers of the cell. For a grid that uses the DISV input file, - CELLIDM1 is the layer number and CELL2D number for the two cells. If - the model uses the unstructured discretization (DISU) input file, - then CELLIDM1 is the node number for the cell. This argument is an - index variable, which means that it should be treated as zero-based - when working with FloPy and Python. Flopy will automatically subtract - one when loading index variables and add one when writing index - variables. - * cellidm2 ((integer, ...)) is the cellid of the cell in model 2 as - specified in the simulation name file. For a structured grid that - uses the DIS input file, CELLIDM2 is the layer, row, and column - numbers of the cell. For a grid that uses the DISV input file, - CELLIDM2 is the layer number and CELL2D number for the two cells. If - the model uses the unstructured discretization (DISU) input file, - then CELLIDM2 is the node number for the cell. This argument is an - index variable, which means that it should be treated as zero-based - when working with FloPy and Python. Flopy will automatically subtract - one when loading index variables and add one when writing index - variables. 
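Because the zero-based convention recurs in every cellid description, one concrete pairing may help (illustrative only, not data from this patch):

    # the MODFLOW 6 file record "1 1 1  1 2 1 ..." round-trips through FloPy as
    cellidm1 = (0, 0, 0)  # layer 1, row 1, column 1 in the one-based input file
    cellidm2 = (0, 1, 0)  # layer 1, row 2, column 1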
- * ihc (integer) is an integer flag indicating the direction between - node n and all of its m connections. If IHC = 0 then the connection - is vertical. If IHC = 1 then the connection is horizontal. If IHC = 2 - then the connection is horizontal for a vertically staggered grid. - * cl1 (double) is the distance between the center of cell 1 and the its - shared face with cell 2. - * cl2 (double) is the distance between the center of cell 2 and the its - shared face with cell 1. - * hwva (double) is the horizontal width of the flow connection between - cell 1 and cell 2 if IHC :math:`>` 0, or it is the area perpendicular - to flow of the vertical connection between cell 1 and cell 2 if IHC = - 0. - * aux (double) represents the values of the auxiliary variables for - each GWFGWF Exchange. The values of auxiliary variables must be - present for each exchange. The values must be specified in the order - of the auxiliary variables specified in the OPTIONS block. - filename : String - File name for this package. - pname : String - Package name for this package. - parent_file : MFPackage - Parent package file that references this package. Only needed for - utility packages (mfutl*). For example, mfutllaktab package must have - a mfgwflak package parent_file. - - """ - auxiliary = ListTemplateGenerator(('gwfgwf', 'options', 'auxiliary')) - gnc_filerecord = ListTemplateGenerator(('gwfgwf', 'options', - 'gnc_filerecord')) - mvr_filerecord = ListTemplateGenerator(('gwfgwf', 'options', - 'mvr_filerecord')) - obs_filerecord = ListTemplateGenerator(('gwfgwf', 'options', - 'obs_filerecord')) - exchangedata = ListTemplateGenerator(('gwfgwf', 'exchangedata', - 'exchangedata')) - package_abbr = "gwfgwf" - _package_type = "gwfgwf" - dfn_file_name = "exg-gwfgwf.dfn" - - dfn = [["block options", "name auxiliary", "type string", - "shape (naux)", "reader urword", "optional true"], - ["block options", "name print_input", "type keyword", - "reader urword", "optional true"], - ["block options", "name print_flows", "type keyword", - "reader urword", "optional true"], - ["block options", "name save_flows", "type keyword", - "reader urword", "optional true"], - ["block options", "name cell_averaging", "type string", - "valid harmonic logarithmic amt-lmk", "reader urword", - "optional true"], - ["block options", "name cvoptions", - "type record variablecv dewatered", "reader urword", - "optional true"], - ["block options", "name variablecv", "in_record true", - "type keyword", "reader urword"], - ["block options", "name dewatered", "in_record true", - "type keyword", "reader urword", "optional true"], - ["block options", "name newton", "type keyword", "reader urword", - "optional true"], - ["block options", "name gnc_filerecord", - "type record gnc6 filein gnc6_filename", "shape", "reader urword", - "tagged true", "optional true"], - ["block options", "name filein", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name gnc6", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name gnc6_filename", "type string", - "preserve_case true", "in_record true", "tagged false", - "reader urword", "optional false"], - ["block options", "name mvr_filerecord", - "type record mvr6 filein mvr6_filename", "shape", "reader urword", - "tagged true", "optional true"], - ["block options", "name mvr6", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - 
["block options", "name mvr6_filename", "type string", - "preserve_case true", "in_record true", "tagged false", - "reader urword", "optional false"], - ["block options", "name obs_filerecord", - "type record obs6 filein obs6_filename", "shape", "reader urword", - "tagged true", "optional true", "construct_package obs", - "construct_data continuous", "parameter_name observations"], - ["block options", "name obs6", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name obs6_filename", "type string", - "preserve_case true", "in_record true", "tagged false", - "reader urword", "optional false"], - ["block dimensions", "name nexg", "type integer", - "reader urword", "optional false"], - ["block exchangedata", "name exchangedata", - "type recarray cellidm1 cellidm2 ihc cl1 cl2 hwva aux", - "reader urword", "optional false"], - ["block exchangedata", "name cellidm1", "type integer", - "in_record true", "tagged false", "reader urword", - "optional false", "numeric_index true"], - ["block exchangedata", "name cellidm2", "type integer", - "in_record true", "tagged false", "reader urword", - "optional false", "numeric_index true"], - ["block exchangedata", "name ihc", "type integer", - "in_record true", "tagged false", "reader urword", - "optional false"], - ["block exchangedata", "name cl1", "type double precision", - "in_record true", "tagged false", "reader urword", - "optional false"], - ["block exchangedata", "name cl2", "type double precision", - "in_record true", "tagged false", "reader urword", - "optional false"], - ["block exchangedata", "name hwva", "type double precision", - "in_record true", "tagged false", "reader urword", - "optional false"], - ["block exchangedata", "name aux", "type double precision", - "in_record true", "tagged false", "shape (naux)", "reader urword", - "optional true"]] - - def __init__(self, simulation, loading_package=False, exgtype=None, - exgmnamea=None, exgmnameb=None, auxiliary=None, - print_input=None, print_flows=None, save_flows=None, - cell_averaging=None, cvoptions=None, newton=None, - gnc_filerecord=None, mvr_filerecord=None, observations=None, - nexg=None, exchangedata=None, filename=None, pname=None, - parent_file=None): - super(ModflowGwfgwf, self).__init__(simulation, "gwfgwf", filename, pname, - loading_package, parent_file) - - # set up variables - self.exgtype = exgtype - - self.exgmnamea = exgmnamea - - self.exgmnameb = exgmnameb - - simulation.register_exchange_file(self) - - self.auxiliary = self.build_mfdata("auxiliary", auxiliary) - self.print_input = self.build_mfdata("print_input", print_input) - self.print_flows = self.build_mfdata("print_flows", print_flows) - self.save_flows = self.build_mfdata("save_flows", save_flows) - self.cell_averaging = self.build_mfdata("cell_averaging", - cell_averaging) - self.cvoptions = self.build_mfdata("cvoptions", cvoptions) - self.newton = self.build_mfdata("newton", newton) - self.gnc_filerecord = self.build_mfdata("gnc_filerecord", - gnc_filerecord) - self.mvr_filerecord = self.build_mfdata("mvr_filerecord", - mvr_filerecord) - self._obs_filerecord = self.build_mfdata("obs_filerecord", - None) - self._obs_package = self.build_child_package("obs", observations, - "continuous", - self._obs_filerecord) - self.nexg = self.build_mfdata("nexg", nexg) - self.exchangedata = self.build_mfdata("exchangedata", exchangedata) - self._init_complete = True +# DO NOT MODIFY THIS FILE DIRECTLY. 
THIS FILE MUST BE CREATED BY +# mf6/utils/createpackages.py +from .. import mfpackage +from ..data.mfdatautil import ListTemplateGenerator + + +class ModflowGwfgwf(mfpackage.MFPackage): + """ + ModflowGwfgwf defines a gwfgwf package. + + Parameters + ---------- + simulation : MFSimulation + Simulation that this package is a part of. Package is automatically + added to simulation when it is initialized. + loading_package : bool + Do not set this parameter. It is intended for debugging and internal + processing purposes only. + exgtype : + * is the exchange type (GWF-GWF or GWF-GWT). + exgmnamea : + * is the name of the first model that is part of this exchange. + exgmnameb : + * is the name of the second model that is part of this exchange. + auxiliary : [string] + * auxiliary (string) an array of auxiliary variable names. There is no + limit on the number of auxiliary variables that can be provided. Most + auxiliary variables will not be used by the GWF-GWF Exchange, but + they will be available for use by other parts of the program. If an + auxiliary variable with the name "ANGLDEGX" is found, then this + information will be used as the angle (provided in degrees) between + the connection face normal and the x axis, where a value of zero + indicates that a normal vector points directly along the positive x + axis. The connection face normal is a normal vector on the cell face + shared between the cell in model 1 and the cell in model 2 pointing + away from the model 1 cell. Additional information on "ANGLDEGX" is + provided in the description of the DISU Package. If an auxiliary + variable with the name "CDIST" is found, then this information will + be used as the straight-line connection distance, including the + vertical component, between the two cell centers. Both ANGLDEGX and + CDIST are required if specific discharge is calculated for either of + the groundwater models. + print_input : boolean + * print_input (boolean) keyword to indicate that the list of exchange + entries will be echoed to the listing file immediately after it is + read. + print_flows : boolean + * print_flows (boolean) keyword to indicate that the list of exchange + flow rates will be printed to the listing file for every stress + period in which "SAVE BUDGET" is specified in Output Control. + save_flows : boolean + * save_flows (boolean) keyword to indicate that cell-by-cell flow terms + will be written to the budget file for each model provided that the + Output Control for the models are set up with the "BUDGET SAVE FILE" + option. + cell_averaging : string + * cell_averaging (string) is a keyword and text keyword to indicate the + method that will be used for calculating the conductance for + horizontal cell connections. The text value for CELL_AVERAGING can be + "HARMONIC", "LOGARITHMIC", or "AMT-LMK", which means "arithmetic-mean + thickness and logarithmic-mean hydraulic conductivity". If the user + does not specify a value for CELL_AVERAGING, then the harmonic-mean + method will be used. + cvoptions : [dewatered] + * dewatered (string) If the DEWATERED keyword is specified, then the + vertical conductance is calculated using only the saturated thickness + and properties of the overlying cell if the head in the underlying + cell is below its top. + newton : boolean + * newton (boolean) keyword that activates the Newton-Raphson + formulation for groundwater flow between connected, convertible + groundwater cells. Cells will not dry when this option is used. 
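Pulling these options together, a hypothetical exchange instantiation might look as follows; sim is an existing flopy.mf6.MFSimulation, and the exchange type string, model names, and single exchangedata row (its fields are sketched after the IHC description below) are all illustrative.

    exg = flopy.mf6.ModflowGwfgwf(
        sim, exgtype="GWF6-GWF6", exgmnamea="parent", exgmnameb="child",
        newton=True, cell_averaging="harmonic", nexg=1,
        exchangedata=[[(0, 0, 4), (0, 0, 0), 1, 50.0, 50.0, 100.0]])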
+ gnc_filerecord : [gnc6_filename] + * gnc6_filename (string) is the file name for ghost node correction + input file. Information for the ghost nodes is provided in the file + provided with these keywords. The format for specifying the ghost + nodes is the same as described for the GNC Package of the GWF Model. + This includes specifying OPTIONS, DIMENSIONS, and GNCDATA blocks. The + order of the ghost nodes must follow the same order as the order of + the cells in the EXCHANGEDATA block. For the GNCDATA, noden and all + of the nodej values are assumed to be located in model 1, and nodem + is assumed to be in model 2. + mvr_filerecord : [mvr6_filename] + * mvr6_filename (string) is the file name of the water mover input file + to apply to this exchange. Information for the water mover is + provided in the file provided with these keywords. The format for + specifying the water mover information is the same as described for + the Water Mover (MVR) Package of the GWF Model, with two exceptions. + First, in the PACKAGES block, the model name must be included as a + separate string before each package. Second, the appropriate model + name must be included before package name 1 and package name 2 in the + BEGIN PERIOD block. This allows providers and receivers to be located + in both models listed as part of this exchange. + observations : {varname:data} or continuous data + * Contains data for the obs package. Data can be stored in a dictionary + containing data for the obs package with variable names as keys and + package data as values. Data just for the observations variable is + also acceptable. See obs package documentation for more information. + nexg : integer + * nexg (integer) keyword and integer value specifying the number of + GWF-GWF exchanges. + exchangedata : [cellidm1, cellidm2, ihc, cl1, cl2, hwva, aux] + * cellidm1 ((integer, ...)) is the cellid of the cell in model 1 as + specified in the simulation name file. For a structured grid that + uses the DIS input file, CELLIDM1 is the layer, row, and column + numbers of the cell. For a grid that uses the DISV input file, + CELLIDM1 is the layer number and CELL2D number for the two cells. If + the model uses the unstructured discretization (DISU) input file, + then CELLIDM1 is the node number for the cell. This argument is an + index variable, which means that it should be treated as zero-based + when working with FloPy and Python. Flopy will automatically subtract + one when loading index variables and add one when writing index + variables. + * cellidm2 ((integer, ...)) is the cellid of the cell in model 2 as + specified in the simulation name file. For a structured grid that + uses the DIS input file, CELLIDM2 is the layer, row, and column + numbers of the cell. For a grid that uses the DISV input file, + CELLIDM2 is the layer number and CELL2D number for the two cells. If + the model uses the unstructured discretization (DISU) input file, + then CELLIDM2 is the node number for the cell. This argument is an + index variable, which means that it should be treated as zero-based + when working with FloPy and Python. Flopy will automatically subtract + one when loading index variables and add one when writing index + variables. + * ihc (integer) is an integer flag indicating the direction between + node n and all of its m connections. If IHC = 0 then the connection + is vertical. If IHC = 1 then the connection is horizontal. If IHC = 2 + then the connection is horizontal for a vertically staggered grid.
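Since IHC changes what CL1, CL2, and HWVA mean, a pair of hypothetical rows is clearer than prose (cellids zero-based, values illustrative):

    # horizontal connection (IHC = 1): hwva is the width of the shared face
    horizontal = [(0, 0, 4), (0, 0, 0), 1, 50.0, 50.0, 100.0]
    # vertical connection (IHC = 0): hwva is the area of the shared face
    vertical = [(0, 0, 4), (1, 0, 4), 0, 5.0, 5.0, 10000.0]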
+ * cl1 (double) is the distance between the center of cell 1 and its + shared face with cell 2. + * cl2 (double) is the distance between the center of cell 2 and its + shared face with cell 1. + * hwva (double) is the horizontal width of the flow connection between + cell 1 and cell 2 if IHC :math:`>` 0, or it is the area perpendicular + to flow of the vertical connection between cell 1 and cell 2 if IHC = + 0. + * aux (double) represents the values of the auxiliary variables for + each GWFGWF Exchange. The values of auxiliary variables must be + present for each exchange. The values must be specified in the order + of the auxiliary variables specified in the OPTIONS block. + filename : String + File name for this package. + pname : String + Package name for this package. + parent_file : MFPackage + Parent package file that references this package. Only needed for + utility packages (mfutl*). For example, mfutllaktab package must have + a mfgwflak package parent_file. + + """ + auxiliary = ListTemplateGenerator(('gwfgwf', 'options', 'auxiliary')) + gnc_filerecord = ListTemplateGenerator(('gwfgwf', 'options', + 'gnc_filerecord')) + mvr_filerecord = ListTemplateGenerator(('gwfgwf', 'options', + 'mvr_filerecord')) + obs_filerecord = ListTemplateGenerator(('gwfgwf', 'options', + 'obs_filerecord')) + exchangedata = ListTemplateGenerator(('gwfgwf', 'exchangedata', + 'exchangedata')) + package_abbr = "gwfgwf" + _package_type = "gwfgwf" + dfn_file_name = "exg-gwfgwf.dfn" + + dfn = [["block options", "name auxiliary", "type string", + "shape (naux)", "reader urword", "optional true"], + ["block options", "name print_input", "type keyword", + "reader urword", "optional true"], + ["block options", "name print_flows", "type keyword", + "reader urword", "optional true"], + ["block options", "name save_flows", "type keyword", + "reader urword", "optional true"], + ["block options", "name cell_averaging", "type string", + "valid harmonic logarithmic amt-lmk", "reader urword", + "optional true"], + ["block options", "name cvoptions", + "type record variablecv dewatered", "reader urword", + "optional true"], + ["block options", "name variablecv", "in_record true", + "type keyword", "reader urword"], + ["block options", "name dewatered", "in_record true", + "type keyword", "reader urword", "optional true"], + ["block options", "name newton", "type keyword", "reader urword", + "optional true"], + ["block options", "name gnc_filerecord", + "type record gnc6 filein gnc6_filename", "shape", "reader urword", + "tagged true", "optional true"], + ["block options", "name filein", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name gnc6", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name gnc6_filename", "type string", + "preserve_case true", "in_record true", "tagged false", + "reader urword", "optional false"], + ["block options", "name mvr_filerecord", + "type record mvr6 filein mvr6_filename", "shape", "reader urword", + "tagged true", "optional true"], + ["block options", "name mvr6", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name mvr6_filename", "type string", + "preserve_case true", "in_record true", "tagged false", + "reader urword", "optional false"], + ["block options", "name obs_filerecord", + "type record obs6 filein obs6_filename", "shape", "reader urword", + "tagged true",
"optional true", "construct_package obs", + "construct_data continuous", "parameter_name observations"], + ["block options", "name obs6", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name obs6_filename", "type string", + "preserve_case true", "in_record true", "tagged false", + "reader urword", "optional false"], + ["block dimensions", "name nexg", "type integer", + "reader urword", "optional false"], + ["block exchangedata", "name exchangedata", + "type recarray cellidm1 cellidm2 ihc cl1 cl2 hwva aux", + "reader urword", "optional false"], + ["block exchangedata", "name cellidm1", "type integer", + "in_record true", "tagged false", "reader urword", + "optional false", "numeric_index true"], + ["block exchangedata", "name cellidm2", "type integer", + "in_record true", "tagged false", "reader urword", + "optional false", "numeric_index true"], + ["block exchangedata", "name ihc", "type integer", + "in_record true", "tagged false", "reader urword", + "optional false"], + ["block exchangedata", "name cl1", "type double precision", + "in_record true", "tagged false", "reader urword", + "optional false"], + ["block exchangedata", "name cl2", "type double precision", + "in_record true", "tagged false", "reader urword", + "optional false"], + ["block exchangedata", "name hwva", "type double precision", + "in_record true", "tagged false", "reader urword", + "optional false"], + ["block exchangedata", "name aux", "type double precision", + "in_record true", "tagged false", "shape (naux)", "reader urword", + "optional true"]] + + def __init__(self, simulation, loading_package=False, exgtype=None, + exgmnamea=None, exgmnameb=None, auxiliary=None, + print_input=None, print_flows=None, save_flows=None, + cell_averaging=None, cvoptions=None, newton=None, + gnc_filerecord=None, mvr_filerecord=None, observations=None, + nexg=None, exchangedata=None, filename=None, pname=None, + parent_file=None): + super(ModflowGwfgwf, self).__init__(simulation, "gwfgwf", filename, pname, + loading_package, parent_file) + + # set up variables + self.exgtype = exgtype + + self.exgmnamea = exgmnamea + + self.exgmnameb = exgmnameb + + simulation.register_exchange_file(self) + + self.auxiliary = self.build_mfdata("auxiliary", auxiliary) + self.print_input = self.build_mfdata("print_input", print_input) + self.print_flows = self.build_mfdata("print_flows", print_flows) + self.save_flows = self.build_mfdata("save_flows", save_flows) + self.cell_averaging = self.build_mfdata("cell_averaging", + cell_averaging) + self.cvoptions = self.build_mfdata("cvoptions", cvoptions) + self.newton = self.build_mfdata("newton", newton) + self.gnc_filerecord = self.build_mfdata("gnc_filerecord", + gnc_filerecord) + self.mvr_filerecord = self.build_mfdata("mvr_filerecord", + mvr_filerecord) + self._obs_filerecord = self.build_mfdata("obs_filerecord", + None) + self._obs_package = self.build_child_package("obs", observations, + "continuous", + self._obs_filerecord) + self.nexg = self.build_mfdata("nexg", nexg) + self.exchangedata = self.build_mfdata("exchangedata", exchangedata) + self._init_complete = True diff --git a/flopy/mf6/modflow/mfgwfhfb.py b/flopy/mf6/modflow/mfgwfhfb.py index b166c85dd6..b2453b9579 100644 --- a/flopy/mf6/modflow/mfgwfhfb.py +++ b/flopy/mf6/modflow/mfgwfhfb.py @@ -1,103 +1,103 @@ -# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY -# mf6/utils/createpackages.py -from .. 
import mfpackage -from ..data.mfdatautil import ListTemplateGenerator - - -class ModflowGwfhfb(mfpackage.MFPackage): - """ - ModflowGwfhfb defines a hfb package within a gwf6 model. - - Parameters - ---------- - model : MFModel - Model that this package is a part of. Package is automatically - added to model when it is initialized. - loading_package : bool - Do not set this parameter. It is intended for debugging and internal - processing purposes only. - print_input : boolean - * print_input (boolean) keyword to indicate that the list of horizontal - flow barriers will be written to the listing file immediately after - it is read. - maxhfb : integer - * maxhfb (integer) integer value specifying the maximum number of - horizontal flow barriers that will be entered in this input file. The - value of MAXHFB is used to allocate memory for the horizontal flow - barriers. - stress_period_data : [cellid1, cellid2, hydchr] - * cellid1 ((integer, ...)) identifier for the first cell. For a - structured grid that uses the DIS input file, CELLID1 is the layer, - row, and column numbers of the cell. For a grid that uses the DISV - input file, CELLID1 is the layer number and CELL2D number for the two - cells. If the model uses the unstructured discretization (DISU) input - file, then CELLID1 is the node numbers for the cell. The barrier is - located between cells designated as CELLID1 and CELLID2. For models - that use the DIS and DISV grid types, the layer number for CELLID1 - and CELLID2 must be the same. For all grid types, cells must be - horizontally adjacent or the program will terminate with an error. - This argument is an index variable, which means that it should be - treated as zero-based when working with FloPy and Python. Flopy will - automatically subtract one when loading index variables and add one - when writing index variables. - * cellid2 ((integer, ...)) identifier for the second cell. See CELLID1 - for description of how to specify. This argument is an index - variable, which means that it should be treated as zero-based when - working with FloPy and Python. Flopy will automatically subtract one - when loading index variables and add one when writing index - variables. - * hydchr (double) is the hydraulic characteristic of the horizontal- - flow barrier. The hydraulic characteristic is the barrier hydraulic - conductivity divided by the width of the horizontal-flow barrier. If - the hydraulic characteristic is negative, then the absolute value of - HYDCHR acts as a multiplier to the conductance between the two model - cells specified as containing the barrier. For example, if the value - for HYDCHR was specified as -1.5, the conductance calculated for the - two cells would be multiplied by 1.5. - filename : String - File name for this package. - pname : String - Package name for this package. - parent_file : MFPackage - Parent package file that references this package. Only needed for - utility packages (mfutl*). For example, mfutllaktab package must have - a mfgwflak package parent_file. 
- - """ - stress_period_data = ListTemplateGenerator(('gwf6', 'hfb', 'period', - 'stress_period_data')) - package_abbr = "gwfhfb" - _package_type = "hfb" - dfn_file_name = "gwf-hfb.dfn" - - dfn = [["block options", "name print_input", "type keyword", - "reader urword", "optional true"], - ["block dimensions", "name maxhfb", "type integer", - "reader urword", "optional false"], - ["block period", "name iper", "type integer", - "block_variable True", "in_record true", "tagged false", "shape", - "valid", "reader urword", "optional false"], - ["block period", "name stress_period_data", - "type recarray cellid1 cellid2 hydchr", "shape (maxhfb)", - "reader urword"], - ["block period", "name cellid1", "type integer", - "shape (ncelldim)", "tagged false", "in_record true", - "reader urword"], - ["block period", "name cellid2", "type integer", - "shape (ncelldim)", "tagged false", "in_record true", - "reader urword"], - ["block period", "name hydchr", "type double precision", "shape", - "tagged false", "in_record true", "reader urword"]] - - def __init__(self, model, loading_package=False, print_input=None, - maxhfb=None, stress_period_data=None, filename=None, - pname=None, parent_file=None): - super(ModflowGwfhfb, self).__init__(model, "hfb", filename, pname, - loading_package, parent_file) - - # set up variables - self.print_input = self.build_mfdata("print_input", print_input) - self.maxhfb = self.build_mfdata("maxhfb", maxhfb) - self.stress_period_data = self.build_mfdata("stress_period_data", - stress_period_data) - self._init_complete = True +# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY +# mf6/utils/createpackages.py +from .. import mfpackage +from ..data.mfdatautil import ListTemplateGenerator + + +class ModflowGwfhfb(mfpackage.MFPackage): + """ + ModflowGwfhfb defines a hfb package within a gwf6 model. + + Parameters + ---------- + model : MFModel + Model that this package is a part of. Package is automatically + added to model when it is initialized. + loading_package : bool + Do not set this parameter. It is intended for debugging and internal + processing purposes only. + print_input : boolean + * print_input (boolean) keyword to indicate that the list of horizontal + flow barriers will be written to the listing file immediately after + it is read. + maxhfb : integer + * maxhfb (integer) integer value specifying the maximum number of + horizontal flow barriers that will be entered in this input file. The + value of MAXHFB is used to allocate memory for the horizontal flow + barriers. + stress_period_data : [cellid1, cellid2, hydchr] + * cellid1 ((integer, ...)) identifier for the first cell. For a + structured grid that uses the DIS input file, CELLID1 is the layer, + row, and column numbers of the cell. For a grid that uses the DISV + input file, CELLID1 is the layer number and CELL2D number for the two + cells. If the model uses the unstructured discretization (DISU) input + file, then CELLID1 is the node numbers for the cell. The barrier is + located between cells designated as CELLID1 and CELLID2. For models + that use the DIS and DISV grid types, the layer number for CELLID1 + and CELLID2 must be the same. For all grid types, cells must be + horizontally adjacent or the program will terminate with an error. + This argument is an index variable, which means that it should be + treated as zero-based when working with FloPy and Python. Flopy will + automatically subtract one when loading index variables and add one + when writing index variables. 
+ * cellid2 ((integer, ...)) identifier for the second cell. See CELLID1 + for description of how to specify. This argument is an index + variable, which means that it should be treated as zero-based when + working with FloPy and Python. Flopy will automatically subtract one + when loading index variables and add one when writing index + variables. + * hydchr (double) is the hydraulic characteristic of the horizontal- + flow barrier. The hydraulic characteristic is the barrier hydraulic + conductivity divided by the width of the horizontal-flow barrier. If + the hydraulic characteristic is negative, then the absolute value of + HYDCHR acts as a multiplier to the conductance between the two model + cells specified as containing the barrier. For example, if the value + for HYDCHR was specified as -1.5, the conductance calculated for the + two cells would be multiplied by 1.5. + filename : String + File name for this package. + pname : String + Package name for this package. + parent_file : MFPackage + Parent package file that references this package. Only needed for + utility packages (mfutl*). For example, mfutllaktab package must have + a mfgwflak package parent_file. + + """ + stress_period_data = ListTemplateGenerator(('gwf6', 'hfb', 'period', + 'stress_period_data')) + package_abbr = "gwfhfb" + _package_type = "hfb" + dfn_file_name = "gwf-hfb.dfn" + + dfn = [["block options", "name print_input", "type keyword", + "reader urword", "optional true"], + ["block dimensions", "name maxhfb", "type integer", + "reader urword", "optional false"], + ["block period", "name iper", "type integer", + "block_variable True", "in_record true", "tagged false", "shape", + "valid", "reader urword", "optional false"], + ["block period", "name stress_period_data", + "type recarray cellid1 cellid2 hydchr", "shape (maxhfb)", + "reader urword"], + ["block period", "name cellid1", "type integer", + "shape (ncelldim)", "tagged false", "in_record true", + "reader urword"], + ["block period", "name cellid2", "type integer", + "shape (ncelldim)", "tagged false", "in_record true", + "reader urword"], + ["block period", "name hydchr", "type double precision", "shape", + "tagged false", "in_record true", "reader urword"]] + + def __init__(self, model, loading_package=False, print_input=None, + maxhfb=None, stress_period_data=None, filename=None, + pname=None, parent_file=None): + super(ModflowGwfhfb, self).__init__(model, "hfb", filename, pname, + loading_package, parent_file) + + # set up variables + self.print_input = self.build_mfdata("print_input", print_input) + self.maxhfb = self.build_mfdata("maxhfb", maxhfb) + self.stress_period_data = self.build_mfdata("stress_period_data", + stress_period_data) + self._init_complete = True diff --git a/flopy/mf6/modflow/mfgwflak.py b/flopy/mf6/modflow/mfgwflak.py index cdf3da93f6..1619f5c3a1 100644 --- a/flopy/mf6/modflow/mfgwflak.py +++ b/flopy/mf6/modflow/mfgwflak.py @@ -1,697 +1,697 @@ -# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY -# mf6/utils/createpackages.py -from .. import mfpackage -from ..data.mfdatautil import ListTemplateGenerator - - -class ModflowGwflak(mfpackage.MFPackage): - """ - ModflowGwflak defines a lak package within a gwf6 model. - - Parameters - ---------- - model : MFModel - Model that this package is a part of. Package is automatically - added to model when it is initialized. - loading_package : bool - Do not set this parameter. It is intended for debugging and internal - processing purposes only. 
- auxiliary : [string] - * auxiliary (string) defines an array of one or more auxiliary variable - names. There is no limit on the number of auxiliary variables that - can be provided on this line; however, lists of information provided - in subsequent blocks must have a column of data for each auxiliary - variable name defined here. The number of auxiliary variables - detected on this line determines the value for naux. Comments cannot - be provided anywhere on this line as they will be interpreted as - auxiliary variable names. Auxiliary variables may not be used by the - package, but they will be available for use by other parts of the - program. The program will terminate with an error if auxiliary - variables are specified on more than one line in the options block. - boundnames : boolean - * boundnames (boolean) keyword to indicate that boundary names may be - provided with the list of lake cells. - print_input : boolean - * print_input (boolean) keyword to indicate that the list of lake - information will be written to the listing file immediately after it - is read. - print_stage : boolean - * print_stage (boolean) keyword to indicate that the list of lake - stages will be printed to the listing file for every stress period in - which "HEAD PRINT" is specified in Output Control. If there is no - Output Control option and PRINT_STAGE is specified, then stages are - printed for the last time step of each stress period. - print_flows : boolean - * print_flows (boolean) keyword to indicate that the list of lake flow - rates will be printed to the listing file for every stress period - time step in which "BUDGET PRINT" is specified in Output Control. If - there is no Output Control option and "PRINT_FLOWS" is specified, - then flow rates are printed for the last time step of each stress - period. - save_flows : boolean - * save_flows (boolean) keyword to indicate that lake flow terms will be - written to the file specified with "BUDGET FILEOUT" in Output - Control. - stage_filerecord : [stagefile] - * stagefile (string) name of the binary output file to write stage - information. - budget_filerecord : [budgetfile] - * budgetfile (string) name of the binary output file to write budget - information. - timeseries : {varname:data} or timeseries data - * Contains data for the ts package. Data can be stored in a dictionary - containing data for the ts package with variable names as keys and - package data as values. Data just for the timeseries variable is also - acceptable. See ts package documentation for more information. - observations : {varname:data} or continuous data - * Contains data for the obs package. Data can be stored in a dictionary - containing data for the obs package with variable names as keys and - package data as values. Data just for the observations variable is - also acceptable. See obs package documentation for more information. - mover : boolean - * mover (boolean) keyword to indicate that this instance of the LAK - Package can be used with the Water Mover (MVR) Package. When the - MOVER option is specified, additional memory is allocated within the - package to store the available, provided, and received water. - surfdep : double - * surfdep (double) real value that defines the surface depression depth - for VERTICAL lake-GWF connections. If specified, SURFDEP must be - greater than or equal to zero. If SURFDEP is not specified, a default - value of zero is used for all vertical lake-GWF connections. 
- time_conversion : double - * time_conversion (double) value that is used in converting outlet flow - terms that use Manning's equation or gravitational acceleration to - consistent time units. TIME_CONVERSION should be set to 1.0, 60.0, - 3,600.0, 86,400.0, and 31,557,600.0 when using time units - (TIME_UNITS) of seconds, minutes, hours, days, or years in the - simulation, respectively. CONVTIME does not need to be specified if - no lake outlets are specified or TIME_UNITS are seconds. - length_conversion : double - * length_conversion (double) real value that is used in converting - outlet flow terms that use Manning's equation or gravitational - acceleration to consistent length units. LENGTH_CONVERSION should be - set to 3.28081, 1.0, and 100.0 when using length units (LENGTH_UNITS) - of feet, meters, or centimeters in the simulation, respectively. - LENGTH_CONVERSION does not need to be specified if no lake outlets - are specified or LENGTH_UNITS are meters. - nlakes : integer - * nlakes (integer) value specifying the number of lakes that will be - simulated for all stress periods. - noutlets : integer - * noutlets (integer) value specifying the number of outlets that will - be simulated for all stress periods. If NOUTLETS is not specified, a - default value of zero is used. - ntables : integer - * ntables (integer) value specifying the number of lakes tables that - will be used to define the lake stage, volume relation, and surface - area. If NTABLES is not specified, a default value of zero is used. - packagedata : [lakeno, strt, nlakeconn, aux, boundname] - * lakeno (integer) integer value that defines the lake number - associated with the specified PACKAGEDATA data on the line. LAKENO - must be greater than zero and less than or equal to NLAKES. Lake - information must be specified for every lake or the program will - terminate with an error. The program will also terminate with an - error if information for a lake is specified more than once. This - argument is an index variable, which means that it should be treated - as zero-based when working with FloPy and Python. Flopy will - automatically subtract one when loading index variables and add one - when writing index variables. - * strt (double) real value that defines the starting stage for the - lake. - * nlakeconn (integer) integer value that defines the number of GWF - cells connected to this (LAKENO) lake. There can only be one vertical - lake connection to each GWF cell. NLAKECONN must be greater than - zero. - * aux (double) represents the values of the auxiliary variables for - each lake. The values of auxiliary variables must be present for each - lake. The values must be specified in the order of the auxiliary - variables specified in the OPTIONS block. If the package supports - time series and the Options block includes a TIMESERIESFILE entry - (see the "Time-Variable Input" section), values can be obtained from - a time series by entering the time-series name in place of a numeric - value. - * boundname (string) name of the lake cell. BOUNDNAME is an ASCII - character variable that can contain as many as 40 characters. If - BOUNDNAME contains spaces in it, then the entire name must be - enclosed within single quotes. - connectiondata : [lakeno, iconn, cellid, claktype, bedleak, belev, telev, - connlen, connwidth] - * lakeno (integer) integer value that defines the lake number - associated with the specified CONNECTIONDATA data on the line. LAKENO - must be greater than zero and less than or equal to NLAKES. 
Lake - connection information must be specified for every lake connection to - the GWF model (NLAKECONN) or the program will terminate with an - error. The program will also terminate with an error if connection - information for a lake connection to the GWF model is specified more - than once. This argument is an index variable, which means that it - should be treated as zero-based when working with FloPy and Python. - Flopy will automatically subtract one when loading index variables - and add one when writing index variables. - * iconn (integer) integer value that defines the GWF connection number - for this lake connection entry. ICONN must be greater than zero and - less than or equal to NLAKECONN for lake LAKENO. This argument is an - index variable, which means that it should be treated as zero-based - when working with FloPy and Python. Flopy will automatically subtract - one when loading index variables and add one when writing index - variables. - * cellid ((integer, ...)) is the cell identifier, and depends on the - type of grid that is used for the simulation. For a structured grid - that uses the DIS input file, CELLID is the layer, row, and column. - For a grid that uses the DISV input file, CELLID is the layer and - CELL2D number. If the model uses the unstructured discretization - (DISU) input file, CELLID is the node number for the cell. This - argument is an index variable, which means that it should be treated - as zero-based when working with FloPy and Python. Flopy will - automatically subtract one when loading index variables and add one - when writing index variables. - * claktype (string) character string that defines the lake-GWF - connection type for the lake connection. Possible lake-GWF connection - type strings include: VERTICAL--character keyword to indicate the - lake-GWF connection is vertical and connection conductance - calculations use the hydraulic conductivity corresponding to the - :math:`K_{33}` tensor component defined for CELLID in the NPF - package. HORIZONTAL--character keyword to indicate the lake-GWF - connection is horizontal and connection conductance calculations use - the hydraulic conductivity corresponding to the :math:`K_{11}` tensor - component defined for CELLID in the NPF package. EMBEDDEDH--character - keyword to indicate the lake-GWF connection is embedded in a single - cell and connection conductance calculations use the hydraulic - conductivity corresponding to the :math:`K_{11}` tensor component - defined for CELLID in the NPF package. EMBEDDEDV--character keyword - to indicate the lake-GWF connection is embedded in a single cell and - connection conductance calculations use the hydraulic conductivity - corresponding to the :math:`K_{33}` tensor component defined for - CELLID in the NPF package. Embedded lakes can only be connected to a - single cell (NLAKECONN = 1) and there must be a lake table associated - with each embedded lake. - * bedleak (double) character string or real value that defines the bed - leakance for the lake-GWF connection. BEDLEAK must be greater than or - equal to zero or specified to be NONE. If BEDLEAK is specified to be - NONE, the lake-GWF connection conductance is solely a function of - aquifer properties in the connected GWF cell and lakebed sediments - are assumed to be absent. - * belev (double) real value that defines the bottom elevation for a - HORIZONTAL lake-GWF connection. Any value can be specified if - CLAKTYPE is VERTICAL, EMBEDDEDH, or EMBEDDEDV. 
If CLAKTYPE is - HORIZONTAL and BELEV is not equal to TELEV, BELEV must be greater - than or equal to the bottom of the GWF cell CELLID. If BELEV is equal - to TELEV, BELEV is reset to the bottom of the GWF cell CELLID. - * telev (double) real value that defines the top elevation for a - HORIZONTAL lake-GWF connection. Any value can be specified if - CLAKTYPE is VERTICAL, EMBEDDEDH, or EMBEDDEDV. If CLAKTYPE is - HORIZONTAL and TELEV is not equal to BELEV, TELEV must be less than - or equal to the top of the GWF cell CELLID. If TELEV is equal to - BELEV, TELEV is reset to the top of the GWF cell CELLID. - * connlen (double) real value that defines the distance between the - connected GWF CELLID node and the lake for a HORIZONTAL, EMBEDDEDH, - or EMBEDDEDV lake-GWF connection. CONLENN must be greater than zero - for a HORIZONTAL, EMBEDDEDH, or EMBEDDEDV lake-GWF connection. Any - value can be specified if CLAKTYPE is VERTICAL. - * connwidth (double) real value that defines the connection face width - for a HORIZONTAL lake-GWF connection. CONNWIDTH must be greater than - zero for a HORIZONTAL lake-GWF connection. Any value can be specified - if CLAKTYPE is VERTICAL, EMBEDDEDH, or EMBEDDEDV. - tables : [lakeno, tab6_filename] - * lakeno (integer) integer value that defines the lake number - associated with the specified TABLES data on the line. LAKENO must be - greater than zero and less than or equal to NLAKES. The program will - terminate with an error if table information for a lake is specified - more than once or the number of specified tables is less than - NTABLES. This argument is an index variable, which means that it - should be treated as zero-based when working with FloPy and Python. - Flopy will automatically subtract one when loading index variables - and add one when writing index variables. - * tab6_filename (string) character string that defines the path and - filename for the file containing lake table data for the lake - connection. The CTABNAME file includes the number of entries in the - file and the relation between stage, surface area, and volume for - each entry in the file. Lake table files for EMBEDDEDH and EMBEDDEDV - lake-GWF connections also include lake-GWF exchange area data for - each entry in the file. Input instructions for the CTABNAME file is - included at the LAK package lake table file input instructions - section. - outlets : [outletno, lakein, lakeout, couttype, invert, width, rough, - slope] - * outletno (integer) integer value that defines the outlet number - associated with the specified OUTLETS data on the line. OUTLETNO must - be greater than zero and less than or equal to NOUTLETS. Outlet - information must be specified for every outlet or the program will - terminate with an error. The program will also terminate with an - error if information for a outlet is specified more than once. This - argument is an index variable, which means that it should be treated - as zero-based when working with FloPy and Python. Flopy will - automatically subtract one when loading index variables and add one - when writing index variables. - * lakein (integer) integer value that defines the lake number that - outlet is connected to. LAKEIN must be greater than zero and less - than or equal to NLAKES. This argument is an index variable, which - means that it should be treated as zero-based when working with FloPy - and Python. Flopy will automatically subtract one when loading index - variables and add one when writing index variables. 
- * lakeout (integer) integer value that defines the lake number that - outlet discharge from lake outlet OUTLETNO is routed to. LAKEOUT must - be greater than or equal to zero and less than or equal to NLAKES. If - LAKEOUT is zero, outlet discharge from lake outlet OUTLETNO is - discharged to an external boundary. This argument is an index - variable, which means that it should be treated as zero-based when - working with FloPy and Python. Flopy will automatically subtract one - when loading index variables and add one when writing index - variables. - * couttype (string) character string that defines the outlet type for - the outlet OUTLETNO. Possible COUTTYPE strings include: SPECIFIED-- - character keyword to indicate the outlet is defined as a specified - flow. MANNING--character keyword to indicate the outlet is defined - using Manning's equation. WEIR--character keyword to indicate the - outlet is defined using a sharp weir equation. - * invert (double) real value that defines the invert elevation for the - lake outlet. Any value can be specified if COUTTYPE is SPECIFIED. If - the Options block includes a TIMESERIESFILE entry (see the "Time- - Variable Input" section), values can be obtained from a time series - by entering the time-series name in place of a numeric value. - * width (double) real value that defines the width of the lake outlet. - Any value can be specified if COUTTYPE is SPECIFIED. If the Options - block includes a TIMESERIESFILE entry (see the "Time-Variable Input" - section), values can be obtained from a time series by entering the - time-series name in place of a numeric value. - * rough (double) real value that defines the roughness coefficient for - the lake outlet. Any value can be specified if COUTTYPE is not - MANNING. If the Options block includes a TIMESERIESFILE entry (see - the "Time-Variable Input" section), values can be obtained from a - time series by entering the time-series name in place of a numeric - value. - * slope (double) real value that defines the bed slope for the lake - outlet. Any value can be specified if COUTTYPE is not MANNING. If the - Options block includes a TIMESERIESFILE entry (see the "Time-Variable - Input" section), values can be obtained from a time series by - entering the time-series name in place of a numeric value. - perioddata : [number, laksetting] - * number (integer) integer value that defines the lake or outlet number - associated with the specified PERIOD data on the line. NUMBER must be - greater than zero and less than or equal to NLAKES for a lake number - and less than or equal to NOUTLETS for an outlet number. This - argument is an index variable, which means that it should be treated - as zero-based when working with FloPy and Python. Flopy will - automatically subtract one when loading index variables and add one - when writing index variables. - * laksetting (keystring) line of information that is parsed into a - keyword and values. Keyword values that can be used to start the - LAKSETTING string include both keywords for lake settings and - keywords for outlet settings. Keywords for lake settings include: - STATUS, STAGE, RAINFALL, EVAPORATION, RUNOFF, INFLOW, WITHDRAWAL, and - AUXILIARY. Keywords for outlet settings include RATE, INVERT, WIDTH, - SLOPE, and ROUGH. - status : [string] - * status (string) keyword option to define lake status. STATUS - can be ACTIVE, INACTIVE, or CONSTANT. By default, STATUS is - ACTIVE. 
- stage : [string] - * stage (string) real or character value that defines the stage - for the lake. The specified STAGE is only applied if the lake - is a constant stage lake. If the Options block includes a - TIMESERIESFILE entry (see the "Time-Variable Input" section), - values can be obtained from a time series by entering the - time-series name in place of a numeric value. - rainfall : [string] - * rainfall (string) real or character value that defines the - rainfall rate :math:`(LT^{-1})` for the lake. Value must be - greater than or equal to zero. If the Options block includes - a TIMESERIESFILE entry (see the "Time-Variable Input" - section), values can be obtained from a time series by - entering the time-series name in place of a numeric value. - evaporation : [string] - * evaporation (string) real or character value that defines the - maximum evaporation rate :math:`(LT^{-1})` for the lake. - Value must be greater than or equal to zero. If the Options - block includes a TIMESERIESFILE entry (see the "Time-Variable - Input" section), values can be obtained from a time series by - entering the time-series name in place of a numeric value. - runoff : [string] - * runoff (string) real or character value that defines the - runoff rate :math:`(L^3 T^{-1})` for the lake. Value must be - greater than or equal to zero. If the Options block includes - a TIMESERIESFILE entry (see the "Time-Variable Input" - section), values can be obtained from a time series by - entering the time-series name in place of a numeric value. - inflow : [string] - * inflow (string) real or character value that defines the - volumetric inflow rate :math:`(L^3 T^{-1})` for the lake. - Value must be greater than or equal to zero. If the Options - block includes a TIMESERIESFILE entry (see the "Time-Variable - Input" section), values can be obtained from a time series by - entering the time-series name in place of a numeric value. By - default, inflow rates are zero for each lake. - withdrawal : [string] - * withdrawal (string) real or character value that defines the - maximum withdrawal rate :math:`(L^3 T^{-1})` for the lake. - Value must be greater than or equal to zero. If the Options - block includes a TIMESERIESFILE entry (see the "Time-Variable - Input" section), values can be obtained from a time series by - entering the time-series name in place of a numeric value. - rate : [string] - * rate (string) real or character value that defines the - extraction rate for the lake outflow. A positive value - indicates inflow and a negative value indicates outflow from - the lake. RATE only applies to active (IBOUND :math:`>` 0) - lakes. A specified RATE is only applied if COUTTYPE for the - OUTLETNO is SPECIFIED. If the Options block includes a - TIMESERIESFILE entry (see the "Time-Variable Input" section), - values can be obtained from a time series by entering the - time-series name in place of a numeric value. By default, the - RATE for each SPECIFIED lake outlet is zero. - invert : [string] - * invert (string) real or character value that defines the - invert elevation for the lake outlet. A specified INVERT - value is only used for active lakes if COUTTYPE for lake - outlet OUTLETNO is not SPECIFIED. If the Options block - includes a TIMESERIESFILE entry (see the "Time-Variable - Input" section), values can be obtained from a time series by - entering the time-series name in place of a numeric value. - width : [string] - * width (string) real or character value that defines the width - of the lake outlet. 
A specified WIDTH value is only used for - active lakes if COUTTYPE for lake outlet OUTLETNO is not - SPECIFIED. If the Options block includes a TIMESERIESFILE - entry (see the "Time-Variable Input" section), values can be - obtained from a time series by entering the time-series name - in place of a numeric value. - slope : [string] - * slope (string) real or character value that defines the bed - slope for the lake outlet. A specified SLOPE value is only - used for active lakes if COUTTYPE for lake outlet OUTLETNO is - MANNING. If the Options block includes a TIMESERIESFILE entry - (see the "Time-Variable Input" section), values can be - obtained from a time series by entering the time-series name - in place of a numeric value. - rough : [string] - * rough (string) real value that defines the roughness - coefficient for the lake outlet. Any value can be specified - if COUTTYPE is not MANNING. If the Options block includes a - TIMESERIESFILE entry (see the "Time-Variable Input" section), - values can be obtained from a time series by entering the - time-series name in place of a numeric value. - auxiliaryrecord : [auxname, auxval] - * auxname (string) name for the auxiliary variable to be - assigned AUXVAL. AUXNAME must match one of the auxiliary - variable names defined in the OPTIONS block. If AUXNAME does - not match one of the auxiliary variable names defined in the - OPTIONS block the data are ignored. - * auxval (double) value for the auxiliary variable. If the - Options block includes a TIMESERIESFILE entry (see the "Time- - Variable Input" section), values can be obtained from a time - series by entering the time-series name in place of a numeric - value. - filename : String - File name for this package. - pname : String - Package name for this package. - parent_file : MFPackage - Parent package file that references this package. Only needed for - utility packages (mfutl*). For example, mfutllaktab package must have - a mfgwflak package parent_file. 
- - """ - auxiliary = ListTemplateGenerator(('gwf6', 'lak', 'options', - 'auxiliary')) - stage_filerecord = ListTemplateGenerator(('gwf6', 'lak', 'options', - 'stage_filerecord')) - budget_filerecord = ListTemplateGenerator(('gwf6', 'lak', 'options', - 'budget_filerecord')) - ts_filerecord = ListTemplateGenerator(('gwf6', 'lak', 'options', - 'ts_filerecord')) - obs_filerecord = ListTemplateGenerator(('gwf6', 'lak', 'options', - 'obs_filerecord')) - packagedata = ListTemplateGenerator(('gwf6', 'lak', 'packagedata', - 'packagedata')) - connectiondata = ListTemplateGenerator(('gwf6', 'lak', - 'connectiondata', - 'connectiondata')) - tables = ListTemplateGenerator(('gwf6', 'lak', 'tables', 'tables')) - outlets = ListTemplateGenerator(('gwf6', 'lak', 'outlets', - 'outlets')) - perioddata = ListTemplateGenerator(('gwf6', 'lak', 'period', - 'perioddata')) - package_abbr = "gwflak" - _package_type = "lak" - dfn_file_name = "gwf-lak.dfn" - - dfn = [["block options", "name auxiliary", "type string", - "shape (naux)", "reader urword", "optional true"], - ["block options", "name boundnames", "type keyword", "shape", - "reader urword", "optional true"], - ["block options", "name print_input", "type keyword", - "reader urword", "optional true"], - ["block options", "name print_stage", "type keyword", - "reader urword", "optional true"], - ["block options", "name print_flows", "type keyword", - "reader urword", "optional true"], - ["block options", "name save_flows", "type keyword", - "reader urword", "optional true"], - ["block options", "name stage_filerecord", - "type record stage fileout stagefile", "shape", "reader urword", - "tagged true", "optional true"], - ["block options", "name stage", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name stagefile", "type string", - "preserve_case true", "shape", "in_record true", "reader urword", - "tagged false", "optional false"], - ["block options", "name budget_filerecord", - "type record budget fileout budgetfile", "shape", "reader urword", - "tagged true", "optional true"], - ["block options", "name budget", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name fileout", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name budgetfile", "type string", - "preserve_case true", "shape", "in_record true", "reader urword", - "tagged false", "optional false"], - ["block options", "name ts_filerecord", - "type record ts6 filein ts6_filename", "shape", "reader urword", - "tagged true", "optional true", "construct_package ts", - "construct_data timeseries", "parameter_name timeseries"], - ["block options", "name ts6", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name filein", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name ts6_filename", "type string", - "preserve_case true", "in_record true", "reader urword", - "optional false", "tagged false"], - ["block options", "name obs_filerecord", - "type record obs6 filein obs6_filename", "shape", "reader urword", - "tagged true", "optional true", "construct_package obs", - "construct_data continuous", "parameter_name observations"], - ["block options", "name obs6", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - 
["block options", "name obs6_filename", "type string", - "preserve_case true", "in_record true", "tagged false", - "reader urword", "optional false"], - ["block options", "name mover", "type keyword", "tagged true", - "reader urword", "optional true"], - ["block options", "name surfdep", "type double precision", - "reader urword", "optional true"], - ["block options", "name time_conversion", - "type double precision", "reader urword", "optional true"], - ["block options", "name length_conversion", - "type double precision", "reader urword", "optional true"], - ["block dimensions", "name nlakes", "type integer", - "reader urword", "optional false"], - ["block dimensions", "name noutlets", "type integer", - "reader urword", "optional false"], - ["block dimensions", "name ntables", "type integer", - "reader urword", "optional false"], - ["block packagedata", "name packagedata", - "type recarray lakeno strt nlakeconn aux boundname", - "shape (maxbound)", "reader urword"], - ["block packagedata", "name lakeno", "type integer", "shape", - "tagged false", "in_record true", "reader urword", - "numeric_index true"], - ["block packagedata", "name strt", "type double precision", - "shape", "tagged false", "in_record true", "reader urword"], - ["block packagedata", "name nlakeconn", "type integer", "shape", - "tagged false", "in_record true", "reader urword"], - ["block packagedata", "name aux", "type double precision", - "in_record true", "tagged false", "shape (naux)", "reader urword", - "time_series true", "optional true"], - ["block packagedata", "name boundname", "type string", "shape", - "tagged false", "in_record true", "reader urword", - "optional true"], - ["block connectiondata", "name connectiondata", - "type recarray lakeno iconn cellid claktype bedleak belev telev " - "connlen connwidth", - "shape (sum(nlakeconn))", "reader urword"], - ["block connectiondata", "name lakeno", "type integer", "shape", - "tagged false", "in_record true", "reader urword", - "numeric_index true"], - ["block connectiondata", "name iconn", "type integer", "shape", - "tagged false", "in_record true", "reader urword", - "numeric_index true"], - ["block connectiondata", "name cellid", "type integer", - "shape (ncelldim)", "tagged false", "in_record true", - "reader urword"], - ["block connectiondata", "name claktype", "type string", "shape", - "tagged false", "in_record true", "reader urword"], - ["block connectiondata", "name bedleak", "type double precision", - "shape", "tagged false", "in_record true", "reader urword"], - ["block connectiondata", "name belev", "type double precision", - "shape", "tagged false", "in_record true", "reader urword"], - ["block connectiondata", "name telev", "type double precision", - "shape", "tagged false", "in_record true", "reader urword"], - ["block connectiondata", "name connlen", "type double precision", - "shape", "tagged false", "in_record true", "reader urword"], - ["block connectiondata", "name connwidth", - "type double precision", "shape", "tagged false", - "in_record true", "reader urword"], - ["block tables", "name tables", - "type recarray lakeno tab6 filein tab6_filename", - "shape (ntables)", "reader urword"], - ["block tables", "name lakeno", "type integer", "shape", - "tagged false", "in_record true", "reader urword", - "numeric_index true"], - ["block tables", "name tab6", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block tables", "name filein", "type keyword", "shape", - "in_record true", "reader urword", 
"tagged true", - "optional false"], - ["block tables", "name tab6_filename", "type string", - "preserve_case true", "in_record true", "reader urword", - "optional false", "tagged false"], - ["block outlets", "name outlets", - "type recarray outletno lakein lakeout couttype invert width " - "rough slope", - "shape (noutlets)", "reader urword"], - ["block outlets", "name outletno", "type integer", "shape", - "tagged false", "in_record true", "reader urword", - "numeric_index true"], - ["block outlets", "name lakein", "type integer", "shape", - "tagged false", "in_record true", "reader urword", - "numeric_index true"], - ["block outlets", "name lakeout", "type integer", "shape", - "tagged false", "in_record true", "reader urword", - "numeric_index true"], - ["block outlets", "name couttype", "type string", "shape", - "tagged false", "in_record true", "reader urword"], - ["block outlets", "name invert", "type double precision", - "shape", "tagged false", "in_record true", "reader urword", - "time_series true"], - ["block outlets", "name width", "type double precision", "shape", - "tagged false", "in_record true", "reader urword", - "time_series true"], - ["block outlets", "name rough", "type double precision", "shape", - "tagged false", "in_record true", "reader urword", - "time_series true"], - ["block outlets", "name slope", "type double precision", "shape", - "tagged false", "in_record true", "reader urword", - "time_series true"], - ["block period", "name iper", "type integer", - "block_variable True", "in_record true", "tagged false", "shape", - "valid", "reader urword", "optional false"], - ["block period", "name perioddata", - "type recarray number laksetting", "shape", "reader urword"], - ["block period", "name number", "type integer", "shape", - "tagged false", "in_record true", "reader urword", - "numeric_index true"], - ["block period", "name laksetting", - "type keystring status stage rainfall evaporation runoff inflow " - "withdrawal rate invert width slope rough auxiliaryrecord", - "shape", "tagged false", "in_record true", "reader urword"], - ["block period", "name status", "type string", "shape", - "tagged true", "in_record true", "reader urword"], - ["block period", "name stage", "type string", "shape", - "tagged true", "in_record true", "time_series true", - "reader urword"], - ["block period", "name rainfall", "type string", "shape", - "tagged true", "in_record true", "reader urword", - "time_series true"], - ["block period", "name evaporation", "type string", "shape", - "tagged true", "in_record true", "reader urword", - "time_series true"], - ["block period", "name runoff", "type string", "shape", - "tagged true", "in_record true", "reader urword", - "time_series true"], - ["block period", "name inflow", "type string", "shape", - "tagged true", "in_record true", "reader urword", - "time_series true"], - ["block period", "name withdrawal", "type string", "shape", - "tagged true", "in_record true", "reader urword", - "time_series true"], - ["block period", "name rate", "type string", "shape", - "tagged true", "in_record true", "reader urword", - "time_series true"], - ["block period", "name invert", "type string", "shape", - "tagged true", "in_record true", "reader urword", - "time_series true"], - ["block period", "name rough", "type string", "shape", - "tagged true", "in_record true", "reader urword", - "time_series true"], - ["block period", "name width", "type string", "shape", - "tagged true", "in_record true", "reader urword", - "time_series true"], - ["block period", 
"name slope", "type string", "shape", - "tagged true", "in_record true", "reader urword", - "time_series true"], - ["block period", "name auxiliaryrecord", - "type record auxiliary auxname auxval", "shape", "tagged", - "in_record true", "reader urword"], - ["block period", "name auxiliary", "type keyword", "shape", - "in_record true", "reader urword"], - ["block period", "name auxname", "type string", "shape", - "tagged false", "in_record true", "reader urword"], - ["block period", "name auxval", "type double precision", "shape", - "tagged false", "in_record true", "reader urword", - "time_series true"]] - - def __init__(self, model, loading_package=False, auxiliary=None, - boundnames=None, print_input=None, print_stage=None, - print_flows=None, save_flows=None, stage_filerecord=None, - budget_filerecord=None, timeseries=None, observations=None, - mover=None, surfdep=None, time_conversion=None, - length_conversion=None, nlakes=None, noutlets=None, - ntables=None, packagedata=None, connectiondata=None, - tables=None, outlets=None, perioddata=None, filename=None, - pname=None, parent_file=None): - super(ModflowGwflak, self).__init__(model, "lak", filename, pname, - loading_package, parent_file) - - # set up variables - self.auxiliary = self.build_mfdata("auxiliary", auxiliary) - self.boundnames = self.build_mfdata("boundnames", boundnames) - self.print_input = self.build_mfdata("print_input", print_input) - self.print_stage = self.build_mfdata("print_stage", print_stage) - self.print_flows = self.build_mfdata("print_flows", print_flows) - self.save_flows = self.build_mfdata("save_flows", save_flows) - self.stage_filerecord = self.build_mfdata("stage_filerecord", - stage_filerecord) - self.budget_filerecord = self.build_mfdata("budget_filerecord", - budget_filerecord) - self._ts_filerecord = self.build_mfdata("ts_filerecord", - None) - self._ts_package = self.build_child_package("ts", timeseries, - "timeseries", - self._ts_filerecord) - self._obs_filerecord = self.build_mfdata("obs_filerecord", - None) - self._obs_package = self.build_child_package("obs", observations, - "continuous", - self._obs_filerecord) - self.mover = self.build_mfdata("mover", mover) - self.surfdep = self.build_mfdata("surfdep", surfdep) - self.time_conversion = self.build_mfdata("time_conversion", - time_conversion) - self.length_conversion = self.build_mfdata("length_conversion", - length_conversion) - self.nlakes = self.build_mfdata("nlakes", nlakes) - self.noutlets = self.build_mfdata("noutlets", noutlets) - self.ntables = self.build_mfdata("ntables", ntables) - self.packagedata = self.build_mfdata("packagedata", packagedata) - self.connectiondata = self.build_mfdata("connectiondata", - connectiondata) - self.tables = self.build_mfdata("tables", tables) - self.outlets = self.build_mfdata("outlets", outlets) - self.perioddata = self.build_mfdata("perioddata", perioddata) - self._init_complete = True +# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY +# mf6/utils/createpackages.py +from .. import mfpackage +from ..data.mfdatautil import ListTemplateGenerator + + +class ModflowGwflak(mfpackage.MFPackage): + """ + ModflowGwflak defines a lak package within a gwf6 model. + + Parameters + ---------- + model : MFModel + Model that this package is a part of. Package is automatically + added to model when it is initialized. + loading_package : bool + Do not set this parameter. It is intended for debugging and internal + processing purposes only. 
+ auxiliary : [string] + * auxiliary (string) defines an array of one or more auxiliary variable + names. There is no limit on the number of auxiliary variables that + can be provided on this line; however, lists of information provided + in subsequent blocks must have a column of data for each auxiliary + variable name defined here. The number of auxiliary variables + detected on this line determines the value for naux. Comments cannot + be provided anywhere on this line as they will be interpreted as + auxiliary variable names. Auxiliary variables may not be used by the + package, but they will be available for use by other parts of the + program. The program will terminate with an error if auxiliary + variables are specified on more than one line in the options block. + boundnames : boolean + * boundnames (boolean) keyword to indicate that boundary names may be + provided with the list of lake cells. + print_input : boolean + * print_input (boolean) keyword to indicate that the list of lake + information will be written to the listing file immediately after it + is read. + print_stage : boolean + * print_stage (boolean) keyword to indicate that the list of lake + stages will be printed to the listing file for every stress period in + which "HEAD PRINT" is specified in Output Control. If there is no + Output Control option and PRINT_STAGE is specified, then stages are + printed for the last time step of each stress period. + print_flows : boolean + * print_flows (boolean) keyword to indicate that the list of lake flow + rates will be printed to the listing file for every stress period + time step in which "BUDGET PRINT" is specified in Output Control. If + there is no Output Control option and "PRINT_FLOWS" is specified, + then flow rates are printed for the last time step of each stress + period. + save_flows : boolean + * save_flows (boolean) keyword to indicate that lake flow terms will be + written to the file specified with "BUDGET FILEOUT" in Output + Control. + stage_filerecord : [stagefile] + * stagefile (string) name of the binary output file to write stage + information. + budget_filerecord : [budgetfile] + * budgetfile (string) name of the binary output file to write budget + information. + timeseries : {varname:data} or timeseries data + * Contains data for the ts package. Data can be stored in a dictionary + containing data for the ts package with variable names as keys and + package data as values. Data just for the timeseries variable is also + acceptable. See ts package documentation for more information. + observations : {varname:data} or continuous data + * Contains data for the obs package. Data can be stored in a dictionary + containing data for the obs package with variable names as keys and + package data as values. Data just for the observations variable is + also acceptable. See obs package documentation for more information. + mover : boolean + * mover (boolean) keyword to indicate that this instance of the LAK + Package can be used with the Water Mover (MVR) Package. When the + MOVER option is specified, additional memory is allocated within the + package to store the available, provided, and received water. + surfdep : double + * surfdep (double) real value that defines the surface depression depth + for VERTICAL lake-GWF connections. If specified, SURFDEP must be + greater than or equal to zero. If SURFDEP is not specified, a default + value of zero is used for all vertical lake-GWF connections. 
+    time_conversion : double
+        * time_conversion (double) value that is used in converting outlet flow
+          terms that use Manning's equation or gravitational acceleration to
+          consistent time units. TIME_CONVERSION should be set to 1.0, 60.0,
+          3,600.0, 86,400.0, and 31,557,600.0 when using time units
+          (TIME_UNITS) of seconds, minutes, hours, days, or years in the
+          simulation, respectively. TIME_CONVERSION does not need to be
+          specified if no lake outlets are specified or TIME_UNITS are
+          seconds.
+    length_conversion : double
+        * length_conversion (double) real value that is used in converting
+          outlet flow terms that use Manning's equation or gravitational
+          acceleration to consistent length units. LENGTH_CONVERSION should be
+          set to 3.28081, 1.0, and 100.0 when using length units (LENGTH_UNITS)
+          of feet, meters, or centimeters in the simulation, respectively.
+          LENGTH_CONVERSION does not need to be specified if no lake outlets
+          are specified or LENGTH_UNITS are meters.
+    nlakes : integer
+        * nlakes (integer) value specifying the number of lakes that will be
+          simulated for all stress periods.
+    noutlets : integer
+        * noutlets (integer) value specifying the number of outlets that will
+          be simulated for all stress periods. If NOUTLETS is not specified, a
+          default value of zero is used.
+    ntables : integer
+        * ntables (integer) value specifying the number of lake tables that
+          will be used to define the lake stage, volume relation, and surface
+          area. If NTABLES is not specified, a default value of zero is used.
+    packagedata : [lakeno, strt, nlakeconn, aux, boundname]
+        * lakeno (integer) integer value that defines the lake number
+          associated with the specified PACKAGEDATA data on the line. LAKENO
+          must be greater than zero and less than or equal to NLAKES. Lake
+          information must be specified for every lake or the program will
+          terminate with an error. The program will also terminate with an
+          error if information for a lake is specified more than once. This
+          argument is an index variable, which means that it should be treated
+          as zero-based when working with FloPy and Python. Flopy will
+          automatically subtract one when loading index variables and add one
+          when writing index variables.
+        * strt (double) real value that defines the starting stage for the
+          lake.
+        * nlakeconn (integer) integer value that defines the number of GWF
+          cells connected to this (LAKENO) lake. There can only be one vertical
+          lake connection to each GWF cell. NLAKECONN must be greater than
+          zero.
+        * aux (double) represents the values of the auxiliary variables for
+          each lake. The values of auxiliary variables must be present for each
+          lake. The values must be specified in the order of the auxiliary
+          variables specified in the OPTIONS block. If the package supports
+          time series and the Options block includes a TIMESERIESFILE entry
+          (see the "Time-Variable Input" section), values can be obtained from
+          a time series by entering the time-series name in place of a numeric
+          value.
+        * boundname (string) name of the lake cell. BOUNDNAME is an ASCII
+          character variable that can contain as many as 40 characters. If
+          BOUNDNAME contains spaces in it, then the entire name must be
+          enclosed within single quotes.
+    connectiondata : [lakeno, iconn, cellid, claktype, bedleak, belev, telev,
+        connlen, connwidth]
+        * lakeno (integer) integer value that defines the lake number
+          associated with the specified CONNECTIONDATA data on the line. LAKENO
+          must be greater than zero and less than or equal to NLAKES.
Lake + connection information must be specified for every lake connection to + the GWF model (NLAKECONN) or the program will terminate with an + error. The program will also terminate with an error if connection + information for a lake connection to the GWF model is specified more + than once. This argument is an index variable, which means that it + should be treated as zero-based when working with FloPy and Python. + Flopy will automatically subtract one when loading index variables + and add one when writing index variables. + * iconn (integer) integer value that defines the GWF connection number + for this lake connection entry. ICONN must be greater than zero and + less than or equal to NLAKECONN for lake LAKENO. This argument is an + index variable, which means that it should be treated as zero-based + when working with FloPy and Python. Flopy will automatically subtract + one when loading index variables and add one when writing index + variables. + * cellid ((integer, ...)) is the cell identifier, and depends on the + type of grid that is used for the simulation. For a structured grid + that uses the DIS input file, CELLID is the layer, row, and column. + For a grid that uses the DISV input file, CELLID is the layer and + CELL2D number. If the model uses the unstructured discretization + (DISU) input file, CELLID is the node number for the cell. This + argument is an index variable, which means that it should be treated + as zero-based when working with FloPy and Python. Flopy will + automatically subtract one when loading index variables and add one + when writing index variables. + * claktype (string) character string that defines the lake-GWF + connection type for the lake connection. Possible lake-GWF connection + type strings include: VERTICAL--character keyword to indicate the + lake-GWF connection is vertical and connection conductance + calculations use the hydraulic conductivity corresponding to the + :math:`K_{33}` tensor component defined for CELLID in the NPF + package. HORIZONTAL--character keyword to indicate the lake-GWF + connection is horizontal and connection conductance calculations use + the hydraulic conductivity corresponding to the :math:`K_{11}` tensor + component defined for CELLID in the NPF package. EMBEDDEDH--character + keyword to indicate the lake-GWF connection is embedded in a single + cell and connection conductance calculations use the hydraulic + conductivity corresponding to the :math:`K_{11}` tensor component + defined for CELLID in the NPF package. EMBEDDEDV--character keyword + to indicate the lake-GWF connection is embedded in a single cell and + connection conductance calculations use the hydraulic conductivity + corresponding to the :math:`K_{33}` tensor component defined for + CELLID in the NPF package. Embedded lakes can only be connected to a + single cell (NLAKECONN = 1) and there must be a lake table associated + with each embedded lake. + * bedleak (double) character string or real value that defines the bed + leakance for the lake-GWF connection. BEDLEAK must be greater than or + equal to zero or specified to be NONE. If BEDLEAK is specified to be + NONE, the lake-GWF connection conductance is solely a function of + aquifer properties in the connected GWF cell and lakebed sediments + are assumed to be absent. + * belev (double) real value that defines the bottom elevation for a + HORIZONTAL lake-GWF connection. Any value can be specified if + CLAKTYPE is VERTICAL, EMBEDDEDH, or EMBEDDEDV. 
If CLAKTYPE is
+ HORIZONTAL and BELEV is not equal to TELEV, BELEV must be greater
+ than or equal to the bottom of the GWF cell CELLID. If BELEV is equal
+ to TELEV, BELEV is reset to the bottom of the GWF cell CELLID.
+ * telev (double) real value that defines the top elevation for a
+ HORIZONTAL lake-GWF connection. Any value can be specified if
+ CLAKTYPE is VERTICAL, EMBEDDEDH, or EMBEDDEDV. If CLAKTYPE is
+ HORIZONTAL and TELEV is not equal to BELEV, TELEV must be less than
+ or equal to the top of the GWF cell CELLID. If TELEV is equal to
+ BELEV, TELEV is reset to the top of the GWF cell CELLID.
+ * connlen (double) real value that defines the distance between the
+ connected GWF CELLID node and the lake for a HORIZONTAL, EMBEDDEDH,
+ or EMBEDDEDV lake-GWF connection. CONNLEN must be greater than zero
+ for a HORIZONTAL, EMBEDDEDH, or EMBEDDEDV lake-GWF connection. Any
+ value can be specified if CLAKTYPE is VERTICAL.
+ * connwidth (double) real value that defines the connection face width
+ for a HORIZONTAL lake-GWF connection. CONNWIDTH must be greater than
+ zero for a HORIZONTAL lake-GWF connection. Any value can be specified
+ if CLAKTYPE is VERTICAL, EMBEDDEDH, or EMBEDDEDV.
+ tables : [lakeno, tab6_filename]
+ * lakeno (integer) integer value that defines the lake number
+ associated with the specified TABLES data on the line. LAKENO must be
+ greater than zero and less than or equal to NLAKES. The program will
+ terminate with an error if table information for a lake is specified
+ more than once or the number of specified tables is less than
+ NTABLES. This argument is an index variable, which means that it
+ should be treated as zero-based when working with FloPy and Python.
+ Flopy will automatically subtract one when loading index variables
+ and add one when writing index variables.
+ * tab6_filename (string) character string that defines the path and
+ filename for the file containing lake table data for the lake
+ connection. The CTABNAME file includes the number of entries in the
+ file and the relation between stage, surface area, and volume for
+ each entry in the file. Lake table files for EMBEDDEDH and EMBEDDEDV
+ lake-GWF connections also include lake-GWF exchange area data for
+ each entry in the file. Input instructions for the CTABNAME file are
+ included in the LAK package lake table file input instructions
+ section.
+ outlets : [outletno, lakein, lakeout, couttype, invert, width, rough,
+ slope]
+ * outletno (integer) integer value that defines the outlet number
+ associated with the specified OUTLETS data on the line. OUTLETNO must
+ be greater than zero and less than or equal to NOUTLETS. Outlet
+ information must be specified for every outlet or the program will
+ terminate with an error. The program will also terminate with an
+ error if information for an outlet is specified more than once. This
+ argument is an index variable, which means that it should be treated
+ as zero-based when working with FloPy and Python. Flopy will
+ automatically subtract one when loading index variables and add one
+ when writing index variables.
+ * lakein (integer) integer value that defines the lake number that the
+ outlet is connected to. LAKEIN must be greater than zero and less
+ than or equal to NLAKES. This argument is an index variable, which
+ means that it should be treated as zero-based when working with FloPy
+ and Python. Flopy will automatically subtract one when loading index
+ variables and add one when writing index variables.
+ * lakeout (integer) integer value that defines the lake number that + outlet discharge from lake outlet OUTLETNO is routed to. LAKEOUT must + be greater than or equal to zero and less than or equal to NLAKES. If + LAKEOUT is zero, outlet discharge from lake outlet OUTLETNO is + discharged to an external boundary. This argument is an index + variable, which means that it should be treated as zero-based when + working with FloPy and Python. Flopy will automatically subtract one + when loading index variables and add one when writing index + variables. + * couttype (string) character string that defines the outlet type for + the outlet OUTLETNO. Possible COUTTYPE strings include: SPECIFIED-- + character keyword to indicate the outlet is defined as a specified + flow. MANNING--character keyword to indicate the outlet is defined + using Manning's equation. WEIR--character keyword to indicate the + outlet is defined using a sharp weir equation. + * invert (double) real value that defines the invert elevation for the + lake outlet. Any value can be specified if COUTTYPE is SPECIFIED. If + the Options block includes a TIMESERIESFILE entry (see the "Time- + Variable Input" section), values can be obtained from a time series + by entering the time-series name in place of a numeric value. + * width (double) real value that defines the width of the lake outlet. + Any value can be specified if COUTTYPE is SPECIFIED. If the Options + block includes a TIMESERIESFILE entry (see the "Time-Variable Input" + section), values can be obtained from a time series by entering the + time-series name in place of a numeric value. + * rough (double) real value that defines the roughness coefficient for + the lake outlet. Any value can be specified if COUTTYPE is not + MANNING. If the Options block includes a TIMESERIESFILE entry (see + the "Time-Variable Input" section), values can be obtained from a + time series by entering the time-series name in place of a numeric + value. + * slope (double) real value that defines the bed slope for the lake + outlet. Any value can be specified if COUTTYPE is not MANNING. If the + Options block includes a TIMESERIESFILE entry (see the "Time-Variable + Input" section), values can be obtained from a time series by + entering the time-series name in place of a numeric value. + perioddata : [number, laksetting] + * number (integer) integer value that defines the lake or outlet number + associated with the specified PERIOD data on the line. NUMBER must be + greater than zero and less than or equal to NLAKES for a lake number + and less than or equal to NOUTLETS for an outlet number. This + argument is an index variable, which means that it should be treated + as zero-based when working with FloPy and Python. Flopy will + automatically subtract one when loading index variables and add one + when writing index variables. + * laksetting (keystring) line of information that is parsed into a + keyword and values. Keyword values that can be used to start the + LAKSETTING string include both keywords for lake settings and + keywords for outlet settings. Keywords for lake settings include: + STATUS, STAGE, RAINFALL, EVAPORATION, RUNOFF, INFLOW, WITHDRAWAL, and + AUXILIARY. Keywords for outlet settings include RATE, INVERT, WIDTH, + SLOPE, and ROUGH. + status : [string] + * status (string) keyword option to define lake status. STATUS + can be ACTIVE, INACTIVE, or CONSTANT. By default, STATUS is + ACTIVE. 
+ stage : [string] + * stage (string) real or character value that defines the stage + for the lake. The specified STAGE is only applied if the lake + is a constant stage lake. If the Options block includes a + TIMESERIESFILE entry (see the "Time-Variable Input" section), + values can be obtained from a time series by entering the + time-series name in place of a numeric value. + rainfall : [string] + * rainfall (string) real or character value that defines the + rainfall rate :math:`(LT^{-1})` for the lake. Value must be + greater than or equal to zero. If the Options block includes + a TIMESERIESFILE entry (see the "Time-Variable Input" + section), values can be obtained from a time series by + entering the time-series name in place of a numeric value. + evaporation : [string] + * evaporation (string) real or character value that defines the + maximum evaporation rate :math:`(LT^{-1})` for the lake. + Value must be greater than or equal to zero. If the Options + block includes a TIMESERIESFILE entry (see the "Time-Variable + Input" section), values can be obtained from a time series by + entering the time-series name in place of a numeric value. + runoff : [string] + * runoff (string) real or character value that defines the + runoff rate :math:`(L^3 T^{-1})` for the lake. Value must be + greater than or equal to zero. If the Options block includes + a TIMESERIESFILE entry (see the "Time-Variable Input" + section), values can be obtained from a time series by + entering the time-series name in place of a numeric value. + inflow : [string] + * inflow (string) real or character value that defines the + volumetric inflow rate :math:`(L^3 T^{-1})` for the lake. + Value must be greater than or equal to zero. If the Options + block includes a TIMESERIESFILE entry (see the "Time-Variable + Input" section), values can be obtained from a time series by + entering the time-series name in place of a numeric value. By + default, inflow rates are zero for each lake. + withdrawal : [string] + * withdrawal (string) real or character value that defines the + maximum withdrawal rate :math:`(L^3 T^{-1})` for the lake. + Value must be greater than or equal to zero. If the Options + block includes a TIMESERIESFILE entry (see the "Time-Variable + Input" section), values can be obtained from a time series by + entering the time-series name in place of a numeric value. + rate : [string] + * rate (string) real or character value that defines the + extraction rate for the lake outflow. A positive value + indicates inflow and a negative value indicates outflow from + the lake. RATE only applies to active (IBOUND :math:`>` 0) + lakes. A specified RATE is only applied if COUTTYPE for the + OUTLETNO is SPECIFIED. If the Options block includes a + TIMESERIESFILE entry (see the "Time-Variable Input" section), + values can be obtained from a time series by entering the + time-series name in place of a numeric value. By default, the + RATE for each SPECIFIED lake outlet is zero. + invert : [string] + * invert (string) real or character value that defines the + invert elevation for the lake outlet. A specified INVERT + value is only used for active lakes if COUTTYPE for lake + outlet OUTLETNO is not SPECIFIED. If the Options block + includes a TIMESERIESFILE entry (see the "Time-Variable + Input" section), values can be obtained from a time series by + entering the time-series name in place of a numeric value. + width : [string] + * width (string) real or character value that defines the width + of the lake outlet. 
A specified WIDTH value is only used for
+ active lakes if COUTTYPE for lake outlet OUTLETNO is not
+ SPECIFIED. If the Options block includes a TIMESERIESFILE
+ entry (see the "Time-Variable Input" section), values can be
+ obtained from a time series by entering the time-series name
+ in place of a numeric value.
+ slope : [string]
+ * slope (string) real or character value that defines the bed
+ slope for the lake outlet. A specified SLOPE value is only
+ used for active lakes if COUTTYPE for lake outlet OUTLETNO is
+ MANNING. If the Options block includes a TIMESERIESFILE entry
+ (see the "Time-Variable Input" section), values can be
+ obtained from a time series by entering the time-series name
+ in place of a numeric value.
+ rough : [string]
+ * rough (string) real or character value that defines the
+ roughness coefficient for the lake outlet. Any value can be
+ specified if COUTTYPE is not MANNING. If the Options block
+ includes a TIMESERIESFILE entry (see the "Time-Variable
+ Input" section), values can be obtained from a time series by
+ entering the time-series name in place of a numeric value.
+ auxiliaryrecord : [auxname, auxval]
+ * auxname (string) name for the auxiliary variable to be
+ assigned AUXVAL. AUXNAME must match one of the auxiliary
+ variable names defined in the OPTIONS block. If AUXNAME does
+ not match one of the auxiliary variable names defined in the
+ OPTIONS block, the data are ignored.
+ * auxval (double) value for the auxiliary variable. If the
+ Options block includes a TIMESERIESFILE entry (see the "Time-
+ Variable Input" section), values can be obtained from a time
+ series by entering the time-series name in place of a numeric
+ value.
+ filename : String
+ File name for this package.
+ pname : String
+ Package name for this package.
+ parent_file : MFPackage
+ Parent package file that references this package. Only needed for
+ utility packages (mfutl*). For example, mfutllaktab package must have
+ an mfgwflak package parent_file.
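+
+ Examples
+ --------
+ A minimal, hypothetical sketch of constructing this package: it
+ assumes an existing GWF model object named ``gwf`` and uses
+ illustrative values for one vertically connected lake on a DIS grid.
+ Record fields follow the PACKAGEDATA, CONNECTIONDATA, and PERIOD
+ descriptions above, with index variables given zero-based.
+
+ >>> import flopy
+ >>> # hypothetical values for illustration only; ``gwf`` is assumed
+ >>> # to be a configured flopy.mf6.ModflowGwf model
+ >>> lak = flopy.mf6.ModflowGwflak(
+ ...     gwf,
+ ...     time_conversion=86400.0,  # TIME_UNITS of days
+ ...     nlakes=1, noutlets=0, ntables=0,
+ ...     # [lakeno, strt, nlakeconn]
+ ...     packagedata=[[0, 110.0, 1]],
+ ...     # [lakeno, iconn, cellid, claktype, bedleak,
+ ...     #  belev, telev, connlen, connwidth]
+ ...     connectiondata=[[0, 0, (0, 4, 4), 'VERTICAL', 0.1,
+ ...                      0.0, 0.0, 0.0, 0.0]],
+ ...     # stress period 0: [number, laksetting keyword, value]
+ ...     perioddata={0: [[0, 'RAINFALL', 0.002]]},
+ ... )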
+ + """ + auxiliary = ListTemplateGenerator(('gwf6', 'lak', 'options', + 'auxiliary')) + stage_filerecord = ListTemplateGenerator(('gwf6', 'lak', 'options', + 'stage_filerecord')) + budget_filerecord = ListTemplateGenerator(('gwf6', 'lak', 'options', + 'budget_filerecord')) + ts_filerecord = ListTemplateGenerator(('gwf6', 'lak', 'options', + 'ts_filerecord')) + obs_filerecord = ListTemplateGenerator(('gwf6', 'lak', 'options', + 'obs_filerecord')) + packagedata = ListTemplateGenerator(('gwf6', 'lak', 'packagedata', + 'packagedata')) + connectiondata = ListTemplateGenerator(('gwf6', 'lak', + 'connectiondata', + 'connectiondata')) + tables = ListTemplateGenerator(('gwf6', 'lak', 'tables', 'tables')) + outlets = ListTemplateGenerator(('gwf6', 'lak', 'outlets', + 'outlets')) + perioddata = ListTemplateGenerator(('gwf6', 'lak', 'period', + 'perioddata')) + package_abbr = "gwflak" + _package_type = "lak" + dfn_file_name = "gwf-lak.dfn" + + dfn = [["block options", "name auxiliary", "type string", + "shape (naux)", "reader urword", "optional true"], + ["block options", "name boundnames", "type keyword", "shape", + "reader urword", "optional true"], + ["block options", "name print_input", "type keyword", + "reader urword", "optional true"], + ["block options", "name print_stage", "type keyword", + "reader urword", "optional true"], + ["block options", "name print_flows", "type keyword", + "reader urword", "optional true"], + ["block options", "name save_flows", "type keyword", + "reader urword", "optional true"], + ["block options", "name stage_filerecord", + "type record stage fileout stagefile", "shape", "reader urword", + "tagged true", "optional true"], + ["block options", "name stage", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name stagefile", "type string", + "preserve_case true", "shape", "in_record true", "reader urword", + "tagged false", "optional false"], + ["block options", "name budget_filerecord", + "type record budget fileout budgetfile", "shape", "reader urword", + "tagged true", "optional true"], + ["block options", "name budget", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name fileout", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name budgetfile", "type string", + "preserve_case true", "shape", "in_record true", "reader urword", + "tagged false", "optional false"], + ["block options", "name ts_filerecord", + "type record ts6 filein ts6_filename", "shape", "reader urword", + "tagged true", "optional true", "construct_package ts", + "construct_data timeseries", "parameter_name timeseries"], + ["block options", "name ts6", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name filein", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name ts6_filename", "type string", + "preserve_case true", "in_record true", "reader urword", + "optional false", "tagged false"], + ["block options", "name obs_filerecord", + "type record obs6 filein obs6_filename", "shape", "reader urword", + "tagged true", "optional true", "construct_package obs", + "construct_data continuous", "parameter_name observations"], + ["block options", "name obs6", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + 
["block options", "name obs6_filename", "type string", + "preserve_case true", "in_record true", "tagged false", + "reader urword", "optional false"], + ["block options", "name mover", "type keyword", "tagged true", + "reader urword", "optional true"], + ["block options", "name surfdep", "type double precision", + "reader urword", "optional true"], + ["block options", "name time_conversion", + "type double precision", "reader urword", "optional true"], + ["block options", "name length_conversion", + "type double precision", "reader urword", "optional true"], + ["block dimensions", "name nlakes", "type integer", + "reader urword", "optional false"], + ["block dimensions", "name noutlets", "type integer", + "reader urword", "optional false"], + ["block dimensions", "name ntables", "type integer", + "reader urword", "optional false"], + ["block packagedata", "name packagedata", + "type recarray lakeno strt nlakeconn aux boundname", + "shape (maxbound)", "reader urword"], + ["block packagedata", "name lakeno", "type integer", "shape", + "tagged false", "in_record true", "reader urword", + "numeric_index true"], + ["block packagedata", "name strt", "type double precision", + "shape", "tagged false", "in_record true", "reader urword"], + ["block packagedata", "name nlakeconn", "type integer", "shape", + "tagged false", "in_record true", "reader urword"], + ["block packagedata", "name aux", "type double precision", + "in_record true", "tagged false", "shape (naux)", "reader urword", + "time_series true", "optional true"], + ["block packagedata", "name boundname", "type string", "shape", + "tagged false", "in_record true", "reader urword", + "optional true"], + ["block connectiondata", "name connectiondata", + "type recarray lakeno iconn cellid claktype bedleak belev telev " + "connlen connwidth", + "shape (sum(nlakeconn))", "reader urword"], + ["block connectiondata", "name lakeno", "type integer", "shape", + "tagged false", "in_record true", "reader urword", + "numeric_index true"], + ["block connectiondata", "name iconn", "type integer", "shape", + "tagged false", "in_record true", "reader urword", + "numeric_index true"], + ["block connectiondata", "name cellid", "type integer", + "shape (ncelldim)", "tagged false", "in_record true", + "reader urword"], + ["block connectiondata", "name claktype", "type string", "shape", + "tagged false", "in_record true", "reader urword"], + ["block connectiondata", "name bedleak", "type double precision", + "shape", "tagged false", "in_record true", "reader urword"], + ["block connectiondata", "name belev", "type double precision", + "shape", "tagged false", "in_record true", "reader urword"], + ["block connectiondata", "name telev", "type double precision", + "shape", "tagged false", "in_record true", "reader urword"], + ["block connectiondata", "name connlen", "type double precision", + "shape", "tagged false", "in_record true", "reader urword"], + ["block connectiondata", "name connwidth", + "type double precision", "shape", "tagged false", + "in_record true", "reader urword"], + ["block tables", "name tables", + "type recarray lakeno tab6 filein tab6_filename", + "shape (ntables)", "reader urword"], + ["block tables", "name lakeno", "type integer", "shape", + "tagged false", "in_record true", "reader urword", + "numeric_index true"], + ["block tables", "name tab6", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block tables", "name filein", "type keyword", "shape", + "in_record true", "reader urword", 
"tagged true", + "optional false"], + ["block tables", "name tab6_filename", "type string", + "preserve_case true", "in_record true", "reader urword", + "optional false", "tagged false"], + ["block outlets", "name outlets", + "type recarray outletno lakein lakeout couttype invert width " + "rough slope", + "shape (noutlets)", "reader urword"], + ["block outlets", "name outletno", "type integer", "shape", + "tagged false", "in_record true", "reader urword", + "numeric_index true"], + ["block outlets", "name lakein", "type integer", "shape", + "tagged false", "in_record true", "reader urword", + "numeric_index true"], + ["block outlets", "name lakeout", "type integer", "shape", + "tagged false", "in_record true", "reader urword", + "numeric_index true"], + ["block outlets", "name couttype", "type string", "shape", + "tagged false", "in_record true", "reader urword"], + ["block outlets", "name invert", "type double precision", + "shape", "tagged false", "in_record true", "reader urword", + "time_series true"], + ["block outlets", "name width", "type double precision", "shape", + "tagged false", "in_record true", "reader urword", + "time_series true"], + ["block outlets", "name rough", "type double precision", "shape", + "tagged false", "in_record true", "reader urword", + "time_series true"], + ["block outlets", "name slope", "type double precision", "shape", + "tagged false", "in_record true", "reader urword", + "time_series true"], + ["block period", "name iper", "type integer", + "block_variable True", "in_record true", "tagged false", "shape", + "valid", "reader urword", "optional false"], + ["block period", "name perioddata", + "type recarray number laksetting", "shape", "reader urword"], + ["block period", "name number", "type integer", "shape", + "tagged false", "in_record true", "reader urword", + "numeric_index true"], + ["block period", "name laksetting", + "type keystring status stage rainfall evaporation runoff inflow " + "withdrawal rate invert width slope rough auxiliaryrecord", + "shape", "tagged false", "in_record true", "reader urword"], + ["block period", "name status", "type string", "shape", + "tagged true", "in_record true", "reader urword"], + ["block period", "name stage", "type string", "shape", + "tagged true", "in_record true", "time_series true", + "reader urword"], + ["block period", "name rainfall", "type string", "shape", + "tagged true", "in_record true", "reader urword", + "time_series true"], + ["block period", "name evaporation", "type string", "shape", + "tagged true", "in_record true", "reader urword", + "time_series true"], + ["block period", "name runoff", "type string", "shape", + "tagged true", "in_record true", "reader urword", + "time_series true"], + ["block period", "name inflow", "type string", "shape", + "tagged true", "in_record true", "reader urword", + "time_series true"], + ["block period", "name withdrawal", "type string", "shape", + "tagged true", "in_record true", "reader urword", + "time_series true"], + ["block period", "name rate", "type string", "shape", + "tagged true", "in_record true", "reader urword", + "time_series true"], + ["block period", "name invert", "type string", "shape", + "tagged true", "in_record true", "reader urword", + "time_series true"], + ["block period", "name rough", "type string", "shape", + "tagged true", "in_record true", "reader urword", + "time_series true"], + ["block period", "name width", "type string", "shape", + "tagged true", "in_record true", "reader urword", + "time_series true"], + ["block period", 
"name slope", "type string", "shape", + "tagged true", "in_record true", "reader urword", + "time_series true"], + ["block period", "name auxiliaryrecord", + "type record auxiliary auxname auxval", "shape", "tagged", + "in_record true", "reader urword"], + ["block period", "name auxiliary", "type keyword", "shape", + "in_record true", "reader urword"], + ["block period", "name auxname", "type string", "shape", + "tagged false", "in_record true", "reader urword"], + ["block period", "name auxval", "type double precision", "shape", + "tagged false", "in_record true", "reader urword", + "time_series true"]] + + def __init__(self, model, loading_package=False, auxiliary=None, + boundnames=None, print_input=None, print_stage=None, + print_flows=None, save_flows=None, stage_filerecord=None, + budget_filerecord=None, timeseries=None, observations=None, + mover=None, surfdep=None, time_conversion=None, + length_conversion=None, nlakes=None, noutlets=None, + ntables=None, packagedata=None, connectiondata=None, + tables=None, outlets=None, perioddata=None, filename=None, + pname=None, parent_file=None): + super(ModflowGwflak, self).__init__(model, "lak", filename, pname, + loading_package, parent_file) + + # set up variables + self.auxiliary = self.build_mfdata("auxiliary", auxiliary) + self.boundnames = self.build_mfdata("boundnames", boundnames) + self.print_input = self.build_mfdata("print_input", print_input) + self.print_stage = self.build_mfdata("print_stage", print_stage) + self.print_flows = self.build_mfdata("print_flows", print_flows) + self.save_flows = self.build_mfdata("save_flows", save_flows) + self.stage_filerecord = self.build_mfdata("stage_filerecord", + stage_filerecord) + self.budget_filerecord = self.build_mfdata("budget_filerecord", + budget_filerecord) + self._ts_filerecord = self.build_mfdata("ts_filerecord", + None) + self._ts_package = self.build_child_package("ts", timeseries, + "timeseries", + self._ts_filerecord) + self._obs_filerecord = self.build_mfdata("obs_filerecord", + None) + self._obs_package = self.build_child_package("obs", observations, + "continuous", + self._obs_filerecord) + self.mover = self.build_mfdata("mover", mover) + self.surfdep = self.build_mfdata("surfdep", surfdep) + self.time_conversion = self.build_mfdata("time_conversion", + time_conversion) + self.length_conversion = self.build_mfdata("length_conversion", + length_conversion) + self.nlakes = self.build_mfdata("nlakes", nlakes) + self.noutlets = self.build_mfdata("noutlets", noutlets) + self.ntables = self.build_mfdata("ntables", ntables) + self.packagedata = self.build_mfdata("packagedata", packagedata) + self.connectiondata = self.build_mfdata("connectiondata", + connectiondata) + self.tables = self.build_mfdata("tables", tables) + self.outlets = self.build_mfdata("outlets", outlets) + self.perioddata = self.build_mfdata("perioddata", perioddata) + self._init_complete = True diff --git a/flopy/mf6/modflow/mfgwfmaw.py b/flopy/mf6/modflow/mfgwfmaw.py index 659cca7409..b4155629d5 100644 --- a/flopy/mf6/modflow/mfgwfmaw.py +++ b/flopy/mf6/modflow/mfgwfmaw.py @@ -1,561 +1,561 @@ -# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY -# mf6/utils/createpackages.py -from .. import mfpackage -from ..data.mfdatautil import ListTemplateGenerator - - -class ModflowGwfmaw(mfpackage.MFPackage): - """ - ModflowGwfmaw defines a maw package within a gwf6 model. - - Parameters - ---------- - model : MFModel - Model that this package is a part of. 
Package is automatically - added to model when it is initialized. - loading_package : bool - Do not set this parameter. It is intended for debugging and internal - processing purposes only. - auxiliary : [string] - * auxiliary (string) defines an array of one or more auxiliary variable - names. There is no limit on the number of auxiliary variables that - can be provided on this line; however, lists of information provided - in subsequent blocks must have a column of data for each auxiliary - variable name defined here. The number of auxiliary variables - detected on this line determines the value for naux. Comments cannot - be provided anywhere on this line as they will be interpreted as - auxiliary variable names. Auxiliary variables may not be used by the - package, but they will be available for use by other parts of the - program. The program will terminate with an error if auxiliary - variables are specified on more than one line in the options block. - boundnames : boolean - * boundnames (boolean) keyword to indicate that boundary names may be - provided with the list of multi-aquifer well cells. - print_input : boolean - * print_input (boolean) keyword to indicate that the list of multi- - aquifer well information will be written to the listing file - immediately after it is read. - print_head : boolean - * print_head (boolean) keyword to indicate that the list of multi- - aquifer well heads will be printed to the listing file for every - stress period in which "HEAD PRINT" is specified in Output Control. - If there is no Output Control option and PRINT_HEAD is specified, - then heads are printed for the last time step of each stress period. - print_flows : boolean - * print_flows (boolean) keyword to indicate that the list of multi- - aquifer well flow rates will be printed to the listing file for every - stress period time step in which "BUDGET PRINT" is specified in - Output Control. If there is no Output Control option and - "PRINT_FLOWS" is specified, then flow rates are printed for the last - time step of each stress period. - save_flows : boolean - * save_flows (boolean) keyword to indicate that multi-aquifer well flow - terms will be written to the file specified with "BUDGET FILEOUT" in - Output Control. - stage_filerecord : [headfile] - * headfile (string) name of the binary output file to write stage - information. - budget_filerecord : [budgetfile] - * budgetfile (string) name of the binary output file to write budget - information. - no_well_storage : boolean - * no_well_storage (boolean) keyword that deactivates inclusion of well - storage contributions to the multi-aquifer well package continuity - equation. - flowing_wells : boolean - * flowing_wells (boolean) keyword that activates the flowing wells - option for the multi-aquifer well package. - shutdown_theta : double - * shutdown_theta (double) value that defines the weight applied to - discharge rate for wells that limit the water level in a discharging - well (defined using the HEAD_LIMIT keyword in the stress period - data). SHUTDOWN_THETA is used to control discharge rate oscillations - when the flow rate from the aquifer is less than the specified flow - rate from the aquifer to the well. Values range between 0.0 and 1.0, - and larger values increase the weight (decrease under-relaxation) - applied to the well discharge rate. 
The HEAD_LIMIT option has been - included to facilitate backward compatibility with previous versions - of MODFLOW but use of the RATE_SCALING option instead of the - HEAD_LIMIT option is recommended. By default, SHUTDOWN_THETA is 0.7. - shutdown_kappa : double - * shutdown_kappa (double) value that defines the weight applied to - discharge rate for wells that limit the water level in a discharging - well (defined using the HEAD_LIMIT keyword in the stress period - data). SHUTDOWN_KAPPA is used to control discharge rate oscillations - when the flow rate from the aquifer is less than the specified flow - rate from the aquifer to the well. Values range between 0.0 and 1.0, - and larger values increase the weight applied to the well discharge - rate. The HEAD_LIMIT option has been included to facilitate backward - compatibility with previous versions of MODFLOW but use of the - RATE_SCALING option instead of the HEAD_LIMIT option is recommended. - By default, SHUTDOWN_KAPPA is 0.0001. - timeseries : {varname:data} or timeseries data - * Contains data for the ts package. Data can be stored in a dictionary - containing data for the ts package with variable names as keys and - package data as values. Data just for the timeseries variable is also - acceptable. See ts package documentation for more information. - observations : {varname:data} or continuous data - * Contains data for the obs package. Data can be stored in a dictionary - containing data for the obs package with variable names as keys and - package data as values. Data just for the observations variable is - also acceptable. See obs package documentation for more information. - mover : boolean - * mover (boolean) keyword to indicate that this instance of the MAW - Package can be used with the Water Mover (MVR) Package. When the - MOVER option is specified, additional memory is allocated within the - package to store the available, provided, and received water. - nmawwells : integer - * nmawwells (integer) integer value specifying the number of multi- - aquifer wells that will be simulated for all stress periods. - packagedata : [wellno, radius, bottom, strt, condeqn, ngwfnodes, aux, - boundname] - * wellno (integer) integer value that defines the well number - associated with the specified PACKAGEDATA data on the line. WELLNO - must be greater than zero and less than or equal to NMAWWELLS. Multi- - aquifer well information must be specified for every multi-aquifer - well or the program will terminate with an error. The program will - also terminate with an error if information for a multi-aquifer well - is specified more than once. This argument is an index variable, - which means that it should be treated as zero-based when working with - FloPy and Python. Flopy will automatically subtract one when loading - index variables and add one when writing index variables. - * radius (double) radius for the multi-aquifer well. - * bottom (double) bottom elevation of the multi-aquifer well. The well - bottom is reset to the cell bottom in the lowermost GWF cell - connection in cases where the specified well bottom is above the - bottom of this GWF cell. - * strt (double) starting head for the multi-aquifer well. - * condeqn (string) character string that defines the conductance - equation that is used to calculate the saturated conductance for the - multi-aquifer well. Possible multi-aquifer well CONDEQN strings - include: SPECIFIED--character keyword to indicate the multi-aquifer - well saturated conductance will be specified. 
THIEM--character - keyword to indicate the multi-aquifer well saturated conductance will - be calculated using the Thiem equation, which considers the cell top - and bottom, aquifer hydraulic conductivity, and effective cell and - well radius. SKIN--character keyword to indicate that the multi- - aquifer well saturated conductance will be calculated using the cell - top and bottom, aquifer and screen hydraulic conductivity, and well - and skin radius. CUMULATIVE--character keyword to indicate that the - multi-aquifer well saturated conductance will be calculated using a - combination of the Thiem and SKIN equations. MEAN--character keyword - to indicate the multi-aquifer well saturated conductance will be - calculated using the aquifer and screen top and bottom, aquifer and - screen hydraulic conductivity, and well and skin radius. - * ngwfnodes (integer) integer value that defines the number of GWF - nodes connected to this (WELLNO) multi-aquifer well. NGWFNODES must - be greater than zero. - * aux (double) represents the values of the auxiliary variables for - each multi-aquifer well. The values of auxiliary variables must be - present for each multi-aquifer well. The values must be specified in - the order of the auxiliary variables specified in the OPTIONS block. - If the package supports time series and the Options block includes a - TIMESERIESFILE entry (see the "Time-Variable Input" section), values - can be obtained from a time series by entering the time-series name - in place of a numeric value. - * boundname (string) name of the multi-aquifer well cell. BOUNDNAME is - an ASCII character variable that can contain as many as 40 - characters. If BOUNDNAME contains spaces in it, then the entire name - must be enclosed within single quotes. - connectiondata : [wellno, icon, cellid, scrn_top, scrn_bot, hk_skin, - radius_skin] - * wellno (integer) integer value that defines the well number - associated with the specified CONNECTIONDATA data on the line. WELLNO - must be greater than zero and less than or equal to NMAWWELLS. Multi- - aquifer well connection information must be specified for every - multi-aquifer well connection to the GWF model (NGWFNODES) or the - program will terminate with an error. The program will also terminate - with an error if connection information for a multi-aquifer well - connection to the GWF model is specified more than once. This - argument is an index variable, which means that it should be treated - as zero-based when working with FloPy and Python. Flopy will - automatically subtract one when loading index variables and add one - when writing index variables. - * icon (integer) integer value that defines the GWF connection number - for this multi-aquifer well connection entry. ICONN must be greater - than zero and less than or equal to NGWFNODES for multi-aquifer well - WELLNO. This argument is an index variable, which means that it - should be treated as zero-based when working with FloPy and Python. - Flopy will automatically subtract one when loading index variables - and add one when writing index variables. - * cellid ((integer, ...)) is the cell identifier, and depends on the - type of grid that is used for the simulation. For a structured grid - that uses the DIS input file, CELLID is the layer, row, and column. - For a grid that uses the DISV input file, CELLID is the layer and - CELL2D number. If the model uses the unstructured discretization - (DISU) input file, CELLID is the node number for the cell. 
One or - more screened intervals can be connected to the same CELLID if - CONDEQN for a well is MEAN. The program will terminate with an error - if MAW wells using SPECIFIED, THIEM, SKIN, or CUMULATIVE conductance - equations have more than one connection to the same CELLID. This - argument is an index variable, which means that it should be treated - as zero-based when working with FloPy and Python. Flopy will - automatically subtract one when loading index variables and add one - when writing index variables. - * scrn_top (double) value that defines the top elevation of the screen - for the multi-aquifer well connection. If the specified SCRN_TOP is - greater than the top of the GWF cell it is set equal to the top of - the cell. SCRN_TOP can be any value if CONDEQN is SPECIFIED, THIEM, - SKIN, or COMPOSITE and SCRN_TOP is set to the top of the cell. - * scrn_bot (double) value that defines the bottom elevation of the - screen for the multi-aquifer well connection. If the specified - SCRN_BOT is less than the bottom of the GWF cell it is set equal to - the bottom of the cell. SCRN_BOT can be any value if CONDEQN is - SPECIFIED, THIEM, SKIN, or COMPOSITE and SCRN_BOT is set to the - bottom of the cell. - * hk_skin (double) value that defines the skin (filter pack) hydraulic - conductivity (if CONDEQN for the multi-aquifer well is SKIN, - CUMULATIVE, or MEAN) or conductance (if CONDEQN for the multi-aquifer - well is SPECIFIED) for each GWF node connected to the multi-aquifer - well (NGWFNODES). HK_SKIN can be any value if CONDEQN is THIEM. - * radius_skin (double) real value that defines the skin radius (filter - pack radius) for the multi-aquifer well. RADIUS_SKIN can be any value - if CONDEQN is SPECIFIED or THIEM. Otherwise, RADIUS_SKIN must be - greater than RADIUS for the multi-aquifer well. - perioddata : [wellno, mawsetting] - * wellno (integer) integer value that defines the well number - associated with the specified PERIOD data on the line. WELLNO must be - greater than zero and less than or equal to NMAWWELLS. This argument - is an index variable, which means that it should be treated as zero- - based when working with FloPy and Python. Flopy will automatically - subtract one when loading index variables and add one when writing - index variables. - * mawsetting (keystring) line of information that is parsed into a - keyword and values. Keyword values that can be used to start the - MAWSETTING string include: STATUS, FLOWING_WELL, RATE, WELL_HEAD, - HEAD_LIMIT, SHUT_OFF, RATE_SCALING, and AUXILIARY. - status : [string] - * status (string) keyword option to define well status. STATUS - can be ACTIVE, INACTIVE, or CONSTANT. By default, STATUS is - ACTIVE. - flowing_wellrecord : [fwelev, fwcond, fwrlen] - * fwelev (double) elevation used to determine whether or not - the well is flowing. - * fwcond (double) conductance used to calculate the discharge - of a free flowing well. Flow occurs when the head in the well - is above the well top elevation (FWELEV). - * fwrlen (double) length used to reduce the conductance of the - flowing well. When the head in the well drops below the well - top plus the reduction length, then the conductance is - reduced. This reduction length can be used to improve the - stability of simulations with flowing wells so that there is - not an abrupt change in flowing well rates. - rate : [double] - * rate (double) is the volumetric pumping rate for the multi- - aquifer well. 
A positive value indicates recharge and a - negative value indicates discharge (pumping). RATE only - applies to active (IBOUND :math:`>` 0) multi-aquifer wells. - If the Options block includes a TIMESERIESFILE entry (see the - "Time-Variable Input" section), values can be obtained from a - time series by entering the time-series name in place of a - numeric value. By default, the RATE for each multi-aquifer - well is zero. - well_head : [double] - * well_head (double) is the head in the multi-aquifer well. - WELL_HEAD is only applied to constant head (STATUS is - CONSTANT) and inactive (STATUS is INACTIVE) multi-aquifer - wells. If the Options block includes a TIMESERIESFILE entry - (see the "Time-Variable Input" section), values can be - obtained from a time series by entering the time-series name - in place of a numeric value. - head_limit : [string] - * head_limit (string) is the limiting water level (head) in the - well, which is the minimum of the well RATE or the well - inflow rate from the aquifer. HEAD_LIMIT can be applied to - extraction wells (RATE :math:`<` 0) or injection wells (RATE - :math:`>` 0). HEAD\_LIMIT can be deactivated by specifying - the text string `OFF'. The HEAD\_LIMIT option is based on the - HEAD\_LIMIT functionality available in the - MNW2~\citep{konikow2009} package for MODFLOW-2005. The - HEAD\_LIMIT option has been included to facilitate backward - compatibility with previous versions of MODFLOW but use of - the RATE\_SCALING option instead of the HEAD\_LIMIT option is - recommended. By default, HEAD\_LIMIT is `OFF'. - shutoffrecord : [minrate, maxrate] - * minrate (double) is the minimum rate that a well must exceed - to shutoff a well during a stress period. The well will shut - down during a time step if the flow rate to the well from the - aquifer is less than MINRATE. If a well is shut down during a - time step, reactivation of the well cannot occur until the - next time step to reduce oscillations. MINRATE must be less - than maxrate. - * maxrate (double) is the maximum rate that a well must exceed - to reactivate a well during a stress period. The well will - reactivate during a timestep if the well was shutdown during - the previous time step and the flow rate to the well from the - aquifer exceeds maxrate. Reactivation of the well cannot - occur until the next time step if a well is shutdown to - reduce oscillations. maxrate must be greater than MINRATE. - rate_scalingrecord : [pump_elevation, scaling_length] - * pump_elevation (double) is the elevation of the multi-aquifer - well pump (PUMP_ELEVATION). PUMP_ELEVATION should not be less - than the bottom elevation (BOTTOM) of the multi-aquifer well. - * scaling_length (double) height above the pump elevation - (SCALING_LENGTH). If the simulated well head is below this - elevation (pump elevation plus the scaling length), then the - pumping rate is reduced. - auxiliaryrecord : [auxname, auxval] - * auxname (string) name for the auxiliary variable to be - assigned AUXVAL. AUXNAME must match one of the auxiliary - variable names defined in the OPTIONS block. If AUXNAME does - not match one of the auxiliary variable names defined in the - OPTIONS block the data are ignored. - * auxval (double) value for the auxiliary variable. If the - Options block includes a TIMESERIESFILE entry (see the "Time- - Variable Input" section), values can be obtained from a time - series by entering the time-series name in place of a numeric - value. - filename : String - File name for this package. 
- pname : String - Package name for this package. - parent_file : MFPackage - Parent package file that references this package. Only needed for - utility packages (mfutl*). For example, mfutllaktab package must have - a mfgwflak package parent_file. - - """ - auxiliary = ListTemplateGenerator(('gwf6', 'maw', 'options', - 'auxiliary')) - stage_filerecord = ListTemplateGenerator(('gwf6', 'maw', 'options', - 'stage_filerecord')) - budget_filerecord = ListTemplateGenerator(('gwf6', 'maw', 'options', - 'budget_filerecord')) - ts_filerecord = ListTemplateGenerator(('gwf6', 'maw', 'options', - 'ts_filerecord')) - obs_filerecord = ListTemplateGenerator(('gwf6', 'maw', 'options', - 'obs_filerecord')) - packagedata = ListTemplateGenerator(('gwf6', 'maw', 'packagedata', - 'packagedata')) - connectiondata = ListTemplateGenerator(('gwf6', 'maw', - 'connectiondata', - 'connectiondata')) - perioddata = ListTemplateGenerator(('gwf6', 'maw', 'period', - 'perioddata')) - package_abbr = "gwfmaw" - _package_type = "maw" - dfn_file_name = "gwf-maw.dfn" - - dfn = [["block options", "name auxiliary", "type string", - "shape (naux)", "reader urword", "optional true"], - ["block options", "name boundnames", "type keyword", "shape", - "reader urword", "optional true"], - ["block options", "name print_input", "type keyword", - "reader urword", "optional true"], - ["block options", "name print_head", "type keyword", - "reader urword", "optional true"], - ["block options", "name print_flows", "type keyword", - "reader urword", "optional true"], - ["block options", "name save_flows", "type keyword", - "reader urword", "optional true"], - ["block options", "name stage_filerecord", - "type record head fileout headfile", "shape", "reader urword", - "tagged true", "optional true"], - ["block options", "name head", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name headfile", "type string", - "preserve_case true", "shape", "in_record true", "reader urword", - "tagged false", "optional false"], - ["block options", "name budget_filerecord", - "type record budget fileout budgetfile", "shape", "reader urword", - "tagged true", "optional true"], - ["block options", "name budget", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name fileout", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name budgetfile", "type string", - "preserve_case true", "shape", "in_record true", "reader urword", - "tagged false", "optional false"], - ["block options", "name no_well_storage", "type keyword", - "reader urword", "optional true"], - ["block options", "name flowing_wells", "type keyword", - "reader urword", "optional true"], - ["block options", "name shutdown_theta", "type double precision", - "reader urword", "optional true"], - ["block options", "name shutdown_kappa", "type double precision", - "reader urword", "optional true"], - ["block options", "name ts_filerecord", - "type record ts6 filein ts6_filename", "shape", "reader urword", - "tagged true", "optional true", "construct_package ts", - "construct_data timeseries", "parameter_name timeseries"], - ["block options", "name ts6", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name filein", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block 
options", "name ts6_filename", "type string", - "preserve_case true", "in_record true", "reader urword", - "optional false", "tagged false"], - ["block options", "name obs_filerecord", - "type record obs6 filein obs6_filename", "shape", "reader urword", - "tagged true", "optional true", "construct_package obs", - "construct_data continuous", "parameter_name observations"], - ["block options", "name obs6", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name obs6_filename", "type string", - "preserve_case true", "in_record true", "tagged false", - "reader urword", "optional false"], - ["block options", "name mover", "type keyword", "tagged true", - "reader urword", "optional true"], - ["block dimensions", "name nmawwells", "type integer", - "reader urword", "optional false"], - ["block packagedata", "name packagedata", - "type recarray wellno radius bottom strt condeqn ngwfnodes aux " - "boundname", - "shape (nmawwells)", "reader urword"], - ["block packagedata", "name wellno", "type integer", "shape", - "tagged false", "in_record true", "reader urword", - "numeric_index true"], - ["block packagedata", "name radius", "type double precision", - "shape", "tagged false", "in_record true", "reader urword"], - ["block packagedata", "name bottom", "type double precision", - "shape", "tagged false", "in_record true", "reader urword"], - ["block packagedata", "name strt", "type double precision", - "shape", "tagged false", "in_record true", "reader urword"], - ["block packagedata", "name condeqn", "type string", "shape", - "tagged false", "in_record true", "reader urword"], - ["block packagedata", "name ngwfnodes", "type integer", "shape", - "tagged false", "in_record true", "reader urword"], - ["block packagedata", "name aux", "type double precision", - "in_record true", "tagged false", "shape (naux)", "reader urword", - "time_series true", "optional true"], - ["block packagedata", "name boundname", "type string", "shape", - "tagged false", "in_record true", "reader urword", - "optional true"], - ["block connectiondata", "name connectiondata", - "type recarray wellno icon cellid scrn_top scrn_bot hk_skin " - "radius_skin", - "reader urword"], - ["block connectiondata", "name wellno", "type integer", "shape", - "tagged false", "in_record true", "reader urword", - "numeric_index true"], - ["block connectiondata", "name icon", "type integer", "shape", - "tagged false", "in_record true", "reader urword", - "numeric_index true"], - ["block connectiondata", "name cellid", "type integer", - "shape (ncelldim)", "tagged false", "in_record true", - "reader urword"], - ["block connectiondata", "name scrn_top", - "type double precision", "shape", "tagged false", - "in_record true", "reader urword"], - ["block connectiondata", "name scrn_bot", - "type double precision", "shape", "tagged false", - "in_record true", "reader urword"], - ["block connectiondata", "name hk_skin", "type double precision", - "shape", "tagged false", "in_record true", "reader urword"], - ["block connectiondata", "name radius_skin", - "type double precision", "shape", "tagged false", - "in_record true", "reader urword"], - ["block period", "name iper", "type integer", - "block_variable True", "in_record true", "tagged false", "shape", - "valid", "reader urword", "optional false"], - ["block period", "name perioddata", - "type recarray wellno mawsetting", "shape", "reader urword"], - ["block period", "name wellno", "type integer", "shape", - "tagged false", "in_record 
true", "reader urword", - "numeric_index true"], - ["block period", "name mawsetting", - "type keystring status flowing_wellrecord rate well_head " - "head_limit shutoffrecord rate_scalingrecord auxiliaryrecord", - "shape", "tagged false", "in_record true", "reader urword"], - ["block period", "name status", "type string", "shape", - "tagged true", "in_record true", "reader urword"], - ["block period", "name flowing_wellrecord", - "type record flowing_well fwelev fwcond fwrlen", "shape", - "tagged", "in_record true", "reader urword"], - ["block period", "name flowing_well", "type keyword", "shape", - "in_record true", "reader urword"], - ["block period", "name fwelev", "type double precision", "shape", - "tagged false", "in_record true", "reader urword"], - ["block period", "name fwcond", "type double precision", "shape", - "tagged false", "in_record true", "reader urword"], - ["block period", "name fwrlen", "type double precision", "shape", - "tagged false", "in_record true", "reader urword"], - ["block period", "name rate", "type double precision", "shape", - "tagged true", "in_record true", "reader urword", - "time_series true"], - ["block period", "name well_head", "type double precision", - "shape", "tagged true", "in_record true", "reader urword", - "time_series true"], - ["block period", "name head_limit", "type string", "shape", - "tagged true", "in_record true", "reader urword"], - ["block period", "name shutoffrecord", - "type record shut_off minrate maxrate", "shape", "tagged", - "in_record true", "reader urword"], - ["block period", "name shut_off", "type keyword", "shape", - "in_record true", "reader urword"], - ["block period", "name minrate", "type double precision", - "shape", "tagged false", "in_record true", "reader urword"], - ["block period", "name maxrate", "type double precision", - "shape", "tagged false", "in_record true", "reader urword"], - ["block period", "name rate_scalingrecord", - "type record rate_scaling pump_elevation scaling_length", "shape", - "tagged", "in_record true", "reader urword"], - ["block period", "name rate_scaling", "type keyword", "shape", - "in_record true", "reader urword"], - ["block period", "name pump_elevation", "type double precision", - "shape", "tagged false", "in_record true", "reader urword"], - ["block period", "name scaling_length", "type double precision", - "shape", "tagged false", "in_record true", "reader urword"], - ["block period", "name auxiliaryrecord", - "type record auxiliary auxname auxval", "shape", "tagged", - "in_record true", "reader urword"], - ["block period", "name auxiliary", "type keyword", "shape", - "in_record true", "reader urword"], - ["block period", "name auxname", "type string", "shape", - "tagged false", "in_record true", "reader urword"], - ["block period", "name auxval", "type double precision", "shape", - "tagged false", "in_record true", "reader urword", - "time_series true"]] - - def __init__(self, model, loading_package=False, auxiliary=None, - boundnames=None, print_input=None, print_head=None, - print_flows=None, save_flows=None, stage_filerecord=None, - budget_filerecord=None, no_well_storage=None, - flowing_wells=None, shutdown_theta=None, shutdown_kappa=None, - timeseries=None, observations=None, mover=None, - nmawwells=None, packagedata=None, connectiondata=None, - perioddata=None, filename=None, pname=None, parent_file=None): - super(ModflowGwfmaw, self).__init__(model, "maw", filename, pname, - loading_package, parent_file) - - # set up variables - self.auxiliary = 
self.build_mfdata("auxiliary", auxiliary) - self.boundnames = self.build_mfdata("boundnames", boundnames) - self.print_input = self.build_mfdata("print_input", print_input) - self.print_head = self.build_mfdata("print_head", print_head) - self.print_flows = self.build_mfdata("print_flows", print_flows) - self.save_flows = self.build_mfdata("save_flows", save_flows) - self.stage_filerecord = self.build_mfdata("stage_filerecord", - stage_filerecord) - self.budget_filerecord = self.build_mfdata("budget_filerecord", - budget_filerecord) - self.no_well_storage = self.build_mfdata("no_well_storage", - no_well_storage) - self.flowing_wells = self.build_mfdata("flowing_wells", flowing_wells) - self.shutdown_theta = self.build_mfdata("shutdown_theta", - shutdown_theta) - self.shutdown_kappa = self.build_mfdata("shutdown_kappa", - shutdown_kappa) - self._ts_filerecord = self.build_mfdata("ts_filerecord", - None) - self._ts_package = self.build_child_package("ts", timeseries, - "timeseries", - self._ts_filerecord) - self._obs_filerecord = self.build_mfdata("obs_filerecord", - None) - self._obs_package = self.build_child_package("obs", observations, - "continuous", - self._obs_filerecord) - self.mover = self.build_mfdata("mover", mover) - self.nmawwells = self.build_mfdata("nmawwells", nmawwells) - self.packagedata = self.build_mfdata("packagedata", packagedata) - self.connectiondata = self.build_mfdata("connectiondata", - connectiondata) - self.perioddata = self.build_mfdata("perioddata", perioddata) - self._init_complete = True +# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY +# mf6/utils/createpackages.py +from .. import mfpackage +from ..data.mfdatautil import ListTemplateGenerator + + +class ModflowGwfmaw(mfpackage.MFPackage): + """ + ModflowGwfmaw defines a maw package within a gwf6 model. + + Parameters + ---------- + model : MFModel + Model that this package is a part of. Package is automatically + added to model when it is initialized. + loading_package : bool + Do not set this parameter. It is intended for debugging and internal + processing purposes only. + auxiliary : [string] + * auxiliary (string) defines an array of one or more auxiliary variable + names. There is no limit on the number of auxiliary variables that + can be provided on this line; however, lists of information provided + in subsequent blocks must have a column of data for each auxiliary + variable name defined here. The number of auxiliary variables + detected on this line determines the value for naux. Comments cannot + be provided anywhere on this line as they will be interpreted as + auxiliary variable names. Auxiliary variables may not be used by the + package, but they will be available for use by other parts of the + program. The program will terminate with an error if auxiliary + variables are specified on more than one line in the options block. + boundnames : boolean + * boundnames (boolean) keyword to indicate that boundary names may be + provided with the list of multi-aquifer well cells. + print_input : boolean + * print_input (boolean) keyword to indicate that the list of multi- + aquifer well information will be written to the listing file + immediately after it is read. + print_head : boolean + * print_head (boolean) keyword to indicate that the list of multi- + aquifer well heads will be printed to the listing file for every + stress period in which "HEAD PRINT" is specified in Output Control. 
+ If there is no Output Control option and PRINT_HEAD is specified, + then heads are printed for the last time step of each stress period. + print_flows : boolean + * print_flows (boolean) keyword to indicate that the list of multi- + aquifer well flow rates will be printed to the listing file for every + stress period time step in which "BUDGET PRINT" is specified in + Output Control. If there is no Output Control option and + "PRINT_FLOWS" is specified, then flow rates are printed for the last + time step of each stress period. + save_flows : boolean + * save_flows (boolean) keyword to indicate that multi-aquifer well flow + terms will be written to the file specified with "BUDGET FILEOUT" in + Output Control. + stage_filerecord : [headfile] + * headfile (string) name of the binary output file to write stage + information. + budget_filerecord : [budgetfile] + * budgetfile (string) name of the binary output file to write budget + information. + no_well_storage : boolean + * no_well_storage (boolean) keyword that deactivates inclusion of well + storage contributions to the multi-aquifer well package continuity + equation. + flowing_wells : boolean + * flowing_wells (boolean) keyword that activates the flowing wells + option for the multi-aquifer well package. + shutdown_theta : double + * shutdown_theta (double) value that defines the weight applied to + discharge rate for wells that limit the water level in a discharging + well (defined using the HEAD_LIMIT keyword in the stress period + data). SHUTDOWN_THETA is used to control discharge rate oscillations + when the flow rate from the aquifer is less than the specified flow + rate from the aquifer to the well. Values range between 0.0 and 1.0, + and larger values increase the weight (decrease under-relaxation) + applied to the well discharge rate. The HEAD_LIMIT option has been + included to facilitate backward compatibility with previous versions + of MODFLOW but use of the RATE_SCALING option instead of the + HEAD_LIMIT option is recommended. By default, SHUTDOWN_THETA is 0.7. + shutdown_kappa : double + * shutdown_kappa (double) value that defines the weight applied to + discharge rate for wells that limit the water level in a discharging + well (defined using the HEAD_LIMIT keyword in the stress period + data). SHUTDOWN_KAPPA is used to control discharge rate oscillations + when the flow rate from the aquifer is less than the specified flow + rate from the aquifer to the well. Values range between 0.0 and 1.0, + and larger values increase the weight applied to the well discharge + rate. The HEAD_LIMIT option has been included to facilitate backward + compatibility with previous versions of MODFLOW but use of the + RATE_SCALING option instead of the HEAD_LIMIT option is recommended. + By default, SHUTDOWN_KAPPA is 0.0001. + timeseries : {varname:data} or timeseries data + * Contains data for the ts package. Data can be stored in a dictionary + containing data for the ts package with variable names as keys and + package data as values. Data just for the timeseries variable is also + acceptable. See ts package documentation for more information. + observations : {varname:data} or continuous data + * Contains data for the obs package. Data can be stored in a dictionary + containing data for the obs package with variable names as keys and + package data as values. Data just for the observations variable is + also acceptable. See obs package documentation for more information. 
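To make the {varname:data} convention concrete, the two accepted forms for the observations argument can be sketched as plain dictionaries (the file and observation names below are hypothetical, and the same pattern applies to timeseries with the ts package variable names):

    # Continuous data only: {output file: [(obsname, obstype, id), ...]}
    maw_obs = {"maw_obs.csv": [("mh1", "head", 1)]}

    # Or a dictionary keyed by obs-package variable names
    maw_obs_full = {"filename": "model.maw.obs", "continuous": maw_obs}

Either form can be passed directly as observations when ModflowGwfmaw is created; a fuller construction sketch follows the parameter list below.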
+ mover : boolean + * mover (boolean) keyword to indicate that this instance of the MAW + Package can be used with the Water Mover (MVR) Package. When the + MOVER option is specified, additional memory is allocated within the + package to store the available, provided, and received water. + nmawwells : integer + * nmawwells (integer) integer value specifying the number of multi- + aquifer wells that will be simulated for all stress periods. + packagedata : [wellno, radius, bottom, strt, condeqn, ngwfnodes, aux, + boundname] + * wellno (integer) integer value that defines the well number + associated with the specified PACKAGEDATA data on the line. WELLNO + must be greater than zero and less than or equal to NMAWWELLS. Multi- + aquifer well information must be specified for every multi-aquifer + well or the program will terminate with an error. The program will + also terminate with an error if information for a multi-aquifer well + is specified more than once. This argument is an index variable, + which means that it should be treated as zero-based when working with + FloPy and Python. Flopy will automatically subtract one when loading + index variables and add one when writing index variables. + * radius (double) radius for the multi-aquifer well. + * bottom (double) bottom elevation of the multi-aquifer well. The well + bottom is reset to the cell bottom in the lowermost GWF cell + connection in cases where the specified well bottom is above the + bottom of this GWF cell. + * strt (double) starting head for the multi-aquifer well. + * condeqn (string) character string that defines the conductance + equation that is used to calculate the saturated conductance for the + multi-aquifer well. Possible multi-aquifer well CONDEQN strings + include: SPECIFIED--character keyword to indicate the multi-aquifer + well saturated conductance will be specified. THIEM--character + keyword to indicate the multi-aquifer well saturated conductance will + be calculated using the Thiem equation, which considers the cell top + and bottom, aquifer hydraulic conductivity, and effective cell and + well radius. SKIN--character keyword to indicate that the multi- + aquifer well saturated conductance will be calculated using the cell + top and bottom, aquifer and screen hydraulic conductivity, and well + and skin radius. CUMULATIVE--character keyword to indicate that the + multi-aquifer well saturated conductance will be calculated using a + combination of the Thiem and SKIN equations. MEAN--character keyword + to indicate the multi-aquifer well saturated conductance will be + calculated using the aquifer and screen top and bottom, aquifer and + screen hydraulic conductivity, and well and skin radius. + * ngwfnodes (integer) integer value that defines the number of GWF + nodes connected to this (WELLNO) multi-aquifer well. NGWFNODES must + be greater than zero. + * aux (double) represents the values of the auxiliary variables for + each multi-aquifer well. The values of auxiliary variables must be + present for each multi-aquifer well. The values must be specified in + the order of the auxiliary variables specified in the OPTIONS block. + If the package supports time series and the Options block includes a + TIMESERIESFILE entry (see the "Time-Variable Input" section), values + can be obtained from a time series by entering the time-series name + in place of a numeric value. + * boundname (string) name of the multi-aquifer well cell. 
BOUNDNAME is + an ASCII character variable that can contain as many as 40 + characters. If BOUNDNAME contains spaces in it, then the entire name + must be enclosed within single quotes. + connectiondata : [wellno, icon, cellid, scrn_top, scrn_bot, hk_skin, + radius_skin] + * wellno (integer) integer value that defines the well number + associated with the specified CONNECTIONDATA data on the line. WELLNO + must be greater than zero and less than or equal to NMAWWELLS. Multi- + aquifer well connection information must be specified for every + multi-aquifer well connection to the GWF model (NGWFNODES) or the + program will terminate with an error. The program will also terminate + with an error if connection information for a multi-aquifer well + connection to the GWF model is specified more than once. This + argument is an index variable, which means that it should be treated + as zero-based when working with FloPy and Python. Flopy will + automatically subtract one when loading index variables and add one + when writing index variables. + * icon (integer) integer value that defines the GWF connection number + for this multi-aquifer well connection entry. ICONN must be greater + than zero and less than or equal to NGWFNODES for multi-aquifer well + WELLNO. This argument is an index variable, which means that it + should be treated as zero-based when working with FloPy and Python. + Flopy will automatically subtract one when loading index variables + and add one when writing index variables. + * cellid ((integer, ...)) is the cell identifier, and depends on the + type of grid that is used for the simulation. For a structured grid + that uses the DIS input file, CELLID is the layer, row, and column. + For a grid that uses the DISV input file, CELLID is the layer and + CELL2D number. If the model uses the unstructured discretization + (DISU) input file, CELLID is the node number for the cell. One or + more screened intervals can be connected to the same CELLID if + CONDEQN for a well is MEAN. The program will terminate with an error + if MAW wells using SPECIFIED, THIEM, SKIN, or CUMULATIVE conductance + equations have more than one connection to the same CELLID. This + argument is an index variable, which means that it should be treated + as zero-based when working with FloPy and Python. Flopy will + automatically subtract one when loading index variables and add one + when writing index variables. + * scrn_top (double) value that defines the top elevation of the screen + for the multi-aquifer well connection. If the specified SCRN_TOP is + greater than the top of the GWF cell it is set equal to the top of + the cell. SCRN_TOP can be any value if CONDEQN is SPECIFIED, THIEM, + SKIN, or COMPOSITE and SCRN_TOP is set to the top of the cell. + * scrn_bot (double) value that defines the bottom elevation of the + screen for the multi-aquifer well connection. If the specified + SCRN_BOT is less than the bottom of the GWF cell it is set equal to + the bottom of the cell. SCRN_BOT can be any value if CONDEQN is + SPECIFIED, THIEM, SKIN, or COMPOSITE and SCRN_BOT is set to the + bottom of the cell. + * hk_skin (double) value that defines the skin (filter pack) hydraulic + conductivity (if CONDEQN for the multi-aquifer well is SKIN, + CUMULATIVE, or MEAN) or conductance (if CONDEQN for the multi-aquifer + well is SPECIFIED) for each GWF node connected to the multi-aquifer + well (NGWFNODES). HK_SKIN can be any value if CONDEQN is THIEM. 
+ * radius_skin (double) real value that defines the skin radius (filter + pack radius) for the multi-aquifer well. RADIUS_SKIN can be any value + if CONDEQN is SPECIFIED or THIEM. Otherwise, RADIUS_SKIN must be + greater than RADIUS for the multi-aquifer well. + perioddata : [wellno, mawsetting] + * wellno (integer) integer value that defines the well number + associated with the specified PERIOD data on the line. WELLNO must be + greater than zero and less than or equal to NMAWWELLS. This argument + is an index variable, which means that it should be treated as zero- + based when working with FloPy and Python. Flopy will automatically + subtract one when loading index variables and add one when writing + index variables. + * mawsetting (keystring) line of information that is parsed into a + keyword and values. Keyword values that can be used to start the + MAWSETTING string include: STATUS, FLOWING_WELL, RATE, WELL_HEAD, + HEAD_LIMIT, SHUT_OFF, RATE_SCALING, and AUXILIARY. + status : [string] + * status (string) keyword option to define well status. STATUS + can be ACTIVE, INACTIVE, or CONSTANT. By default, STATUS is + ACTIVE. + flowing_wellrecord : [fwelev, fwcond, fwrlen] + * fwelev (double) elevation used to determine whether or not + the well is flowing. + * fwcond (double) conductance used to calculate the discharge + of a free flowing well. Flow occurs when the head in the well + is above the well top elevation (FWELEV). + * fwrlen (double) length used to reduce the conductance of the + flowing well. When the head in the well drops below the well + top plus the reduction length, then the conductance is + reduced. This reduction length can be used to improve the + stability of simulations with flowing wells so that there is + not an abrupt change in flowing well rates. + rate : [double] + * rate (double) is the volumetric pumping rate for the multi- + aquifer well. A positive value indicates recharge and a + negative value indicates discharge (pumping). RATE only + applies to active (IBOUND :math:`>` 0) multi-aquifer wells. + If the Options block includes a TIMESERIESFILE entry (see the + "Time-Variable Input" section), values can be obtained from a + time series by entering the time-series name in place of a + numeric value. By default, the RATE for each multi-aquifer + well is zero. + well_head : [double] + * well_head (double) is the head in the multi-aquifer well. + WELL_HEAD is only applied to constant head (STATUS is + CONSTANT) and inactive (STATUS is INACTIVE) multi-aquifer + wells. If the Options block includes a TIMESERIESFILE entry + (see the "Time-Variable Input" section), values can be + obtained from a time series by entering the time-series name + in place of a numeric value. + head_limit : [string] + * head_limit (string) is the limiting water level (head) in the + well, which is the minimum of the well RATE or the well + inflow rate from the aquifer. HEAD_LIMIT can be applied to + extraction wells (RATE :math:`<` 0) or injection wells (RATE + :math:`>` 0). HEAD\_LIMIT can be deactivated by specifying + the text string `OFF'. The HEAD\_LIMIT option is based on the + HEAD\_LIMIT functionality available in the + MNW2~\citep{konikow2009} package for MODFLOW-2005. The + HEAD\_LIMIT option has been included to facilitate backward + compatibility with previous versions of MODFLOW but use of + the RATE\_SCALING option instead of the HEAD\_LIMIT option is + recommended. By default, HEAD\_LIMIT is `OFF'. 
+ shutoffrecord : [minrate, maxrate] + * minrate (double) is the minimum rate that a well must exceed + to shutoff a well during a stress period. The well will shut + down during a time step if the flow rate to the well from the + aquifer is less than MINRATE. If a well is shut down during a + time step, reactivation of the well cannot occur until the + next time step to reduce oscillations. MINRATE must be less + than maxrate. + * maxrate (double) is the maximum rate that a well must exceed + to reactivate a well during a stress period. The well will + reactivate during a timestep if the well was shutdown during + the previous time step and the flow rate to the well from the + aquifer exceeds maxrate. Reactivation of the well cannot + occur until the next time step if a well is shutdown to + reduce oscillations. maxrate must be greater than MINRATE. + rate_scalingrecord : [pump_elevation, scaling_length] + * pump_elevation (double) is the elevation of the multi-aquifer + well pump (PUMP_ELEVATION). PUMP_ELEVATION should not be less + than the bottom elevation (BOTTOM) of the multi-aquifer well. + * scaling_length (double) height above the pump elevation + (SCALING_LENGTH). If the simulated well head is below this + elevation (pump elevation plus the scaling length), then the + pumping rate is reduced. + auxiliaryrecord : [auxname, auxval] + * auxname (string) name for the auxiliary variable to be + assigned AUXVAL. AUXNAME must match one of the auxiliary + variable names defined in the OPTIONS block. If AUXNAME does + not match one of the auxiliary variable names defined in the + OPTIONS block the data are ignored. + * auxval (double) value for the auxiliary variable. If the + Options block includes a TIMESERIESFILE entry (see the "Time- + Variable Input" section), values can be obtained from a time + series by entering the time-series name in place of a numeric + value. + filename : String + File name for this package. + pname : String + Package name for this package. + parent_file : MFPackage + Parent package file that references this package. Only needed for + utility packages (mfutl*). For example, mfutllaktab package must have + a mfgwflak package parent_file. 
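Pulling the parameters above together, the following is a minimal construction sketch rather than a definitive recipe: every name, dimension, and value is illustrative, and the surrounding simulation objects are the standard FloPy mf6 containers.

    import flopy

    sim = flopy.mf6.MFSimulation(sim_name="mawdemo", sim_ws=".")
    tdis = flopy.mf6.ModflowTdis(sim)
    ims = flopy.mf6.ModflowIms(sim)  # some FloPy versions require explicit registration
    gwf = flopy.mf6.ModflowGwf(sim, modelname="mawdemo")
    dis = flopy.mf6.ModflowGwfdis(gwf, nlay=2, nrow=3, ncol=3,
                                  top=10.0, botm=[5.0, 0.0])
    ic = flopy.mf6.ModflowGwfic(gwf, strt=9.0)
    npf = flopy.mf6.ModflowGwfnpf(gwf, k=10.0)

    # One THIEM well screened across both layers. wellno and icon are
    # zero-based here because FloPy adds one on write (see above).
    maw = flopy.mf6.ModflowGwfmaw(
        gwf,
        nmawwells=1,
        # packagedata: wellno, radius, bottom, strt, condeqn, ngwfnodes
        packagedata=[(0, 0.1, 0.0, 9.0, "THIEM", 2)],
        # connectiondata: wellno, icon, cellid, scrn_top, scrn_bot,
        # hk_skin, radius_skin (the last four may be any value for THIEM)
        connectiondata=[(0, 0, (0, 1, 1), 0.0, 0.0, 0.0, 0.0),
                        (0, 1, (1, 1, 1), 0.0, 0.0, 0.0, 0.0)],
        # perioddata: {stress period: [(wellno, mawsetting), ...]}
        perioddata={0: [(0, "rate", -100.0), (0, "status", "ACTIVE")]},
    )
    sim.write_simulation()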
+ + """ + auxiliary = ListTemplateGenerator(('gwf6', 'maw', 'options', + 'auxiliary')) + stage_filerecord = ListTemplateGenerator(('gwf6', 'maw', 'options', + 'stage_filerecord')) + budget_filerecord = ListTemplateGenerator(('gwf6', 'maw', 'options', + 'budget_filerecord')) + ts_filerecord = ListTemplateGenerator(('gwf6', 'maw', 'options', + 'ts_filerecord')) + obs_filerecord = ListTemplateGenerator(('gwf6', 'maw', 'options', + 'obs_filerecord')) + packagedata = ListTemplateGenerator(('gwf6', 'maw', 'packagedata', + 'packagedata')) + connectiondata = ListTemplateGenerator(('gwf6', 'maw', + 'connectiondata', + 'connectiondata')) + perioddata = ListTemplateGenerator(('gwf6', 'maw', 'period', + 'perioddata')) + package_abbr = "gwfmaw" + _package_type = "maw" + dfn_file_name = "gwf-maw.dfn" + + dfn = [["block options", "name auxiliary", "type string", + "shape (naux)", "reader urword", "optional true"], + ["block options", "name boundnames", "type keyword", "shape", + "reader urword", "optional true"], + ["block options", "name print_input", "type keyword", + "reader urword", "optional true"], + ["block options", "name print_head", "type keyword", + "reader urword", "optional true"], + ["block options", "name print_flows", "type keyword", + "reader urword", "optional true"], + ["block options", "name save_flows", "type keyword", + "reader urword", "optional true"], + ["block options", "name stage_filerecord", + "type record head fileout headfile", "shape", "reader urword", + "tagged true", "optional true"], + ["block options", "name head", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name headfile", "type string", + "preserve_case true", "shape", "in_record true", "reader urword", + "tagged false", "optional false"], + ["block options", "name budget_filerecord", + "type record budget fileout budgetfile", "shape", "reader urword", + "tagged true", "optional true"], + ["block options", "name budget", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name fileout", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name budgetfile", "type string", + "preserve_case true", "shape", "in_record true", "reader urword", + "tagged false", "optional false"], + ["block options", "name no_well_storage", "type keyword", + "reader urword", "optional true"], + ["block options", "name flowing_wells", "type keyword", + "reader urword", "optional true"], + ["block options", "name shutdown_theta", "type double precision", + "reader urword", "optional true"], + ["block options", "name shutdown_kappa", "type double precision", + "reader urword", "optional true"], + ["block options", "name ts_filerecord", + "type record ts6 filein ts6_filename", "shape", "reader urword", + "tagged true", "optional true", "construct_package ts", + "construct_data timeseries", "parameter_name timeseries"], + ["block options", "name ts6", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name filein", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name ts6_filename", "type string", + "preserve_case true", "in_record true", "reader urword", + "optional false", "tagged false"], + ["block options", "name obs_filerecord", + "type record obs6 filein obs6_filename", "shape", "reader urword", + 
"tagged true", "optional true", "construct_package obs", + "construct_data continuous", "parameter_name observations"], + ["block options", "name obs6", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name obs6_filename", "type string", + "preserve_case true", "in_record true", "tagged false", + "reader urword", "optional false"], + ["block options", "name mover", "type keyword", "tagged true", + "reader urword", "optional true"], + ["block dimensions", "name nmawwells", "type integer", + "reader urword", "optional false"], + ["block packagedata", "name packagedata", + "type recarray wellno radius bottom strt condeqn ngwfnodes aux " + "boundname", + "shape (nmawwells)", "reader urword"], + ["block packagedata", "name wellno", "type integer", "shape", + "tagged false", "in_record true", "reader urword", + "numeric_index true"], + ["block packagedata", "name radius", "type double precision", + "shape", "tagged false", "in_record true", "reader urword"], + ["block packagedata", "name bottom", "type double precision", + "shape", "tagged false", "in_record true", "reader urword"], + ["block packagedata", "name strt", "type double precision", + "shape", "tagged false", "in_record true", "reader urword"], + ["block packagedata", "name condeqn", "type string", "shape", + "tagged false", "in_record true", "reader urword"], + ["block packagedata", "name ngwfnodes", "type integer", "shape", + "tagged false", "in_record true", "reader urword"], + ["block packagedata", "name aux", "type double precision", + "in_record true", "tagged false", "shape (naux)", "reader urword", + "time_series true", "optional true"], + ["block packagedata", "name boundname", "type string", "shape", + "tagged false", "in_record true", "reader urword", + "optional true"], + ["block connectiondata", "name connectiondata", + "type recarray wellno icon cellid scrn_top scrn_bot hk_skin " + "radius_skin", + "reader urword"], + ["block connectiondata", "name wellno", "type integer", "shape", + "tagged false", "in_record true", "reader urword", + "numeric_index true"], + ["block connectiondata", "name icon", "type integer", "shape", + "tagged false", "in_record true", "reader urword", + "numeric_index true"], + ["block connectiondata", "name cellid", "type integer", + "shape (ncelldim)", "tagged false", "in_record true", + "reader urword"], + ["block connectiondata", "name scrn_top", + "type double precision", "shape", "tagged false", + "in_record true", "reader urword"], + ["block connectiondata", "name scrn_bot", + "type double precision", "shape", "tagged false", + "in_record true", "reader urword"], + ["block connectiondata", "name hk_skin", "type double precision", + "shape", "tagged false", "in_record true", "reader urword"], + ["block connectiondata", "name radius_skin", + "type double precision", "shape", "tagged false", + "in_record true", "reader urword"], + ["block period", "name iper", "type integer", + "block_variable True", "in_record true", "tagged false", "shape", + "valid", "reader urword", "optional false"], + ["block period", "name perioddata", + "type recarray wellno mawsetting", "shape", "reader urword"], + ["block period", "name wellno", "type integer", "shape", + "tagged false", "in_record true", "reader urword", + "numeric_index true"], + ["block period", "name mawsetting", + "type keystring status flowing_wellrecord rate well_head " + "head_limit shutoffrecord rate_scalingrecord auxiliaryrecord", + "shape", "tagged false", "in_record 
true", "reader urword"], + ["block period", "name status", "type string", "shape", + "tagged true", "in_record true", "reader urword"], + ["block period", "name flowing_wellrecord", + "type record flowing_well fwelev fwcond fwrlen", "shape", + "tagged", "in_record true", "reader urword"], + ["block period", "name flowing_well", "type keyword", "shape", + "in_record true", "reader urword"], + ["block period", "name fwelev", "type double precision", "shape", + "tagged false", "in_record true", "reader urword"], + ["block period", "name fwcond", "type double precision", "shape", + "tagged false", "in_record true", "reader urword"], + ["block period", "name fwrlen", "type double precision", "shape", + "tagged false", "in_record true", "reader urword"], + ["block period", "name rate", "type double precision", "shape", + "tagged true", "in_record true", "reader urword", + "time_series true"], + ["block period", "name well_head", "type double precision", + "shape", "tagged true", "in_record true", "reader urword", + "time_series true"], + ["block period", "name head_limit", "type string", "shape", + "tagged true", "in_record true", "reader urword"], + ["block period", "name shutoffrecord", + "type record shut_off minrate maxrate", "shape", "tagged", + "in_record true", "reader urword"], + ["block period", "name shut_off", "type keyword", "shape", + "in_record true", "reader urword"], + ["block period", "name minrate", "type double precision", + "shape", "tagged false", "in_record true", "reader urword"], + ["block period", "name maxrate", "type double precision", + "shape", "tagged false", "in_record true", "reader urword"], + ["block period", "name rate_scalingrecord", + "type record rate_scaling pump_elevation scaling_length", "shape", + "tagged", "in_record true", "reader urword"], + ["block period", "name rate_scaling", "type keyword", "shape", + "in_record true", "reader urword"], + ["block period", "name pump_elevation", "type double precision", + "shape", "tagged false", "in_record true", "reader urword"], + ["block period", "name scaling_length", "type double precision", + "shape", "tagged false", "in_record true", "reader urword"], + ["block period", "name auxiliaryrecord", + "type record auxiliary auxname auxval", "shape", "tagged", + "in_record true", "reader urword"], + ["block period", "name auxiliary", "type keyword", "shape", + "in_record true", "reader urword"], + ["block period", "name auxname", "type string", "shape", + "tagged false", "in_record true", "reader urword"], + ["block period", "name auxval", "type double precision", "shape", + "tagged false", "in_record true", "reader urword", + "time_series true"]] + + def __init__(self, model, loading_package=False, auxiliary=None, + boundnames=None, print_input=None, print_head=None, + print_flows=None, save_flows=None, stage_filerecord=None, + budget_filerecord=None, no_well_storage=None, + flowing_wells=None, shutdown_theta=None, shutdown_kappa=None, + timeseries=None, observations=None, mover=None, + nmawwells=None, packagedata=None, connectiondata=None, + perioddata=None, filename=None, pname=None, parent_file=None): + super(ModflowGwfmaw, self).__init__(model, "maw", filename, pname, + loading_package, parent_file) + + # set up variables + self.auxiliary = self.build_mfdata("auxiliary", auxiliary) + self.boundnames = self.build_mfdata("boundnames", boundnames) + self.print_input = self.build_mfdata("print_input", print_input) + self.print_head = self.build_mfdata("print_head", print_head) + self.print_flows = 
self.build_mfdata("print_flows", print_flows) + self.save_flows = self.build_mfdata("save_flows", save_flows) + self.stage_filerecord = self.build_mfdata("stage_filerecord", + stage_filerecord) + self.budget_filerecord = self.build_mfdata("budget_filerecord", + budget_filerecord) + self.no_well_storage = self.build_mfdata("no_well_storage", + no_well_storage) + self.flowing_wells = self.build_mfdata("flowing_wells", flowing_wells) + self.shutdown_theta = self.build_mfdata("shutdown_theta", + shutdown_theta) + self.shutdown_kappa = self.build_mfdata("shutdown_kappa", + shutdown_kappa) + self._ts_filerecord = self.build_mfdata("ts_filerecord", + None) + self._ts_package = self.build_child_package("ts", timeseries, + "timeseries", + self._ts_filerecord) + self._obs_filerecord = self.build_mfdata("obs_filerecord", + None) + self._obs_package = self.build_child_package("obs", observations, + "continuous", + self._obs_filerecord) + self.mover = self.build_mfdata("mover", mover) + self.nmawwells = self.build_mfdata("nmawwells", nmawwells) + self.packagedata = self.build_mfdata("packagedata", packagedata) + self.connectiondata = self.build_mfdata("connectiondata", + connectiondata) + self.perioddata = self.build_mfdata("perioddata", perioddata) + self._init_complete = True diff --git a/flopy/mf6/modflow/mfgwfmvr.py b/flopy/mf6/modflow/mfgwfmvr.py index e6344e3498..c657635dd5 100644 --- a/flopy/mf6/modflow/mfgwfmvr.py +++ b/flopy/mf6/modflow/mfgwfmvr.py @@ -1,186 +1,186 @@ -# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY -# mf6/utils/createpackages.py -from .. import mfpackage -from ..data.mfdatautil import ListTemplateGenerator - - -class ModflowGwfmvr(mfpackage.MFPackage): - """ - ModflowGwfmvr defines a mvr package within a gwf6 model. - - Parameters - ---------- - model : MFModel - Model that this package is a part of. Package is automatically - added to model when it is initialized. - loading_package : bool - Do not set this parameter. It is intended for debugging and internal - processing purposes only. - print_input : boolean - * print_input (boolean) keyword to indicate that the list of MVR - information will be written to the listing file immediately after it - is read. - print_flows : boolean - * print_flows (boolean) keyword to indicate that the list of MVR flow - rates will be printed to the listing file for every stress period - time step in which "BUDGET PRINT" is specified in Output Control. If - there is no Output Control option and "PRINT_FLOWS" is specified, - then flow rates are printed for the last time step of each stress - period. - modelnames : boolean - * modelnames (boolean) keyword to indicate that all package names will - be preceded by the model name for the package. Model names are - required when the Mover Package is used with a GWF-GWF Exchange. The - MODELNAME keyword should not be used for a Mover Package that is for - a single GWF Model. - budget_filerecord : [budgetfile] - * budgetfile (string) name of the output file to write budget - information. - maxmvr : integer - * maxmvr (integer) integer value specifying the maximum number of water - mover entries that will specified for any stress period. - maxpackages : integer - * maxpackages (integer) integer value specifying the number of unique - packages that are included in this water mover input file. - packages : [mname, pname] - * mname (string) name of model containing the package. Model names are - assigned by the user in the simulation name file. 
- * pname (string) is the name of a package that may be included in a - subsequent stress period block. The package name is assigned in the - name file for the GWF Model. Package names are optionally provided in - the name file. If they are not provided by the user, then packages - are assigned a default value, which is the package acronym followed - by a hyphen and the package number. For example, the first Drain - Package is named DRN-1. The second Drain Package is named DRN-2, and - so forth. - perioddata : [mname1, pname1, id1, mname2, pname2, id2, mvrtype, value] - * mname1 (string) name of model containing the package, PNAME1. - * pname1 (string) is the package name for the provider. The package - PNAME1 must be designated to provide water through the MVR Package by - specifying the keyword "MOVER" in its OPTIONS block. - * id1 (integer) is the identifier for the provider. For the standard - boundary packages, the provider identifier is the number of the - boundary as it is listed in the package input file. (Note that the - order of these boundaries may change by stress period, which must be - accounted for in the Mover Package.) So the first well has an - identifier of one. The second is two, and so forth. For the advanced - packages, the identifier is the reach number (SFR Package), well - number (MAW Package), or UZF cell number. For the Lake Package, ID1 - is the lake outlet number. Thus, outflows from a single lake can be - routed to different streams, for example. This argument is an index - variable, which means that it should be treated as zero-based when - working with FloPy and Python. Flopy will automatically subtract one - when loading index variables and add one when writing index - variables. - * mname2 (string) name of model containing the package, PNAME2. - * pname2 (string) is the package name for the receiver. The package - PNAME2 must be designated to receive water from the MVR Package by - specifying the keyword "MOVER" in its OPTIONS block. - * id2 (integer) is the identifier for the receiver. The receiver - identifier is the reach number (SFR Package), Lake number (LAK - Package), well number (MAW Package), or UZF cell number. This - argument is an index variable, which means that it should be treated - as zero-based when working with FloPy and Python. Flopy will - automatically subtract one when loading index variables and add one - when writing index variables. - * mvrtype (string) is the character string signifying the method for - determining how much water will be moved. Supported values are - "FACTOR" "EXCESS" "THRESHOLD" and "UPTO". These four options - determine how the receiver flow rate, :math:`Q_R`, is calculated. - These options are based the options available in the SFR2 Package for - diverting stream flow. - * value (double) is the value to be used in the equation for - calculating the amount of water to move. For the "FACTOR" option, - VALUE is the :math:`\\alpha` factor. For the remaining options, VALUE - is the specified flow rate, :math:`Q_S`. - filename : String - File name for this package. - pname : String - Package name for this package. - parent_file : MFPackage - Parent package file that references this package. Only needed for - utility packages (mfutl*). For example, mfutllaktab package must have - a mfgwflak package parent_file. 
- - """ - budget_filerecord = ListTemplateGenerator(('gwf6', 'mvr', 'options', - 'budget_filerecord')) - packages = ListTemplateGenerator(('gwf6', 'mvr', 'packages', - 'packages')) - perioddata = ListTemplateGenerator(('gwf6', 'mvr', 'period', - 'perioddata')) - package_abbr = "gwfmvr" - _package_type = "mvr" - dfn_file_name = "gwf-mvr.dfn" - - dfn = [["block options", "name print_input", "type keyword", - "reader urword", "optional true"], - ["block options", "name print_flows", "type keyword", - "reader urword", "optional true"], - ["block options", "name modelnames", "type keyword", - "reader urword", "optional true"], - ["block options", "name budget_filerecord", - "type record budget fileout budgetfile", "shape", "reader urword", - "tagged true", "optional true"], - ["block options", "name budget", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name fileout", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name budgetfile", "type string", - "preserve_case true", "shape", "in_record true", "reader urword", - "tagged false", "optional false"], - ["block dimensions", "name maxmvr", "type integer", - "reader urword", "optional false"], - ["block dimensions", "name maxpackages", "type integer", - "reader urword", "optional false"], - ["block packages", "name packages", "type recarray mname pname", - "reader urword", "shape (npackages)", "optional false"], - ["block packages", "name mname", "type string", "reader urword", - "shape", "tagged false", "in_record true", "optional true"], - ["block packages", "name pname", "type string", "reader urword", - "shape", "tagged false", "in_record true", "optional false"], - ["block period", "name iper", "type integer", - "block_variable True", "in_record true", "tagged false", "shape", - "valid", "reader urword", "optional false"], - ["block period", "name perioddata", - "type recarray mname1 pname1 id1 mname2 pname2 id2 mvrtype value", - "shape (maxbound)", "reader urword"], - ["block period", "name mname1", "type string", "reader urword", - "shape", "tagged false", "in_record true", "optional true"], - ["block period", "name pname1", "type string", "shape", - "tagged false", "in_record true", "reader urword"], - ["block period", "name id1", "type integer", "shape", - "tagged false", "in_record true", "reader urword", - "numeric_index true"], - ["block period", "name mname2", "type string", "reader urword", - "shape", "tagged false", "in_record true", "optional true"], - ["block period", "name pname2", "type string", "shape", - "tagged false", "in_record true", "reader urword"], - ["block period", "name id2", "type integer", "shape", - "tagged false", "in_record true", "reader urword", - "numeric_index true"], - ["block period", "name mvrtype", "type string", "shape", - "tagged false", "in_record true", "reader urword"], - ["block period", "name value", "type double precision", "shape", - "tagged false", "in_record true", "reader urword"]] - - def __init__(self, model, loading_package=False, print_input=None, - print_flows=None, modelnames=None, budget_filerecord=None, - maxmvr=None, maxpackages=None, packages=None, perioddata=None, - filename=None, pname=None, parent_file=None): - super(ModflowGwfmvr, self).__init__(model, "mvr", filename, pname, - loading_package, parent_file) - - # set up variables - self.print_input = self.build_mfdata("print_input", print_input) - self.print_flows = 
self.build_mfdata("print_flows", print_flows) - self.modelnames = self.build_mfdata("modelnames", modelnames) - self.budget_filerecord = self.build_mfdata("budget_filerecord", - budget_filerecord) - self.maxmvr = self.build_mfdata("maxmvr", maxmvr) - self.maxpackages = self.build_mfdata("maxpackages", maxpackages) - self.packages = self.build_mfdata("packages", packages) - self.perioddata = self.build_mfdata("perioddata", perioddata) - self._init_complete = True +# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY +# mf6/utils/createpackages.py +from .. import mfpackage +from ..data.mfdatautil import ListTemplateGenerator + + +class ModflowGwfmvr(mfpackage.MFPackage): + """ + ModflowGwfmvr defines a mvr package within a gwf6 model. + + Parameters + ---------- + model : MFModel + Model that this package is a part of. Package is automatically + added to model when it is initialized. + loading_package : bool + Do not set this parameter. It is intended for debugging and internal + processing purposes only. + print_input : boolean + * print_input (boolean) keyword to indicate that the list of MVR + information will be written to the listing file immediately after it + is read. + print_flows : boolean + * print_flows (boolean) keyword to indicate that the list of MVR flow + rates will be printed to the listing file for every stress period + time step in which "BUDGET PRINT" is specified in Output Control. If + there is no Output Control option and "PRINT_FLOWS" is specified, + then flow rates are printed for the last time step of each stress + period. + modelnames : boolean + * modelnames (boolean) keyword to indicate that all package names will + be preceded by the model name for the package. Model names are + required when the Mover Package is used with a GWF-GWF Exchange. The + MODELNAME keyword should not be used for a Mover Package that is for + a single GWF Model. + budget_filerecord : [budgetfile] + * budgetfile (string) name of the output file to write budget + information. + maxmvr : integer + * maxmvr (integer) integer value specifying the maximum number of water + mover entries that will specified for any stress period. + maxpackages : integer + * maxpackages (integer) integer value specifying the number of unique + packages that are included in this water mover input file. + packages : [mname, pname] + * mname (string) name of model containing the package. Model names are + assigned by the user in the simulation name file. + * pname (string) is the name of a package that may be included in a + subsequent stress period block. The package name is assigned in the + name file for the GWF Model. Package names are optionally provided in + the name file. If they are not provided by the user, then packages + are assigned a default value, which is the package acronym followed + by a hyphen and the package number. For example, the first Drain + Package is named DRN-1. The second Drain Package is named DRN-2, and + so forth. + perioddata : [mname1, pname1, id1, mname2, pname2, id2, mvrtype, value] + * mname1 (string) name of model containing the package, PNAME1. + * pname1 (string) is the package name for the provider. The package + PNAME1 must be designated to provide water through the MVR Package by + specifying the keyword "MOVER" in its OPTIONS block. + * id1 (integer) is the identifier for the provider. For the standard + boundary packages, the provider identifier is the number of the + boundary as it is listed in the package input file. 
(Note that the + order of these boundaries may change by stress period, which must be + accounted for in the Mover Package.) So the first well has an + identifier of one. The second is two, and so forth. For the advanced + packages, the identifier is the reach number (SFR Package), well + number (MAW Package), or UZF cell number. For the Lake Package, ID1 + is the lake outlet number. Thus, outflows from a single lake can be + routed to different streams, for example. This argument is an index + variable, which means that it should be treated as zero-based when + working with FloPy and Python. Flopy will automatically subtract one + when loading index variables and add one when writing index + variables. + * mname2 (string) name of model containing the package, PNAME2. + * pname2 (string) is the package name for the receiver. The package + PNAME2 must be designated to receive water from the MVR Package by + specifying the keyword "MOVER" in its OPTIONS block. + * id2 (integer) is the identifier for the receiver. The receiver + identifier is the reach number (SFR Package), Lake number (LAK + Package), well number (MAW Package), or UZF cell number. This + argument is an index variable, which means that it should be treated + as zero-based when working with FloPy and Python. Flopy will + automatically subtract one when loading index variables and add one + when writing index variables. + * mvrtype (string) is the character string signifying the method for + determining how much water will be moved. Supported values are + "FACTOR" "EXCESS" "THRESHOLD" and "UPTO". These four options + determine how the receiver flow rate, :math:`Q_R`, is calculated. + These options are based the options available in the SFR2 Package for + diverting stream flow. + * value (double) is the value to be used in the equation for + calculating the amount of water to move. For the "FACTOR" option, + VALUE is the :math:`\\alpha` factor. For the remaining options, VALUE + is the specified flow rate, :math:`Q_S`. + filename : String + File name for this package. + pname : String + Package name for this package. + parent_file : MFPackage + Parent package file that references this package. Only needed for + utility packages (mfutl*). For example, mfutllaktab package must have + a mfgwflak package parent_file. 
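As a concrete illustration of the provider/receiver convention documented above, the sketch below moves water from a hypothetical SFR reach to a hypothetical LAK lake. The package names and identifiers are assumptions, gwf is an existing model, and both named packages must set mover=True in their own options for MODFLOW 6 to accept the input.

    # packages: one entry per package that provides or receives water
    packages = [("sfr-1",), ("lak-1",)]
    # perioddata: pname1, id1, pname2, id2, mvrtype, value.
    # ids are zero-based in FloPy (reach 0 -> lake 0); FACTOR scales
    # the available provider flow by value, so 0.5 moves half of it.
    mvr = flopy.mf6.ModflowGwfmvr(
        gwf, maxmvr=1, maxpackages=2, packages=packages,
        perioddata={0: [("sfr-1", 0, "lak-1", 0, "factor", 0.5)]},
    )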
+ + """ + budget_filerecord = ListTemplateGenerator(('gwf6', 'mvr', 'options', + 'budget_filerecord')) + packages = ListTemplateGenerator(('gwf6', 'mvr', 'packages', + 'packages')) + perioddata = ListTemplateGenerator(('gwf6', 'mvr', 'period', + 'perioddata')) + package_abbr = "gwfmvr" + _package_type = "mvr" + dfn_file_name = "gwf-mvr.dfn" + + dfn = [["block options", "name print_input", "type keyword", + "reader urword", "optional true"], + ["block options", "name print_flows", "type keyword", + "reader urword", "optional true"], + ["block options", "name modelnames", "type keyword", + "reader urword", "optional true"], + ["block options", "name budget_filerecord", + "type record budget fileout budgetfile", "shape", "reader urword", + "tagged true", "optional true"], + ["block options", "name budget", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name fileout", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name budgetfile", "type string", + "preserve_case true", "shape", "in_record true", "reader urword", + "tagged false", "optional false"], + ["block dimensions", "name maxmvr", "type integer", + "reader urword", "optional false"], + ["block dimensions", "name maxpackages", "type integer", + "reader urword", "optional false"], + ["block packages", "name packages", "type recarray mname pname", + "reader urword", "shape (npackages)", "optional false"], + ["block packages", "name mname", "type string", "reader urword", + "shape", "tagged false", "in_record true", "optional true"], + ["block packages", "name pname", "type string", "reader urword", + "shape", "tagged false", "in_record true", "optional false"], + ["block period", "name iper", "type integer", + "block_variable True", "in_record true", "tagged false", "shape", + "valid", "reader urword", "optional false"], + ["block period", "name perioddata", + "type recarray mname1 pname1 id1 mname2 pname2 id2 mvrtype value", + "shape (maxbound)", "reader urword"], + ["block period", "name mname1", "type string", "reader urword", + "shape", "tagged false", "in_record true", "optional true"], + ["block period", "name pname1", "type string", "shape", + "tagged false", "in_record true", "reader urword"], + ["block period", "name id1", "type integer", "shape", + "tagged false", "in_record true", "reader urword", + "numeric_index true"], + ["block period", "name mname2", "type string", "reader urword", + "shape", "tagged false", "in_record true", "optional true"], + ["block period", "name pname2", "type string", "shape", + "tagged false", "in_record true", "reader urword"], + ["block period", "name id2", "type integer", "shape", + "tagged false", "in_record true", "reader urword", + "numeric_index true"], + ["block period", "name mvrtype", "type string", "shape", + "tagged false", "in_record true", "reader urword"], + ["block period", "name value", "type double precision", "shape", + "tagged false", "in_record true", "reader urword"]] + + def __init__(self, model, loading_package=False, print_input=None, + print_flows=None, modelnames=None, budget_filerecord=None, + maxmvr=None, maxpackages=None, packages=None, perioddata=None, + filename=None, pname=None, parent_file=None): + super(ModflowGwfmvr, self).__init__(model, "mvr", filename, pname, + loading_package, parent_file) + + # set up variables + self.print_input = self.build_mfdata("print_input", print_input) + self.print_flows = 
self.build_mfdata("print_flows", print_flows) + self.modelnames = self.build_mfdata("modelnames", modelnames) + self.budget_filerecord = self.build_mfdata("budget_filerecord", + budget_filerecord) + self.maxmvr = self.build_mfdata("maxmvr", maxmvr) + self.maxpackages = self.build_mfdata("maxpackages", maxpackages) + self.packages = self.build_mfdata("packages", packages) + self.perioddata = self.build_mfdata("perioddata", perioddata) + self._init_complete = True diff --git a/flopy/mf6/modflow/mfgwfnpf.py b/flopy/mf6/modflow/mfgwfnpf.py index bf958395c7..0b17dda049 100644 --- a/flopy/mf6/modflow/mfgwfnpf.py +++ b/flopy/mf6/modflow/mfgwfnpf.py @@ -1,300 +1,300 @@ -# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY -# mf6/utils/createpackages.py -from .. import mfpackage -from ..data.mfdatautil import ListTemplateGenerator, ArrayTemplateGenerator - - -class ModflowGwfnpf(mfpackage.MFPackage): - """ - ModflowGwfnpf defines a npf package within a gwf6 model. - - Parameters - ---------- - model : MFModel - Model that this package is a part of. Package is automatically - added to model when it is initialized. - loading_package : bool - Do not set this parameter. It is intended for debugging and internal - processing purposes only. - save_flows : boolean - * save_flows (boolean) keyword to indicate that cell-by-cell flow terms - will be written to the file specified with "BUDGET SAVE FILE" in - Output Control. - alternative_cell_averaging : string - * alternative_cell_averaging (string) is a text keyword to indicate - that an alternative method will be used for calculating the - conductance for horizontal cell connections. The text value for - ALTERNATIVE_CELL_AVERAGING can be "LOGARITHMIC", "AMT-LMK", or "AMT- - HMK". "AMT-LMK" signifies that the conductance will be calculated - using arithmetic-mean thickness and logarithmic-mean hydraulic - conductivity. "AMT-HMK" signifies that the conductance will be - calculated using arithmetic-mean thickness and harmonic-mean - hydraulic conductivity. If the user does not specify a value for - ALTERNATIVE_CELL_AVERAGING, then the harmonic-mean method will be - used. This option cannot be used if the XT3D option is invoked. - thickstrt : boolean - * thickstrt (boolean) indicates that cells having a negative ICELLTYPE - are confined, and their cell thickness for conductance calculations - will be computed as STRT-BOT rather than TOP-BOT. - cvoptions : [dewatered] - * dewatered (string) If the DEWATERED keyword is specified, then the - vertical conductance is calculated using only the saturated thickness - and properties of the overlying cell if the head in the underlying - cell is below its top. - perched : boolean - * perched (boolean) keyword to indicate that when a cell is overlying a - dewatered convertible cell, the head difference used in Darcy's Law - is equal to the head in the overlying cell minus the bottom elevation - of the overlying cell. If not specified, then the default is to use - the head difference between the two cells. - rewet_record : [wetfct, iwetit, ihdwet] - * wetfct (double) is a keyword and factor that is included in the - calculation of the head that is initially established at a cell when - that cell is converted from dry to wet. - * iwetit (integer) is a keyword and iteration interval for attempting - to wet cells. Wetting is attempted every IWETIT iteration. This - applies to outer iterations and not inner iterations. If IWETIT is - specified as zero or less, then the value is changed to 1. 
- * ihdwet (integer) is a keyword and integer flag that determines which - equation is used to define the initial head at cells that become wet. - If IHDWET is 0, h = BOT + WETFCT (hm - BOT). If IHDWET is not 0, h = - BOT + WETFCT (THRESH). - xt3doptions : [rhs] - * rhs (string) If the RHS keyword is also included, then the XT3D - additional terms will be added to the right-hand side. If the RHS - keyword is excluded, then the XT3D terms will be put into the - coefficient matrix. - save_specific_discharge : boolean - * save_specific_discharge (boolean) keyword to indicate that x, y, and - z components of specific discharge will be calculated at cell centers - and written to the cell-by-cell flow file, which is specified with - "BUDGET SAVE FILE" in Output Control. If this option is activated, - then additional information may be required in the discretization - packages and the GWF Exchange package (if GWF models are coupled). - Specifically, ANGLDEGX must be specified in the CONNECTIONDATA block - of the DISU Package; ANGLDEGX must also be specified for the GWF - Exchange as an auxiliary variable. - k22overk : boolean - * k22overk (boolean) keyword to indicate that specified K22 is a ratio - of K22 divided by K. If this option is specified, then the K22 array - entered in the NPF Package will be multiplied by K after being read. - k33overk : boolean - * k33overk (boolean) keyword to indicate that specified K33 is a ratio - of K33 divided by K. If this option is specified, then the K33 array - entered in the NPF Package will be multiplied by K after being read. - icelltype : [integer] - * icelltype (integer) flag for each cell that specifies how saturated - thickness is treated. 0 means saturated thickness is held constant; - :math:`>`0 means saturated thickness varies with computed head when - head is below the cell top; :math:`<`0 means saturated thickness - varies with computed head unless the THICKSTRT option is in effect. - When THICKSTRT is in effect, a negative value of icelltype indicates - that saturated thickness will be computed as STRT-BOT and held - constant. - k : [double] - * k (double) is the hydraulic conductivity. For the common case in - which the user would like to specify the horizontal hydraulic - conductivity and the vertical hydraulic conductivity, then K should - be assigned as the horizontal hydraulic conductivity, K33 should be - assigned as the vertical hydraulic conductivity, and texttt{K22} and - the three rotation angles should not be specified. When more - sophisticated anisotropy is required, then K corresponds to the K11 - hydraulic conductivity axis. All included cells (IDOMAIN :math:`>` 0) - must have a K value greater than zero. - k22 : [double] - * k22 (double) is the hydraulic conductivity of the second ellipsoid - axis (or the ratio of K22/K if the K22OVERK option is specified); for - an unrotated case this is the hydraulic conductivity in the y - direction. If K22 is not included in the GRIDDATA block, then K22 is - set equal to K. For a regular MODFLOW grid (DIS Package is used) in - which no rotation angles are specified, K22 is the hydraulic - conductivity along columns in the y direction. For an unstructured - DISU grid, the user must assign principal x and y axes and provide - the angle for each cell face relative to the assigned x direction. - All included cells (IDOMAIN :math:`>` 0) must have a K22 value - greater than zero. 
- k33 : [double] - * k33 (double) is the hydraulic conductivity of the third ellipsoid - axis (or the ratio of K33/K if the K33OVERK option is specified); for - an unrotated case, this is the vertical hydraulic conductivity. When - anisotropy is applied, K33 corresponds to the K33 tensor component. - All included cells (IDOMAIN :math:`>` 0) must have a K33 value - greater than zero. - angle1 : [double] - * angle1 (double) is a rotation angle of the hydraulic conductivity - tensor in degrees. The angle represents the first of three sequential - rotations of the hydraulic conductivity ellipsoid. With the K11, K22, - and K33 axes of the ellipsoid initially aligned with the x, y, and z - coordinate axes, respectively, ANGLE1 rotates the ellipsoid about its - K33 axis (within the x - y plane). A positive value represents - counter-clockwise rotation when viewed from any point on the positive - K33 axis, looking toward the center of the ellipsoid. A value of zero - indicates that the K11 axis lies within the x - z plane. If ANGLE1 is - not specified, default values of zero are assigned to ANGLE1, ANGLE2, - and ANGLE3, in which case the K11, K22, and K33 axes are aligned with - the x, y, and z axes, respectively. - angle2 : [double] - * angle2 (double) is a rotation angle of the hydraulic conductivity - tensor in degrees. The angle represents the second of three - sequential rotations of the hydraulic conductivity ellipsoid. - Following the rotation by ANGLE1 described above, ANGLE2 rotates the - ellipsoid about its K22 axis (out of the x - y plane). An array can - be specified for ANGLE2 only if ANGLE1 is also specified. A positive - value of ANGLE2 represents clockwise rotation when viewed from any - point on the positive K22 axis, looking toward the center of the - ellipsoid. A value of zero indicates that the K11 axis lies within - the x - y plane. If ANGLE2 is not specified, default values of zero - are assigned to ANGLE2 and ANGLE3; connections that are not user- - designated as vertical are assumed to be strictly horizontal (that - is, to have no z component to their orientation); and connection - lengths are based on horizontal distances. - angle3 : [double] - * angle3 (double) is a rotation angle of the hydraulic conductivity - tensor in degrees. The angle represents the third of three sequential - rotations of the hydraulic conductivity ellipsoid. Following the - rotations by ANGLE1 and ANGLE2 described above, ANGLE3 rotates the - ellipsoid about its K11 axis. An array can be specified for ANGLE3 - only if ANGLE1 and ANGLE2 are also specified. An array must be - specified for ANGLE3 if ANGLE2 is specified. A positive value of - ANGLE3 represents clockwise rotation when viewed from any point on - the positive K11 axis, looking toward the center of the ellipsoid. A - value of zero indicates that the K22 axis lies within the x - y - plane. - wetdry : [double] - * wetdry (double) is a combination of the wetting threshold and a flag - to indicate which neighboring cells can cause a cell to become wet. - If WETDRY :math:`<` 0, only a cell below a dry cell can cause the - cell to become wet. If WETDRY :math:`>` 0, the cell below a dry cell - and horizontally adjacent cells can cause a cell to become wet. If - WETDRY is 0, the cell cannot be wetted. The absolute value of WETDRY - is the wetting threshold. When the sum of BOT and the absolute value - of WETDRY at a dry cell is equaled or exceeded by the head at an - adjacent cell, the cell is wetted. 
WETDRY must be specified if - "REWET" is specified in the OPTIONS block. If "REWET" is not - specified in the options block, then WETDRY can be entered, and - memory will be allocated for it, even though it is not used. - filename : String - File name for this package. - pname : String - Package name for this package. - parent_file : MFPackage - Parent package file that references this package. Only needed for - utility packages (mfutl*). For example, mfutllaktab package must have - a mfgwflak package parent_file. - - """ - rewet_record = ListTemplateGenerator(('gwf6', 'npf', 'options', - 'rewet_record')) - icelltype = ArrayTemplateGenerator(('gwf6', 'npf', 'griddata', - 'icelltype')) - k = ArrayTemplateGenerator(('gwf6', 'npf', 'griddata', 'k')) - k22 = ArrayTemplateGenerator(('gwf6', 'npf', 'griddata', 'k22')) - k33 = ArrayTemplateGenerator(('gwf6', 'npf', 'griddata', 'k33')) - angle1 = ArrayTemplateGenerator(('gwf6', 'npf', 'griddata', - 'angle1')) - angle2 = ArrayTemplateGenerator(('gwf6', 'npf', 'griddata', - 'angle2')) - angle3 = ArrayTemplateGenerator(('gwf6', 'npf', 'griddata', - 'angle3')) - wetdry = ArrayTemplateGenerator(('gwf6', 'npf', 'griddata', - 'wetdry')) - package_abbr = "gwfnpf" - _package_type = "npf" - dfn_file_name = "gwf-npf.dfn" - - dfn = [["block options", "name save_flows", "type keyword", - "reader urword", "optional true"], - ["block options", "name alternative_cell_averaging", - "type string", "valid logarithmic amt-lmk amt-hmk", - "reader urword", "optional true"], - ["block options", "name thickstrt", "type keyword", - "reader urword", "optional true"], - ["block options", "name cvoptions", - "type record variablecv dewatered", "reader urword", - "optional true"], - ["block options", "name variablecv", "in_record true", - "type keyword", "reader urword"], - ["block options", "name dewatered", "in_record true", - "type keyword", "reader urword", "optional true"], - ["block options", "name perched", "type keyword", - "reader urword", "optional true"], - ["block options", "name rewet_record", - "type record rewet wetfct iwetit ihdwet", "reader urword", - "optional true"], - ["block options", "name rewet", "type keyword", "in_record true", - "reader urword", "optional false"], - ["block options", "name wetfct", "type double precision", - "in_record true", "reader urword", "optional false"], - ["block options", "name iwetit", "type integer", - "in_record true", "reader urword", "optional false"], - ["block options", "name ihdwet", "type integer", - "in_record true", "reader urword", "optional false"], - ["block options", "name xt3doptions", "type record xt3d rhs", - "reader urword", "optional true"], - ["block options", "name xt3d", "in_record true", "type keyword", - "reader urword"], - ["block options", "name rhs", "in_record true", "type keyword", - "reader urword", "optional true"], - ["block options", "name save_specific_discharge", "type keyword", - "reader urword", "optional true"], - ["block options", "name k22overk", "type keyword", - "reader urword", "optional true"], - ["block options", "name k33overk", "type keyword", - "reader urword", "optional true"], - ["block griddata", "name icelltype", "type integer", - "shape (nodes)", "valid", "reader readarray", "layered true", - "optional", "default_value 0"], - ["block griddata", "name k", "type double precision", - "shape (nodes)", "valid", "reader readarray", "layered true", - "optional", "default_value 1.0"], - ["block griddata", "name k22", "type double precision", - "shape (nodes)", "valid", "reader 
readarray", "layered true", - "optional true"], - ["block griddata", "name k33", "type double precision", - "shape (nodes)", "valid", "reader readarray", "layered true", - "optional true"], - ["block griddata", "name angle1", "type double precision", - "shape (nodes)", "valid", "reader readarray", "layered true", - "optional true"], - ["block griddata", "name angle2", "type double precision", - "shape (nodes)", "valid", "reader readarray", "layered true", - "optional true"], - ["block griddata", "name angle3", "type double precision", - "shape (nodes)", "valid", "reader readarray", "layered true", - "optional true"], - ["block griddata", "name wetdry", "type double precision", - "shape (nodes)", "valid", "reader readarray", "layered true", - "optional true"]] - - def __init__(self, model, loading_package=False, save_flows=None, - alternative_cell_averaging=None, thickstrt=None, - cvoptions=None, perched=None, rewet_record=None, - xt3doptions=None, save_specific_discharge=None, k22overk=None, - k33overk=None, icelltype=0, k=1.0, k22=None, k33=None, - angle1=None, angle2=None, angle3=None, wetdry=None, - filename=None, pname=None, parent_file=None): - super(ModflowGwfnpf, self).__init__(model, "npf", filename, pname, - loading_package, parent_file) - - # set up variables - self.save_flows = self.build_mfdata("save_flows", save_flows) - self.alternative_cell_averaging = self.build_mfdata( - "alternative_cell_averaging", alternative_cell_averaging) - self.thickstrt = self.build_mfdata("thickstrt", thickstrt) - self.cvoptions = self.build_mfdata("cvoptions", cvoptions) - self.perched = self.build_mfdata("perched", perched) - self.rewet_record = self.build_mfdata("rewet_record", rewet_record) - self.xt3doptions = self.build_mfdata("xt3doptions", xt3doptions) - self.save_specific_discharge = self.build_mfdata( - "save_specific_discharge", save_specific_discharge) - self.k22overk = self.build_mfdata("k22overk", k22overk) - self.k33overk = self.build_mfdata("k33overk", k33overk) - self.icelltype = self.build_mfdata("icelltype", icelltype) - self.k = self.build_mfdata("k", k) - self.k22 = self.build_mfdata("k22", k22) - self.k33 = self.build_mfdata("k33", k33) - self.angle1 = self.build_mfdata("angle1", angle1) - self.angle2 = self.build_mfdata("angle2", angle2) - self.angle3 = self.build_mfdata("angle3", angle3) - self.wetdry = self.build_mfdata("wetdry", wetdry) - self._init_complete = True +# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY +# mf6/utils/createpackages.py +from .. import mfpackage +from ..data.mfdatautil import ListTemplateGenerator, ArrayTemplateGenerator + + +class ModflowGwfnpf(mfpackage.MFPackage): + """ + ModflowGwfnpf defines a npf package within a gwf6 model. + + Parameters + ---------- + model : MFModel + Model that this package is a part of. Package is automatically + added to model when it is initialized. + loading_package : bool + Do not set this parameter. It is intended for debugging and internal + processing purposes only. + save_flows : boolean + * save_flows (boolean) keyword to indicate that cell-by-cell flow terms + will be written to the file specified with "BUDGET SAVE FILE" in + Output Control. + alternative_cell_averaging : string + * alternative_cell_averaging (string) is a text keyword to indicate + that an alternative method will be used for calculating the + conductance for horizontal cell connections. The text value for + ALTERNATIVE_CELL_AVERAGING can be "LOGARITHMIC", "AMT-LMK", or "AMT- + HMK". 
"AMT-LMK" signifies that the conductance will be calculated + using arithmetic-mean thickness and logarithmic-mean hydraulic + conductivity. "AMT-HMK" signifies that the conductance will be + calculated using arithmetic-mean thickness and harmonic-mean + hydraulic conductivity. If the user does not specify a value for + ALTERNATIVE_CELL_AVERAGING, then the harmonic-mean method will be + used. This option cannot be used if the XT3D option is invoked. + thickstrt : boolean + * thickstrt (boolean) indicates that cells having a negative ICELLTYPE + are confined, and their cell thickness for conductance calculations + will be computed as STRT-BOT rather than TOP-BOT. + cvoptions : [dewatered] + * dewatered (string) If the DEWATERED keyword is specified, then the + vertical conductance is calculated using only the saturated thickness + and properties of the overlying cell if the head in the underlying + cell is below its top. + perched : boolean + * perched (boolean) keyword to indicate that when a cell is overlying a + dewatered convertible cell, the head difference used in Darcy's Law + is equal to the head in the overlying cell minus the bottom elevation + of the overlying cell. If not specified, then the default is to use + the head difference between the two cells. + rewet_record : [wetfct, iwetit, ihdwet] + * wetfct (double) is a keyword and factor that is included in the + calculation of the head that is initially established at a cell when + that cell is converted from dry to wet. + * iwetit (integer) is a keyword and iteration interval for attempting + to wet cells. Wetting is attempted every IWETIT iteration. This + applies to outer iterations and not inner iterations. If IWETIT is + specified as zero or less, then the value is changed to 1. + * ihdwet (integer) is a keyword and integer flag that determines which + equation is used to define the initial head at cells that become wet. + If IHDWET is 0, h = BOT + WETFCT (hm - BOT). If IHDWET is not 0, h = + BOT + WETFCT (THRESH). + xt3doptions : [rhs] + * rhs (string) If the RHS keyword is also included, then the XT3D + additional terms will be added to the right-hand side. If the RHS + keyword is excluded, then the XT3D terms will be put into the + coefficient matrix. + save_specific_discharge : boolean + * save_specific_discharge (boolean) keyword to indicate that x, y, and + z components of specific discharge will be calculated at cell centers + and written to the cell-by-cell flow file, which is specified with + "BUDGET SAVE FILE" in Output Control. If this option is activated, + then additional information may be required in the discretization + packages and the GWF Exchange package (if GWF models are coupled). + Specifically, ANGLDEGX must be specified in the CONNECTIONDATA block + of the DISU Package; ANGLDEGX must also be specified for the GWF + Exchange as an auxiliary variable. + k22overk : boolean + * k22overk (boolean) keyword to indicate that specified K22 is a ratio + of K22 divided by K. If this option is specified, then the K22 array + entered in the NPF Package will be multiplied by K after being read. + k33overk : boolean + * k33overk (boolean) keyword to indicate that specified K33 is a ratio + of K33 divided by K. If this option is specified, then the K33 array + entered in the NPF Package will be multiplied by K after being read. + icelltype : [integer] + * icelltype (integer) flag for each cell that specifies how saturated + thickness is treated. 
0 means saturated thickness is held constant;
+ :math:`>`0 means saturated thickness varies with computed head when
+ head is below the cell top; :math:`<`0 means saturated thickness
+ varies with computed head unless the THICKSTRT option is in effect.
+ When THICKSTRT is in effect, a negative value of icelltype indicates
+ that saturated thickness will be computed as STRT-BOT and held
+ constant.
+ k : [double]
+ * k (double) is the hydraulic conductivity. For the common case in
+ which the user would like to specify the horizontal hydraulic
+ conductivity and the vertical hydraulic conductivity, then K should
+ be assigned as the horizontal hydraulic conductivity, K33 should be
+ assigned as the vertical hydraulic conductivity, and K22 and
+ the three rotation angles should not be specified. When more
+ sophisticated anisotropy is required, then K corresponds to the K11
+ hydraulic conductivity axis. All included cells (IDOMAIN :math:`>` 0)
+ must have a K value greater than zero.
+ k22 : [double]
+ * k22 (double) is the hydraulic conductivity of the second ellipsoid
+ axis (or the ratio of K22/K if the K22OVERK option is specified); for
+ an unrotated case this is the hydraulic conductivity in the y
+ direction. If K22 is not included in the GRIDDATA block, then K22 is
+ set equal to K. For a regular MODFLOW grid (DIS Package is used) in
+ which no rotation angles are specified, K22 is the hydraulic
+ conductivity along columns in the y direction. For an unstructured
+ DISU grid, the user must assign principal x and y axes and provide
+ the angle for each cell face relative to the assigned x direction.
+ All included cells (IDOMAIN :math:`>` 0) must have a K22 value
+ greater than zero.
+ k33 : [double]
+ * k33 (double) is the hydraulic conductivity of the third ellipsoid
+ axis (or the ratio of K33/K if the K33OVERK option is specified); for
+ an unrotated case, this is the vertical hydraulic conductivity. When
+ anisotropy is applied, K33 corresponds to the K33 tensor component.
+ All included cells (IDOMAIN :math:`>` 0) must have a K33 value
+ greater than zero.
+ angle1 : [double]
+ * angle1 (double) is a rotation angle of the hydraulic conductivity
+ tensor in degrees. The angle represents the first of three sequential
+ rotations of the hydraulic conductivity ellipsoid. With the K11, K22,
+ and K33 axes of the ellipsoid initially aligned with the x, y, and z
+ coordinate axes, respectively, ANGLE1 rotates the ellipsoid about its
+ K33 axis (within the x - y plane). A positive value represents
+ counter-clockwise rotation when viewed from any point on the positive
+ K33 axis, looking toward the center of the ellipsoid. A value of zero
+ indicates that the K11 axis lies within the x - z plane. If ANGLE1 is
+ not specified, default values of zero are assigned to ANGLE1, ANGLE2,
+ and ANGLE3, in which case the K11, K22, and K33 axes are aligned with
+ the x, y, and z axes, respectively.
+ angle2 : [double]
+ * angle2 (double) is a rotation angle of the hydraulic conductivity
+ tensor in degrees. The angle represents the second of three
+ sequential rotations of the hydraulic conductivity ellipsoid.
+ Following the rotation by ANGLE1 described above, ANGLE2 rotates the
+ ellipsoid about its K22 axis (out of the x - y plane). An array can
+ be specified for ANGLE2 only if ANGLE1 is also specified. A positive
+ value of ANGLE2 represents clockwise rotation when viewed from any
+ point on the positive K22 axis, looking toward the center of the
+ ellipsoid.
A value of zero indicates that the K11 axis lies within + the x - y plane. If ANGLE2 is not specified, default values of zero + are assigned to ANGLE2 and ANGLE3; connections that are not user- + designated as vertical are assumed to be strictly horizontal (that + is, to have no z component to their orientation); and connection + lengths are based on horizontal distances. + angle3 : [double] + * angle3 (double) is a rotation angle of the hydraulic conductivity + tensor in degrees. The angle represents the third of three sequential + rotations of the hydraulic conductivity ellipsoid. Following the + rotations by ANGLE1 and ANGLE2 described above, ANGLE3 rotates the + ellipsoid about its K11 axis. An array can be specified for ANGLE3 + only if ANGLE1 and ANGLE2 are also specified. An array must be + specified for ANGLE3 if ANGLE2 is specified. A positive value of + ANGLE3 represents clockwise rotation when viewed from any point on + the positive K11 axis, looking toward the center of the ellipsoid. A + value of zero indicates that the K22 axis lies within the x - y + plane. + wetdry : [double] + * wetdry (double) is a combination of the wetting threshold and a flag + to indicate which neighboring cells can cause a cell to become wet. + If WETDRY :math:`<` 0, only a cell below a dry cell can cause the + cell to become wet. If WETDRY :math:`>` 0, the cell below a dry cell + and horizontally adjacent cells can cause a cell to become wet. If + WETDRY is 0, the cell cannot be wetted. The absolute value of WETDRY + is the wetting threshold. When the sum of BOT and the absolute value + of WETDRY at a dry cell is equaled or exceeded by the head at an + adjacent cell, the cell is wetted. WETDRY must be specified if + "REWET" is specified in the OPTIONS block. If "REWET" is not + specified in the options block, then WETDRY can be entered, and + memory will be allocated for it, even though it is not used. + filename : String + File name for this package. + pname : String + Package name for this package. + parent_file : MFPackage + Parent package file that references this package. Only needed for + utility packages (mfutl*). For example, mfutllaktab package must have + a mfgwflak package parent_file. 
+ + """ + rewet_record = ListTemplateGenerator(('gwf6', 'npf', 'options', + 'rewet_record')) + icelltype = ArrayTemplateGenerator(('gwf6', 'npf', 'griddata', + 'icelltype')) + k = ArrayTemplateGenerator(('gwf6', 'npf', 'griddata', 'k')) + k22 = ArrayTemplateGenerator(('gwf6', 'npf', 'griddata', 'k22')) + k33 = ArrayTemplateGenerator(('gwf6', 'npf', 'griddata', 'k33')) + angle1 = ArrayTemplateGenerator(('gwf6', 'npf', 'griddata', + 'angle1')) + angle2 = ArrayTemplateGenerator(('gwf6', 'npf', 'griddata', + 'angle2')) + angle3 = ArrayTemplateGenerator(('gwf6', 'npf', 'griddata', + 'angle3')) + wetdry = ArrayTemplateGenerator(('gwf6', 'npf', 'griddata', + 'wetdry')) + package_abbr = "gwfnpf" + _package_type = "npf" + dfn_file_name = "gwf-npf.dfn" + + dfn = [["block options", "name save_flows", "type keyword", + "reader urword", "optional true"], + ["block options", "name alternative_cell_averaging", + "type string", "valid logarithmic amt-lmk amt-hmk", + "reader urword", "optional true"], + ["block options", "name thickstrt", "type keyword", + "reader urword", "optional true"], + ["block options", "name cvoptions", + "type record variablecv dewatered", "reader urword", + "optional true"], + ["block options", "name variablecv", "in_record true", + "type keyword", "reader urword"], + ["block options", "name dewatered", "in_record true", + "type keyword", "reader urword", "optional true"], + ["block options", "name perched", "type keyword", + "reader urword", "optional true"], + ["block options", "name rewet_record", + "type record rewet wetfct iwetit ihdwet", "reader urword", + "optional true"], + ["block options", "name rewet", "type keyword", "in_record true", + "reader urword", "optional false"], + ["block options", "name wetfct", "type double precision", + "in_record true", "reader urword", "optional false"], + ["block options", "name iwetit", "type integer", + "in_record true", "reader urword", "optional false"], + ["block options", "name ihdwet", "type integer", + "in_record true", "reader urword", "optional false"], + ["block options", "name xt3doptions", "type record xt3d rhs", + "reader urword", "optional true"], + ["block options", "name xt3d", "in_record true", "type keyword", + "reader urword"], + ["block options", "name rhs", "in_record true", "type keyword", + "reader urword", "optional true"], + ["block options", "name save_specific_discharge", "type keyword", + "reader urword", "optional true"], + ["block options", "name k22overk", "type keyword", + "reader urword", "optional true"], + ["block options", "name k33overk", "type keyword", + "reader urword", "optional true"], + ["block griddata", "name icelltype", "type integer", + "shape (nodes)", "valid", "reader readarray", "layered true", + "optional", "default_value 0"], + ["block griddata", "name k", "type double precision", + "shape (nodes)", "valid", "reader readarray", "layered true", + "optional", "default_value 1.0"], + ["block griddata", "name k22", "type double precision", + "shape (nodes)", "valid", "reader readarray", "layered true", + "optional true"], + ["block griddata", "name k33", "type double precision", + "shape (nodes)", "valid", "reader readarray", "layered true", + "optional true"], + ["block griddata", "name angle1", "type double precision", + "shape (nodes)", "valid", "reader readarray", "layered true", + "optional true"], + ["block griddata", "name angle2", "type double precision", + "shape (nodes)", "valid", "reader readarray", "layered true", + "optional true"], + ["block griddata", "name angle3", "type 
double precision", + "shape (nodes)", "valid", "reader readarray", "layered true", + "optional true"], + ["block griddata", "name wetdry", "type double precision", + "shape (nodes)", "valid", "reader readarray", "layered true", + "optional true"]] + + def __init__(self, model, loading_package=False, save_flows=None, + alternative_cell_averaging=None, thickstrt=None, + cvoptions=None, perched=None, rewet_record=None, + xt3doptions=None, save_specific_discharge=None, k22overk=None, + k33overk=None, icelltype=0, k=1.0, k22=None, k33=None, + angle1=None, angle2=None, angle3=None, wetdry=None, + filename=None, pname=None, parent_file=None): + super(ModflowGwfnpf, self).__init__(model, "npf", filename, pname, + loading_package, parent_file) + + # set up variables + self.save_flows = self.build_mfdata("save_flows", save_flows) + self.alternative_cell_averaging = self.build_mfdata( + "alternative_cell_averaging", alternative_cell_averaging) + self.thickstrt = self.build_mfdata("thickstrt", thickstrt) + self.cvoptions = self.build_mfdata("cvoptions", cvoptions) + self.perched = self.build_mfdata("perched", perched) + self.rewet_record = self.build_mfdata("rewet_record", rewet_record) + self.xt3doptions = self.build_mfdata("xt3doptions", xt3doptions) + self.save_specific_discharge = self.build_mfdata( + "save_specific_discharge", save_specific_discharge) + self.k22overk = self.build_mfdata("k22overk", k22overk) + self.k33overk = self.build_mfdata("k33overk", k33overk) + self.icelltype = self.build_mfdata("icelltype", icelltype) + self.k = self.build_mfdata("k", k) + self.k22 = self.build_mfdata("k22", k22) + self.k33 = self.build_mfdata("k33", k33) + self.angle1 = self.build_mfdata("angle1", angle1) + self.angle2 = self.build_mfdata("angle2", angle2) + self.angle3 = self.build_mfdata("angle3", angle3) + self.wetdry = self.build_mfdata("wetdry", wetdry) + self._init_complete = True diff --git a/flopy/mf6/modflow/mfgwfrch.py b/flopy/mf6/modflow/mfgwfrch.py index a84dfbc64d..b5cfeb2633 100644 --- a/flopy/mf6/modflow/mfgwfrch.py +++ b/flopy/mf6/modflow/mfgwfrch.py @@ -1,207 +1,207 @@ -# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY -# mf6/utils/createpackages.py -from .. import mfpackage -from ..data.mfdatautil import ListTemplateGenerator - - -class ModflowGwfrch(mfpackage.MFPackage): - """ - ModflowGwfrch defines a rch package within a gwf6 model. - - Parameters - ---------- - model : MFModel - Model that this package is a part of. Package is automatically - added to model when it is initialized. - loading_package : bool - Do not set this parameter. It is intended for debugging and internal - processing purposes only. - fixed_cell : boolean - * fixed_cell (boolean) indicates that recharge will not be reassigned - to a cell underlying the cell specified in the list if the specified - cell is inactive. - auxiliary : [string] - * auxiliary (string) defines an array of one or more auxiliary variable - names. There is no limit on the number of auxiliary variables that - can be provided on this line; however, lists of information provided - in subsequent blocks must have a column of data for each auxiliary - variable name defined here. The number of auxiliary variables - detected on this line determines the value for naux. Comments cannot - be provided anywhere on this line as they will be interpreted as - auxiliary variable names. Auxiliary variables may not be used by the - package, but they will be available for use by other parts of the - program. 
The program will terminate with an error if auxiliary
- variables are specified on more than one line in the options block.
- auxmultname : string
- * auxmultname (string) name of auxiliary variable to be used as
- multiplier of recharge.
- boundnames : boolean
- * boundnames (boolean) keyword to indicate that boundary names may be
- provided with the list of recharge cells.
- print_input : boolean
- * print_input (boolean) keyword to indicate that the list of recharge
- information will be written to the listing file immediately after it
- is read.
- print_flows : boolean
- * print_flows (boolean) keyword to indicate that the list of recharge
- flow rates will be printed to the listing file for every stress
- period time step in which "BUDGET PRINT" is specified in Output
- Control. If there is no Output Control option and "PRINT_FLOWS" is
- specified, then flow rates are printed for the last time step of each
- stress period.
- save_flows : boolean
- * save_flows (boolean) keyword to indicate that recharge flow terms
- will be written to the file specified with "BUDGET FILEOUT" in Output
- Control.
- timeseries : {varname:data} or timeseries data
- * Contains data for the ts package. Data can be stored in a dictionary
- containing data for the ts package with variable names as keys and
- package data as values. Data just for the timeseries variable is also
- acceptable. See ts package documentation for more information.
- observations : {varname:data} or continuous data
- * Contains data for the obs package. Data can be stored in a dictionary
- containing data for the obs package with variable names as keys and
- package data as values. Data just for the observations variable is
- also acceptable. See obs package documentation for more information.
- maxbound : integer
- * maxbound (integer) integer value specifying the maximum number of
- recharge cells that will be specified for use during any stress
- period.
- stress_period_data : [cellid, recharge, aux, boundname]
- * cellid ((integer, ...)) is the cell identifier, and depends on the
- type of grid that is used for the simulation. For a structured grid
- that uses the DIS input file, CELLID is the layer, row, and column.
- For a grid that uses the DISV input file, CELLID is the layer and
- CELL2D number. If the model uses the unstructured discretization
- (DISU) input file, CELLID is the node number for the cell. This
- argument is an index variable, which means that it should be treated
- as zero-based when working with FloPy and Python. Flopy will
- automatically subtract one when loading index variables and add one
- when writing index variables.
- * recharge (double) is the recharge flux rate (:math:`LT^{-1}`). This
- rate is multiplied inside the program by the surface area of the cell
- to calculate the volumetric recharge rate. A time-series name may be
- specified.
- * aux (double) represents the values of the auxiliary variables for
- each recharge. The values of auxiliary variables must be present for
- each recharge. The values must be specified in the order of the
- auxiliary variables specified in the OPTIONS block. If the package
- supports time series and the Options block includes a TIMESERIESFILE
- entry (see the "Time-Variable Input" section), values can be obtained
- from a time series by entering the time-series name in place of a
- numeric value.
- * boundname (string) name of the recharge cell. BOUNDNAME is an ASCII
- character variable that can contain as many as 40 characters.
If - BOUNDNAME contains spaces in it, then the entire name must be - enclosed within single quotes. - filename : String - File name for this package. - pname : String - Package name for this package. - parent_file : MFPackage - Parent package file that references this package. Only needed for - utility packages (mfutl*). For example, mfutllaktab package must have - a mfgwflak package parent_file. - - """ - auxiliary = ListTemplateGenerator(('gwf6', 'rch', 'options', - 'auxiliary')) - ts_filerecord = ListTemplateGenerator(('gwf6', 'rch', 'options', - 'ts_filerecord')) - obs_filerecord = ListTemplateGenerator(('gwf6', 'rch', 'options', - 'obs_filerecord')) - stress_period_data = ListTemplateGenerator(('gwf6', 'rch', 'period', - 'stress_period_data')) - package_abbr = "gwfrch" - _package_type = "rch" - dfn_file_name = "gwf-rch.dfn" - - dfn = [["block options", "name fixed_cell", "type keyword", "shape", - "reader urword", "optional true"], - ["block options", "name auxiliary", "type string", - "shape (naux)", "reader urword", "optional true"], - ["block options", "name auxmultname", "type string", "shape", - "reader urword", "optional true"], - ["block options", "name boundnames", "type keyword", "shape", - "reader urword", "optional true"], - ["block options", "name print_input", "type keyword", - "reader urword", "optional true"], - ["block options", "name print_flows", "type keyword", - "reader urword", "optional true"], - ["block options", "name save_flows", "type keyword", - "reader urword", "optional true"], - ["block options", "name ts_filerecord", - "type record ts6 filein ts6_filename", "shape", "reader urword", - "tagged true", "optional true", "construct_package ts", - "construct_data timeseries", "parameter_name timeseries"], - ["block options", "name ts6", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name filein", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name ts6_filename", "type string", - "preserve_case true", "in_record true", "reader urword", - "optional false", "tagged false"], - ["block options", "name obs_filerecord", - "type record obs6 filein obs6_filename", "shape", "reader urword", - "tagged true", "optional true", "construct_package obs", - "construct_data continuous", "parameter_name observations"], - ["block options", "name obs6", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name obs6_filename", "type string", - "preserve_case true", "in_record true", "tagged false", - "reader urword", "optional false"], - ["block dimensions", "name maxbound", "type integer", - "reader urword", "optional false"], - ["block period", "name iper", "type integer", - "block_variable True", "in_record true", "tagged false", "shape", - "valid", "reader urword", "optional false"], - ["block period", "name stress_period_data", - "type recarray cellid recharge aux boundname", "shape (maxbound)", - "reader urword"], - ["block period", "name cellid", "type integer", - "shape (ncelldim)", "tagged false", "in_record true", - "reader urword"], - ["block period", "name recharge", "type double precision", - "shape", "tagged false", "in_record true", "reader urword", - "time_series true"], - ["block period", "name aux", "type double precision", - "in_record true", "tagged false", "shape (naux)", "reader urword", - "optional true", "time_series true"], - ["block period", "name 
boundname", "type string", "shape", - "tagged false", "in_record true", "reader urword", - "optional true"]] - - def __init__(self, model, loading_package=False, fixed_cell=None, - auxiliary=None, auxmultname=None, boundnames=None, - print_input=None, print_flows=None, save_flows=None, - timeseries=None, observations=None, maxbound=None, - stress_period_data=None, filename=None, pname=None, - parent_file=None): - super(ModflowGwfrch, self).__init__(model, "rch", filename, pname, - loading_package, parent_file) - - # set up variables - self.fixed_cell = self.build_mfdata("fixed_cell", fixed_cell) - self.auxiliary = self.build_mfdata("auxiliary", auxiliary) - self.auxmultname = self.build_mfdata("auxmultname", auxmultname) - self.boundnames = self.build_mfdata("boundnames", boundnames) - self.print_input = self.build_mfdata("print_input", print_input) - self.print_flows = self.build_mfdata("print_flows", print_flows) - self.save_flows = self.build_mfdata("save_flows", save_flows) - self._ts_filerecord = self.build_mfdata("ts_filerecord", - None) - self._ts_package = self.build_child_package("ts", timeseries, - "timeseries", - self._ts_filerecord) - self._obs_filerecord = self.build_mfdata("obs_filerecord", - None) - self._obs_package = self.build_child_package("obs", observations, - "continuous", - self._obs_filerecord) - self.maxbound = self.build_mfdata("maxbound", maxbound) - self.stress_period_data = self.build_mfdata("stress_period_data", - stress_period_data) - self._init_complete = True +# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY +# mf6/utils/createpackages.py +from .. import mfpackage +from ..data.mfdatautil import ListTemplateGenerator + + +class ModflowGwfrch(mfpackage.MFPackage): + """ + ModflowGwfrch defines a rch package within a gwf6 model. + + Parameters + ---------- + model : MFModel + Model that this package is a part of. Package is automatically + added to model when it is initialized. + loading_package : bool + Do not set this parameter. It is intended for debugging and internal + processing purposes only. + fixed_cell : boolean + * fixed_cell (boolean) indicates that recharge will not be reassigned + to a cell underlying the cell specified in the list if the specified + cell is inactive. + auxiliary : [string] + * auxiliary (string) defines an array of one or more auxiliary variable + names. There is no limit on the number of auxiliary variables that + can be provided on this line; however, lists of information provided + in subsequent blocks must have a column of data for each auxiliary + variable name defined here. The number of auxiliary variables + detected on this line determines the value for naux. Comments cannot + be provided anywhere on this line as they will be interpreted as + auxiliary variable names. Auxiliary variables may not be used by the + package, but they will be available for use by other parts of the + program. The program will terminate with an error if auxiliary + variables are specified on more than one line in the options block. + auxmultname : string + * auxmultname (string) name of auxiliary variable to be used as + multiplier of recharge. + boundnames : boolean + * boundnames (boolean) keyword to indicate that boundary names may be + provided with the list of recharge cells. + print_input : boolean + * print_input (boolean) keyword to indicate that the list of recharge + information will be written to the listing file immediately after it + is read. 
+ print_flows : boolean
+ * print_flows (boolean) keyword to indicate that the list of recharge
+ flow rates will be printed to the listing file for every stress
+ period time step in which "BUDGET PRINT" is specified in Output
+ Control. If there is no Output Control option and "PRINT_FLOWS" is
+ specified, then flow rates are printed for the last time step of each
+ stress period.
+ save_flows : boolean
+ * save_flows (boolean) keyword to indicate that recharge flow terms
+ will be written to the file specified with "BUDGET FILEOUT" in Output
+ Control.
+ timeseries : {varname:data} or timeseries data
+ * Contains data for the ts package. Data can be stored in a dictionary
+ containing data for the ts package with variable names as keys and
+ package data as values. Data just for the timeseries variable is also
+ acceptable. See ts package documentation for more information.
+ observations : {varname:data} or continuous data
+ * Contains data for the obs package. Data can be stored in a dictionary
+ containing data for the obs package with variable names as keys and
+ package data as values. Data just for the observations variable is
+ also acceptable. See obs package documentation for more information.
+ maxbound : integer
+ * maxbound (integer) integer value specifying the maximum number of
+ recharge cells that will be specified for use during any stress
+ period.
+ stress_period_data : [cellid, recharge, aux, boundname]
+ * cellid ((integer, ...)) is the cell identifier, and depends on the
+ type of grid that is used for the simulation. For a structured grid
+ that uses the DIS input file, CELLID is the layer, row, and column.
+ For a grid that uses the DISV input file, CELLID is the layer and
+ CELL2D number. If the model uses the unstructured discretization
+ (DISU) input file, CELLID is the node number for the cell. This
+ argument is an index variable, which means that it should be treated
+ as zero-based when working with FloPy and Python. Flopy will
+ automatically subtract one when loading index variables and add one
+ when writing index variables.
+ * recharge (double) is the recharge flux rate (:math:`LT^{-1}`). This
+ rate is multiplied inside the program by the surface area of the cell
+ to calculate the volumetric recharge rate. A time-series name may be
+ specified.
+ * aux (double) represents the values of the auxiliary variables for
+ each recharge. The values of auxiliary variables must be present for
+ each recharge. The values must be specified in the order of the
+ auxiliary variables specified in the OPTIONS block. If the package
+ supports time series and the Options block includes a TIMESERIESFILE
+ entry (see the "Time-Variable Input" section), values can be obtained
+ from a time series by entering the time-series name in place of a
+ numeric value.
+ * boundname (string) name of the recharge cell. BOUNDNAME is an ASCII
+ character variable that can contain as many as 40 characters. If
+ BOUNDNAME contains spaces in it, then the entire name must be
+ enclosed within single quotes.
+ filename : String
+ File name for this package.
+ pname : String
+ Package name for this package.
+ parent_file : MFPackage
+ Parent package file that references this package. Only needed for
+ utility packages (mfutl*). For example, mfutllaktab package must have
+ a mfgwflak package parent_file.
+ + """ + auxiliary = ListTemplateGenerator(('gwf6', 'rch', 'options', + 'auxiliary')) + ts_filerecord = ListTemplateGenerator(('gwf6', 'rch', 'options', + 'ts_filerecord')) + obs_filerecord = ListTemplateGenerator(('gwf6', 'rch', 'options', + 'obs_filerecord')) + stress_period_data = ListTemplateGenerator(('gwf6', 'rch', 'period', + 'stress_period_data')) + package_abbr = "gwfrch" + _package_type = "rch" + dfn_file_name = "gwf-rch.dfn" + + dfn = [["block options", "name fixed_cell", "type keyword", "shape", + "reader urword", "optional true"], + ["block options", "name auxiliary", "type string", + "shape (naux)", "reader urword", "optional true"], + ["block options", "name auxmultname", "type string", "shape", + "reader urword", "optional true"], + ["block options", "name boundnames", "type keyword", "shape", + "reader urword", "optional true"], + ["block options", "name print_input", "type keyword", + "reader urword", "optional true"], + ["block options", "name print_flows", "type keyword", + "reader urword", "optional true"], + ["block options", "name save_flows", "type keyword", + "reader urword", "optional true"], + ["block options", "name ts_filerecord", + "type record ts6 filein ts6_filename", "shape", "reader urword", + "tagged true", "optional true", "construct_package ts", + "construct_data timeseries", "parameter_name timeseries"], + ["block options", "name ts6", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name filein", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name ts6_filename", "type string", + "preserve_case true", "in_record true", "reader urword", + "optional false", "tagged false"], + ["block options", "name obs_filerecord", + "type record obs6 filein obs6_filename", "shape", "reader urword", + "tagged true", "optional true", "construct_package obs", + "construct_data continuous", "parameter_name observations"], + ["block options", "name obs6", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name obs6_filename", "type string", + "preserve_case true", "in_record true", "tagged false", + "reader urword", "optional false"], + ["block dimensions", "name maxbound", "type integer", + "reader urword", "optional false"], + ["block period", "name iper", "type integer", + "block_variable True", "in_record true", "tagged false", "shape", + "valid", "reader urword", "optional false"], + ["block period", "name stress_period_data", + "type recarray cellid recharge aux boundname", "shape (maxbound)", + "reader urword"], + ["block period", "name cellid", "type integer", + "shape (ncelldim)", "tagged false", "in_record true", + "reader urword"], + ["block period", "name recharge", "type double precision", + "shape", "tagged false", "in_record true", "reader urword", + "time_series true"], + ["block period", "name aux", "type double precision", + "in_record true", "tagged false", "shape (naux)", "reader urword", + "optional true", "time_series true"], + ["block period", "name boundname", "type string", "shape", + "tagged false", "in_record true", "reader urword", + "optional true"]] + + def __init__(self, model, loading_package=False, fixed_cell=None, + auxiliary=None, auxmultname=None, boundnames=None, + print_input=None, print_flows=None, save_flows=None, + timeseries=None, observations=None, maxbound=None, + stress_period_data=None, filename=None, pname=None, + 
parent_file=None): + super(ModflowGwfrch, self).__init__(model, "rch", filename, pname, + loading_package, parent_file) + + # set up variables + self.fixed_cell = self.build_mfdata("fixed_cell", fixed_cell) + self.auxiliary = self.build_mfdata("auxiliary", auxiliary) + self.auxmultname = self.build_mfdata("auxmultname", auxmultname) + self.boundnames = self.build_mfdata("boundnames", boundnames) + self.print_input = self.build_mfdata("print_input", print_input) + self.print_flows = self.build_mfdata("print_flows", print_flows) + self.save_flows = self.build_mfdata("save_flows", save_flows) + self._ts_filerecord = self.build_mfdata("ts_filerecord", + None) + self._ts_package = self.build_child_package("ts", timeseries, + "timeseries", + self._ts_filerecord) + self._obs_filerecord = self.build_mfdata("obs_filerecord", + None) + self._obs_package = self.build_child_package("obs", observations, + "continuous", + self._obs_filerecord) + self.maxbound = self.build_mfdata("maxbound", maxbound) + self.stress_period_data = self.build_mfdata("stress_period_data", + stress_period_data) + self._init_complete = True diff --git a/flopy/mf6/modflow/mfgwfriv.py b/flopy/mf6/modflow/mfgwfriv.py index e51be9b153..a6e626b13c 100644 --- a/flopy/mf6/modflow/mfgwfriv.py +++ b/flopy/mf6/modflow/mfgwfriv.py @@ -1,221 +1,221 @@ -# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY -# mf6/utils/createpackages.py -from .. import mfpackage -from ..data.mfdatautil import ListTemplateGenerator - - -class ModflowGwfriv(mfpackage.MFPackage): - """ - ModflowGwfriv defines a riv package within a gwf6 model. - - Parameters - ---------- - model : MFModel - Model that this package is a part of. Package is automatically - added to model when it is initialized. - loading_package : bool - Do not set this parameter. It is intended for debugging and internal - processing purposes only. - auxiliary : [string] - * auxiliary (string) defines an array of one or more auxiliary variable - names. There is no limit on the number of auxiliary variables that - can be provided on this line; however, lists of information provided - in subsequent blocks must have a column of data for each auxiliary - variable name defined here. The number of auxiliary variables - detected on this line determines the value for naux. Comments cannot - be provided anywhere on this line as they will be interpreted as - auxiliary variable names. Auxiliary variables may not be used by the - package, but they will be available for use by other parts of the - program. The program will terminate with an error if auxiliary - variables are specified on more than one line in the options block. - auxmultname : string - * auxmultname (string) name of auxiliary variable to be used as - multiplier of riverbed conductance. - boundnames : boolean - * boundnames (boolean) keyword to indicate that boundary names may be - provided with the list of river cells. - print_input : boolean - * print_input (boolean) keyword to indicate that the list of river - information will be written to the listing file immediately after it - is read. - print_flows : boolean - * print_flows (boolean) keyword to indicate that the list of river flow - rates will be printed to the listing file for every stress period - time step in which "BUDGET PRINT" is specified in Output Control. If - there is no Output Control option and "PRINT_FLOWS" is specified, - then flow rates are printed for the last time step of each stress - period. 
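Likewise, a minimal sketch of driving the regenerated ModflowGwfrch class above, continuing the hypothetical gwf model from the NPF sketch; the cell ids and recharge rates are illustrative:

    # stress_period_data is keyed by zero-based stress period number, and
    # the cellid tuples are zero-based in FloPy, which adds one when
    # writing the MODFLOW 6 input file (see the cellid description above).
    rch = flopy.mf6.ModflowGwfrch(
        gwf,
        maxbound=2,
        stress_period_data={0: [((0, 0, 0), 1.0e-4),
                                ((0, 0, 1), 2.0e-4)]},
    )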
- save_flows : boolean
- * save_flows (boolean) keyword to indicate that river flow terms will
- be written to the file specified with "BUDGET FILEOUT" in Output
- Control.
- timeseries : {varname:data} or timeseries data
- * Contains data for the ts package. Data can be stored in a dictionary
- containing data for the ts package with variable names as keys and
- package data as values. Data just for the timeseries variable is also
- acceptable. See ts package documentation for more information.
- observations : {varname:data} or continuous data
- * Contains data for the obs package. Data can be stored in a dictionary
- containing data for the obs package with variable names as keys and
- package data as values. Data just for the observations variable is
- also acceptable. See obs package documentation for more information.
- mover : boolean
- * mover (boolean) keyword to indicate that this instance of the River
- Package can be used with the Water Mover (MVR) Package. When the
- MOVER option is specified, additional memory is allocated within the
- package to store the available, provided, and received water.
- maxbound : integer
- * maxbound (integer) integer value specifying the maximum number of
- river cells that will be specified for use during any stress period.
- stress_period_data : [cellid, stage, cond, rbot, aux, boundname]
- * cellid ((integer, ...)) is the cell identifier, and depends on the
- type of grid that is used for the simulation. For a structured grid
- that uses the DIS input file, CELLID is the layer, row, and column.
- For a grid that uses the DISV input file, CELLID is the layer and
- CELL2D number. If the model uses the unstructured discretization
- (DISU) input file, CELLID is the node number for the cell. This
- argument is an index variable, which means that it should be treated
- as zero-based when working with FloPy and Python. Flopy will
- automatically subtract one when loading index variables and add one
- when writing index variables.
- * stage (double) is the head in the river. If the Options block
- includes a TIMESERIESFILE entry (see the "Time-Variable Input"
- section), values can be obtained from a time series by entering the
- time-series name in place of a numeric value.
- * cond (double) is the riverbed hydraulic conductance. If the Options
- block includes a TIMESERIESFILE entry (see the "Time-Variable Input"
- section), values can be obtained from a time series by entering the
- time-series name in place of a numeric value.
- * rbot (double) is the elevation of the bottom of the riverbed. If the
- Options block includes a TIMESERIESFILE entry (see the "Time-Variable
- Input" section), values can be obtained from a time series by
- entering the time-series name in place of a numeric value.
- * aux (double) represents the values of the auxiliary variables for
- each river. The values of auxiliary variables must be present for
- each river. The values must be specified in the order of the
- auxiliary variables specified in the OPTIONS block. If the package
- supports time series and the Options block includes a TIMESERIESFILE
- entry (see the "Time-Variable Input" section), values can be obtained
- from a time series by entering the time-series name in place of a
- numeric value.
- * boundname (string) name of the river cell. BOUNDNAME is an ASCII
- character variable that can contain as many as 40 characters. If
- BOUNDNAME contains spaces in it, then the entire name must be
- enclosed within single quotes.
- filename : String - File name for this package. - pname : String - Package name for this package. - parent_file : MFPackage - Parent package file that references this package. Only needed for - utility packages (mfutl*). For example, mfutllaktab package must have - a mfgwflak package parent_file. - - """ - auxiliary = ListTemplateGenerator(('gwf6', 'riv', 'options', - 'auxiliary')) - ts_filerecord = ListTemplateGenerator(('gwf6', 'riv', 'options', - 'ts_filerecord')) - obs_filerecord = ListTemplateGenerator(('gwf6', 'riv', 'options', - 'obs_filerecord')) - stress_period_data = ListTemplateGenerator(('gwf6', 'riv', 'period', - 'stress_period_data')) - package_abbr = "gwfriv" - _package_type = "riv" - dfn_file_name = "gwf-riv.dfn" - - dfn = [["block options", "name auxiliary", "type string", - "shape (naux)", "reader urword", "optional true"], - ["block options", "name auxmultname", "type string", "shape", - "reader urword", "optional true"], - ["block options", "name boundnames", "type keyword", "shape", - "reader urword", "optional true"], - ["block options", "name print_input", "type keyword", - "reader urword", "optional true"], - ["block options", "name print_flows", "type keyword", - "reader urword", "optional true"], - ["block options", "name save_flows", "type keyword", - "reader urword", "optional true"], - ["block options", "name ts_filerecord", - "type record ts6 filein ts6_filename", "shape", "reader urword", - "tagged true", "optional true", "construct_package ts", - "construct_data timeseries", "parameter_name timeseries"], - ["block options", "name ts6", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name filein", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name ts6_filename", "type string", - "preserve_case true", "in_record true", "reader urword", - "optional false", "tagged false"], - ["block options", "name obs_filerecord", - "type record obs6 filein obs6_filename", "shape", "reader urword", - "tagged true", "optional true", "construct_package obs", - "construct_data continuous", "parameter_name observations"], - ["block options", "name obs6", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name obs6_filename", "type string", - "preserve_case true", "in_record true", "tagged false", - "reader urword", "optional false"], - ["block options", "name mover", "type keyword", "tagged true", - "reader urword", "optional true"], - ["block dimensions", "name maxbound", "type integer", - "reader urword", "optional false"], - ["block period", "name iper", "type integer", - "block_variable True", "in_record true", "tagged false", "shape", - "valid", "reader urword", "optional false"], - ["block period", "name stress_period_data", - "type recarray cellid stage cond rbot aux boundname", - "shape (maxbound)", "reader urword"], - ["block period", "name cellid", "type integer", - "shape (ncelldim)", "tagged false", "in_record true", - "reader urword"], - ["block period", "name stage", "type double precision", "shape", - "tagged false", "in_record true", "reader urword", - "time_series true"], - ["block period", "name cond", "type double precision", "shape", - "tagged false", "in_record true", "reader urword", - "time_series true"], - ["block period", "name rbot", "type double precision", "shape", - "tagged false", "in_record true", "reader urword", - "time_series true"], - 
["block period", "name aux", "type double precision", - "in_record true", "tagged false", "shape (naux)", "reader urword", - "optional true", "time_series true"], - ["block period", "name boundname", "type string", "shape", - "tagged false", "in_record true", "reader urword", - "optional true"]] - - def __init__(self, model, loading_package=False, auxiliary=None, - auxmultname=None, boundnames=None, print_input=None, - print_flows=None, save_flows=None, timeseries=None, - observations=None, mover=None, maxbound=None, - stress_period_data=None, filename=None, pname=None, - parent_file=None): - super(ModflowGwfriv, self).__init__(model, "riv", filename, pname, - loading_package, parent_file) - - # set up variables - self.auxiliary = self.build_mfdata("auxiliary", auxiliary) - self.auxmultname = self.build_mfdata("auxmultname", auxmultname) - self.boundnames = self.build_mfdata("boundnames", boundnames) - self.print_input = self.build_mfdata("print_input", print_input) - self.print_flows = self.build_mfdata("print_flows", print_flows) - self.save_flows = self.build_mfdata("save_flows", save_flows) - self._ts_filerecord = self.build_mfdata("ts_filerecord", - None) - self._ts_package = self.build_child_package("ts", timeseries, - "timeseries", - self._ts_filerecord) - self._obs_filerecord = self.build_mfdata("obs_filerecord", - None) - self._obs_package = self.build_child_package("obs", observations, - "continuous", - self._obs_filerecord) - self.mover = self.build_mfdata("mover", mover) - self.maxbound = self.build_mfdata("maxbound", maxbound) - self.stress_period_data = self.build_mfdata("stress_period_data", - stress_period_data) - self._init_complete = True +# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY +# mf6/utils/createpackages.py +from .. import mfpackage +from ..data.mfdatautil import ListTemplateGenerator + + +class ModflowGwfriv(mfpackage.MFPackage): + """ + ModflowGwfriv defines a riv package within a gwf6 model. + + Parameters + ---------- + model : MFModel + Model that this package is a part of. Package is automatically + added to model when it is initialized. + loading_package : bool + Do not set this parameter. It is intended for debugging and internal + processing purposes only. + auxiliary : [string] + * auxiliary (string) defines an array of one or more auxiliary variable + names. There is no limit on the number of auxiliary variables that + can be provided on this line; however, lists of information provided + in subsequent blocks must have a column of data for each auxiliary + variable name defined here. The number of auxiliary variables + detected on this line determines the value for naux. Comments cannot + be provided anywhere on this line as they will be interpreted as + auxiliary variable names. Auxiliary variables may not be used by the + package, but they will be available for use by other parts of the + program. The program will terminate with an error if auxiliary + variables are specified on more than one line in the options block. + auxmultname : string + * auxmultname (string) name of auxiliary variable to be used as + multiplier of riverbed conductance. + boundnames : boolean + * boundnames (boolean) keyword to indicate that boundary names may be + provided with the list of river cells. + print_input : boolean + * print_input (boolean) keyword to indicate that the list of river + information will be written to the listing file immediately after it + is read. 
+ print_flows : boolean
+ * print_flows (boolean) keyword to indicate that the list of river flow
+ rates will be printed to the listing file for every stress period
+ time step in which "BUDGET PRINT" is specified in Output Control. If
+ there is no Output Control option and "PRINT_FLOWS" is specified,
+ then flow rates are printed for the last time step of each stress
+ period.
+ save_flows : boolean
+ * save_flows (boolean) keyword to indicate that river flow terms will
+ be written to the file specified with "BUDGET FILEOUT" in Output
+ Control.
+ timeseries : {varname:data} or timeseries data
+ * Contains data for the ts package. Data can be stored in a dictionary
+ containing data for the ts package with variable names as keys and
+ package data as values. Data just for the timeseries variable is also
+ acceptable. See ts package documentation for more information.
+ observations : {varname:data} or continuous data
+ * Contains data for the obs package. Data can be stored in a dictionary
+ containing data for the obs package with variable names as keys and
+ package data as values. Data just for the observations variable is
+ also acceptable. See obs package documentation for more information.
+ mover : boolean
+ * mover (boolean) keyword to indicate that this instance of the River
+ Package can be used with the Water Mover (MVR) Package. When the
+ MOVER option is specified, additional memory is allocated within the
+ package to store the available, provided, and received water.
+ maxbound : integer
+ * maxbound (integer) integer value specifying the maximum number of
+ river cells that will be specified for use during any stress period.
+ stress_period_data : [cellid, stage, cond, rbot, aux, boundname]
+ * cellid ((integer, ...)) is the cell identifier, and depends on the
+ type of grid that is used for the simulation. For a structured grid
+ that uses the DIS input file, CELLID is the layer, row, and column.
+ For a grid that uses the DISV input file, CELLID is the layer and
+ CELL2D number. If the model uses the unstructured discretization
+ (DISU) input file, CELLID is the node number for the cell. This
+ argument is an index variable, which means that it should be treated
+ as zero-based when working with FloPy and Python. Flopy will
+ automatically subtract one when loading index variables and add one
+ when writing index variables.
+ * stage (double) is the head in the river. If the Options block
+ includes a TIMESERIESFILE entry (see the "Time-Variable Input"
+ section), values can be obtained from a time series by entering the
+ time-series name in place of a numeric value.
+ * cond (double) is the riverbed hydraulic conductance. If the Options
+ block includes a TIMESERIESFILE entry (see the "Time-Variable Input"
+ section), values can be obtained from a time series by entering the
+ time-series name in place of a numeric value.
+ * rbot (double) is the elevation of the bottom of the riverbed. If the
+ Options block includes a TIMESERIESFILE entry (see the "Time-Variable
+ Input" section), values can be obtained from a time series by
+ entering the time-series name in place of a numeric value.
+ * aux (double) represents the values of the auxiliary variables for
+ each river. The values of auxiliary variables must be present for
+ each river. The values must be specified in the order of the
+ auxiliary variables specified in the OPTIONS block.
If the package + supports time series and the Options block includes a TIMESERIESFILE + entry (see the "Time-Variable Input" section), values can be obtained + from a time series by entering the time-series name in place of a + numeric value. + * boundname (string) name of the river cell. BOUNDNAME is an ASCII + character variable that can contain as many as 40 characters. If + BOUNDNAME contains spaces in it, then the entire name must be + enclosed within single quotes. + filename : String + File name for this package. + pname : String + Package name for this package. + parent_file : MFPackage + Parent package file that references this package. Only needed for + utility packages (mfutl*). For example, mfutllaktab package must have + a mfgwflak package parent_file. + + """ + auxiliary = ListTemplateGenerator(('gwf6', 'riv', 'options', + 'auxiliary')) + ts_filerecord = ListTemplateGenerator(('gwf6', 'riv', 'options', + 'ts_filerecord')) + obs_filerecord = ListTemplateGenerator(('gwf6', 'riv', 'options', + 'obs_filerecord')) + stress_period_data = ListTemplateGenerator(('gwf6', 'riv', 'period', + 'stress_period_data')) + package_abbr = "gwfriv" + _package_type = "riv" + dfn_file_name = "gwf-riv.dfn" + + dfn = [["block options", "name auxiliary", "type string", + "shape (naux)", "reader urword", "optional true"], + ["block options", "name auxmultname", "type string", "shape", + "reader urword", "optional true"], + ["block options", "name boundnames", "type keyword", "shape", + "reader urword", "optional true"], + ["block options", "name print_input", "type keyword", + "reader urword", "optional true"], + ["block options", "name print_flows", "type keyword", + "reader urword", "optional true"], + ["block options", "name save_flows", "type keyword", + "reader urword", "optional true"], + ["block options", "name ts_filerecord", + "type record ts6 filein ts6_filename", "shape", "reader urword", + "tagged true", "optional true", "construct_package ts", + "construct_data timeseries", "parameter_name timeseries"], + ["block options", "name ts6", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name filein", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name ts6_filename", "type string", + "preserve_case true", "in_record true", "reader urword", + "optional false", "tagged false"], + ["block options", "name obs_filerecord", + "type record obs6 filein obs6_filename", "shape", "reader urword", + "tagged true", "optional true", "construct_package obs", + "construct_data continuous", "parameter_name observations"], + ["block options", "name obs6", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name obs6_filename", "type string", + "preserve_case true", "in_record true", "tagged false", + "reader urword", "optional false"], + ["block options", "name mover", "type keyword", "tagged true", + "reader urword", "optional true"], + ["block dimensions", "name maxbound", "type integer", + "reader urword", "optional false"], + ["block period", "name iper", "type integer", + "block_variable True", "in_record true", "tagged false", "shape", + "valid", "reader urword", "optional false"], + ["block period", "name stress_period_data", + "type recarray cellid stage cond rbot aux boundname", + "shape (maxbound)", "reader urword"], + ["block period", "name cellid", "type integer", + "shape (ncelldim)", "tagged 
false", "in_record true", + "reader urword"], + ["block period", "name stage", "type double precision", "shape", + "tagged false", "in_record true", "reader urword", + "time_series true"], + ["block period", "name cond", "type double precision", "shape", + "tagged false", "in_record true", "reader urword", + "time_series true"], + ["block period", "name rbot", "type double precision", "shape", + "tagged false", "in_record true", "reader urword", + "time_series true"], + ["block period", "name aux", "type double precision", + "in_record true", "tagged false", "shape (naux)", "reader urword", + "optional true", "time_series true"], + ["block period", "name boundname", "type string", "shape", + "tagged false", "in_record true", "reader urword", + "optional true"]] + + def __init__(self, model, loading_package=False, auxiliary=None, + auxmultname=None, boundnames=None, print_input=None, + print_flows=None, save_flows=None, timeseries=None, + observations=None, mover=None, maxbound=None, + stress_period_data=None, filename=None, pname=None, + parent_file=None): + super(ModflowGwfriv, self).__init__(model, "riv", filename, pname, + loading_package, parent_file) + + # set up variables + self.auxiliary = self.build_mfdata("auxiliary", auxiliary) + self.auxmultname = self.build_mfdata("auxmultname", auxmultname) + self.boundnames = self.build_mfdata("boundnames", boundnames) + self.print_input = self.build_mfdata("print_input", print_input) + self.print_flows = self.build_mfdata("print_flows", print_flows) + self.save_flows = self.build_mfdata("save_flows", save_flows) + self._ts_filerecord = self.build_mfdata("ts_filerecord", + None) + self._ts_package = self.build_child_package("ts", timeseries, + "timeseries", + self._ts_filerecord) + self._obs_filerecord = self.build_mfdata("obs_filerecord", + None) + self._obs_package = self.build_child_package("obs", observations, + "continuous", + self._obs_filerecord) + self.mover = self.build_mfdata("mover", mover) + self.maxbound = self.build_mfdata("maxbound", maxbound) + self.stress_period_data = self.build_mfdata("stress_period_data", + stress_period_data) + self._init_complete = True diff --git a/flopy/mf6/modflow/mfgwfsfr.py b/flopy/mf6/modflow/mfgwfsfr.py index c5d7aaa301..8088060c18 100644 --- a/flopy/mf6/modflow/mfgwfsfr.py +++ b/flopy/mf6/modflow/mfgwfsfr.py @@ -1,609 +1,609 @@ -# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY -# mf6/utils/createpackages.py -from .. import mfpackage -from ..data.mfdatautil import ListTemplateGenerator - - -class ModflowGwfsfr(mfpackage.MFPackage): - """ - ModflowGwfsfr defines a sfr package within a gwf6 model. - - Parameters - ---------- - model : MFModel - Model that this package is a part of. Package is automatically - added to model when it is initialized. - loading_package : bool - Do not set this parameter. It is intended for debugging and internal - processing purposes only. - auxiliary : [string] - * auxiliary (string) defines an array of one or more auxiliary variable - names. There is no limit on the number of auxiliary variables that - can be provided on this line; however, lists of information provided - in subsequent blocks must have a column of data for each auxiliary - variable name defined here. The number of auxiliary variables - detected on this line determines the value for naux. Comments cannot - be provided anywhere on this line as they will be interpreted as - auxiliary variable names. 
Auxiliary variables may not be used by the - package, but they will be available for use by other parts of the - program. The program will terminate with an error if auxiliary - variables are specified on more than one line in the options block. - boundnames : boolean - * boundnames (boolean) keyword to indicate that boundary names may be - provided with the list of stream reach cells. - print_input : boolean - * print_input (boolean) keyword to indicate that the list of stream - reach information will be written to the listing file immediately - after it is read. - print_stage : boolean - * print_stage (boolean) keyword to indicate that the list of stream - reach stages will be printed to the listing file for every stress - period in which "HEAD PRINT" is specified in Output Control. If there - is no Output Control option and PRINT_STAGE is specified, then stages - are printed for the last time step of each stress period. - print_flows : boolean - * print_flows (boolean) keyword to indicate that the list of stream - reach flow rates will be printed to the listing file for every stress - period time step in which "BUDGET PRINT" is specified in Output - Control. If there is no Output Control option and "PRINT_FLOWS" is - specified, then flow rates are printed for the last time step of each - stress period. - save_flows : boolean - * save_flows (boolean) keyword to indicate that stream reach flow terms - will be written to the file specified with "BUDGET FILEOUT" in Output - Control. - stage_filerecord : [stagefile] - * stagefile (string) name of the binary output file to write stage - information. - budget_filerecord : [budgetfile] - * budgetfile (string) name of the binary output file to write budget - information. - timeseries : {varname:data} or timeseries data - * Contains data for the ts package. Data can be stored in a dictionary - containing data for the ts package with variable names as keys and - package data as values. Data just for the timeseries variable is also - acceptable. See ts package documentation for more information. - observations : {varname:data} or continuous data - * Contains data for the obs package. Data can be stored in a dictionary - containing data for the obs package with variable names as keys and - package data as values. Data just for the observations variable is - also acceptable. See obs package documentation for more information. - mover : boolean - * mover (boolean) keyword to indicate that this instance of the SFR - Package can be used with the Water Mover (MVR) Package. When the - MOVER option is specified, additional memory is allocated within the - package to store the available, provided, and received water. - maximum_iterations : integer - * maximum_iterations (integer) value that defines the maximum number of - Streamflow Routing Newton-Raphson iterations allowed for a reach. By - default, MAXSFRIT is equal to 100. - maximum_depth_change : double - * maximum_depth_change (double) value that defines the depth closure - tolerance. By default, DMAXCHG is equal to :math:`1 \\times 10^{-5}`. - unit_conversion : double - * unit_conversion (double) value (or conversion factor) that is used in - calculating stream depth for stream reach. A constant of 1.486 is - used for flow units of cubic feet per second, and a constant of 1.0 - is used for units of cubic meters per second. The constant must be - multiplied by 86,400 when using time units of days in the simulation. 
- nreaches : integer - * nreaches (integer) integer value specifying the number of stream - reaches. There must be NREACHES entries in the PACKAGEDATA block. - packagedata : [rno, cellid, rlen, rwid, rgrd, rtp, rbth, rhk, man, ncon, - ustrf, ndv, aux, boundname] - * rno (integer) integer value that defines the reach number associated - with the specified PACKAGEDATA data on the line. RNO must be greater - than zero and less than or equal to NREACHES. Reach information must - be specified for every reach or the program will terminate with an - error. The program will also terminate with an error if information - for a reach is specified more than once. This argument is an index - variable, which means that it should be treated as zero-based when - working with FloPy and Python. Flopy will automatically subtract one - when loading index variables and add one when writing index - variables. - * cellid ((integer, ...)) The keyword `NONE' must be specified for - reaches that are not connected to an underlying GWF cell. The keyword - `NONE' is used for reaches that are in cells that have IDOMAIN values - less than one or are in areas not covered by the GWF model grid. - Reach-aquifer flow is not calculated if the keyword `NONE' is - specified. This argument is an index variable, which means that it - should be treated as zero-based when working with FloPy and Python. - Flopy will automatically subtract one when loading index variables - and add one when writing index variables. - * rlen (double) real value that defines the reach length. RLEN must be - greater than zero. - * rwid (double) real value that defines the reach width. RWID must be - greater than zero. - * rgrd (double) real value that defines the stream gradient (slope) - across the reach. RGRD must be greater than zero. - * rtp (double) real value that defines the top elevation of the reach - streambed. - * rbth (double) real value that defines the thickness of the reach - streambed. RBTH can be any value if CELLID is `NONE'. Otherwise, RBTH - must be greater than zero. - * rhk (double) real value that defines the hydraulic conductivity of - the reach streambed. RHK can be any positive value if CELLID is - `NONE'. Otherwise, RHK must be greater than zero. - * man (string) real or character value that defines the Manning's - roughness coefficient for the reach. MAN must be greater than zero. - If the Options block includes a TIMESERIESFILE entry (see the "Time- - Variable Input" section), values can be obtained from a time series - by entering the time-series name in place of a numeric value. - * ncon (integer) integer value that defines the number of reaches - connected to the reach. - * ustrf (double) real value that defines the fraction of upstream flow - from each upstream reach that is applied as upstream inflow to the - reach. The sum of all USTRF values for all reaches connected to the - same upstream reach must be equal to one and USTRF must be greater - than or equal to zero. - * ndv (integer) integer value that defines the number of downstream - diversions for the reach. - * aux (double) represents the values of the auxiliary variables for - each stream reach. The values of auxiliary variables must be present - for each stream reach. The values must be specified in the order of - the auxiliary variables specified in the OPTIONS block. 
If the - package supports time series and the Options block includes a - TIMESERIESFILE entry (see the "Time-Variable Input" section), values - can be obtained from a time series by entering the time-series name - in place of a numeric value. - * boundname (string) name of the stream reach cell. BOUNDNAME is an - ASCII character variable that can contain as many as 40 characters. - If BOUNDNAME contains spaces in it, then the entire name must be - enclosed within single quotes. - connectiondata : [rno, ic] - * rno (integer) integer value that defines the reach number associated - with the specified CONNECTIONDATA data on the line. RNO must be - greater than zero and less than or equal to NREACHES. Reach - connection information must be specified for every reach or the - program will terminate with an error. The program will also terminate - with an error if connection information for a reach is specified more - than once. This argument is an index variable, which means that it - should be treated as zero-based when working with FloPy and Python. - Flopy will automatically subtract one when loading index variables - and add one when writing index variables. - * ic (double_precision) integer value that defines the reach number of - the reach connected to the current reach and whether it is connected - to the upstream or downstream end of the reach. Negative IC numbers - indicate connected reaches are connected to the downstream end of the - current reach. Positive IC numbers indicate connected reaches are - connected to the upstream end of the current reach. The absolute - value of IC must be greater than zero and less than or equal to - NREACHES. This argument is an index variable, which means that it - should be treated as zero-based when working with FloPy and Python. - Flopy will automatically subtract one when loading index variables - and add one when writing index variables. - diversions : [rno, idv, iconr, cprior] - * rno (integer) integer value that defines the reach number associated - with the specified DIVERSIONS data on the line. RNO must be greater - than zero and less than or equal to NREACHES. Reach diversion - information must be specified for every reach with a NDV value - greater than 0 or the program will terminate with an error. The - program will also terminate with an error if diversion information - for a given reach diversion is specified more than once. This - argument is an index variable, which means that it should be treated - as zero-based when working with FloPy and Python. Flopy will - automatically subtract one when loading index variables and add one - when writing index variables. - * idv (integer) integer value that defines the downstream diversion - number for the diversion for reach RNO. IDV must be greater than zero - and less than or equal to NDV for reach RNO. This argument is an - index variable, which means that it should be treated as zero-based - when working with FloPy and Python. Flopy will automatically subtract - one when loading index variables and add one when writing index - variables. - * iconr (integer) integer value that defines the downstream reach that - will receive the diverted water. IDV must be greater than zero and - less than or equal to NREACHES. Furthermore, reach ICONR must be a - downstream connection for reach RNO. This argument is an index - variable, which means that it should be treated as zero-based when - working with FloPy and Python. 
Flopy will automatically subtract one - when loading index variables and add one when writing index - variables. - * cprior (string) character string value that defines the the - prioritization system for the diversion, such as when insufficient - water is available to meet all diversion stipulations, and is used in - conjunction with the value of FLOW value specified in the - STRESS_PERIOD_DATA section. Available diversion options include: (1) - CPRIOR = `FRACTION', then the amount of the diversion is computed as - a fraction of the streamflow leaving reach RNO (:math:`Q_{DS}`); in - this case, 0.0 :math:`\\le` DIVFLOW :math:`\\le` 1.0. (2) CPRIOR = - `EXCESS', a diversion is made only if :math:`Q_{DS}` for reach RNO - exceeds the value of DIVFLOW. If this occurs, then the quantity of - water diverted is the excess flow (:math:`Q_{DS} -` DIVFLOW) and - :math:`Q_{DS}` from reach RNO is set equal to DIVFLOW. This - represents a flood-control type of diversion, as described by Danskin - and Hanson (2002). (3) CPRIOR = `THRESHOLD', then if :math:`Q_{DS}` - in reach RNO is less than the specified diversion flow (DIVFLOW), no - water is diverted from reach RNO. If :math:`Q_{DS}` in reach RNO is - greater than or equal to (DIVFLOW), (DIVFLOW) is diverted and - :math:`Q_{DS}` is set to the remainder (:math:`Q_{DS} -` DIVFLOW)). - This approach assumes that once flow in the stream is sufficiently - low, diversions from the stream cease, and is the `priority' - algorithm that originally was programmed into the STR1 Package - (Prudic, 1989). (4) CPRIOR = `UPTO' -- if :math:`Q_{DS}` in reach RNO - is greater than or equal to the specified diversion flow (DIVFLOW), - :math:`Q_{DS}` is reduced by DIVFLOW. If :math:`Q_{DS}` in reach RNO - is less than (DIVFLOW), DIVFLOW is set to :math:`Q_{DS}` and there - will be no flow available for reaches connected to downstream end of - reach RNO. - perioddata : [rno, sfrsetting] - * rno (integer) integer value that defines the reach number associated - with the specified PERIOD data on the line. RNO must be greater than - zero and less than or equal to NREACHES. This argument is an index - variable, which means that it should be treated as zero-based when - working with FloPy and Python. Flopy will automatically subtract one - when loading index variables and add one when writing index - variables. - * sfrsetting (keystring) line of information that is parsed into a - keyword and values. Keyword values that can be used to start the - SFRSETTING string include: STATUS, MANNING, STAGE, INFLOW, RAINFALL, - EVAPORATION, RUNOFF, DIVERSION, UPSTREAM_FRACTION, and AUXILIARY. - status : [string] - * status (string) keyword option to define stream reach status. - STATUS can be ACTIVE, INACTIVE, or SIMPLE. The SIMPLE STATUS - option simulates streamflow using a user-specified stage for - a reach or a stage set to the top of the reach (depth = 0). - In cases where the simulated leakage calculated using the - specified stage exceeds the sum of inflows to the reach, the - stage is set to the top of the reach and leakage is set equal - to the sum of inflows. Upstream fractions should be changed - using the UPSTREAM_FRACTION SFRSETTING if the status for one - or more reaches is changed to ACTIVE or INACTIVE. 
For - example, if one of two downstream connections for a reach is - inactivated, the upstream fraction for the active and - inactive downstream reach should be changed to 1.0 and 0.0, - respectively, to ensure that the active reach receives all of - the downstream outflow from the upstream reach. By default, - STATUS is ACTIVE. - manning : [string] - * manning (string) real or character value that defines the - Manning's roughness coefficient for the reach. MANNING must - be greater than zero. If the Options block includes a - TIMESERIESFILE entry (see the "Time-Variable Input" section), - values can be obtained from a time series by entering the - time-series name in place of a numeric value. - stage : [string] - * stage (string) real or character value that defines the stage - for the reach. The specified STAGE is only applied if the - reach uses the simple routing option. If STAGE is not - specified for reaches that use the simple routing option, the - specified stage is set to the top of the reach. If the - Options block includes a TIMESERIESFILE entry (see the "Time- - Variable Input" section), values can be obtained from a time - series by entering the time-series name in place of a numeric - value. - inflow : [string] - * inflow (string) real or character value that defines the - volumetric inflow rate for the streamflow routing reach. If - the Options block includes a TIMESERIESFILE entry (see the - "Time-Variable Input" section), values can be obtained from a - time series by entering the time-series name in place of a - numeric value. By default, inflow rates are zero for each - reach. - rainfall : [string] - * rainfall (string) real or character value that defines the - volumetric rate per unit area of water added by precipitation - directly on the streamflow routing reach. If the Options - block includes a TIMESERIESFILE entry (see the "Time-Variable - Input" section), values can be obtained from a time series by - entering the time-series name in place of a numeric value. By - default, rainfall rates are zero for each reach. - evaporation : [string] - * evaporation (string) real or character value that defines the - volumetric rate per unit area of water subtracted by - evaporation from the streamflow routing reach. A positive - evaporation rate should be provided. If the Options block - includes a TIMESERIESFILE entry (see the "Time-Variable - Input" section), values can be obtained from a time series by - entering the time-series name in place of a numeric value. If - the volumetric evaporation rate for a reach exceeds the - sources of water to the reach (upstream and specified - inflows, rainfall, and runoff but excluding groundwater - leakage into the reach) the volumetric evaporation rate is - limited to the sources of water to the reach. By default, - evaporation rates are zero for each reach. - runoff : [string] - * runoff (string) real or character value that defines the - volumetric rate of diffuse overland runoff that enters the - streamflow routing reach. If the Options block includes a - TIMESERIESFILE entry (see the "Time-Variable Input" section), - values can be obtained from a time series by entering the - time-series name in place of a numeric value. 
If the - volumetric runoff rate for a reach is negative and exceeds - inflows to the reach (upstream and specified inflows, and - rainfall but excluding groundwater leakage into the reach) - the volumetric runoff rate is limited to inflows to the reach - and the volumetric evaporation rate for the reach is set to - zero. By default, runoff rates are zero for each reach. - diversionrecord : [idv, divrate] - * idv (integer) diversion number. This argument is an index - variable, which means that it should be treated as zero-based - when working with FloPy and Python. Flopy will automatically - subtract one when loading index variables and add one when - writing index variables. - * divrate (double) real or character value that defines the - volumetric diversion (DIVFLOW) rate for the streamflow - routing reach. If the Options block includes a TIMESERIESFILE - entry (see the "Time-Variable Input" section), values can be - obtained from a time series by entering the time-series name - in place of a numeric value. - upstream_fraction : [double] - * upstream_fraction (double) real value that defines the - fraction of upstream flow (USTRF) from each upstream reach - that is applied as upstream inflow to the reach. The sum of - all USTRF values for all reaches connected to the same - upstream reach must be equal to one. - auxiliaryrecord : [auxname, auxval] - * auxname (string) name for the auxiliary variable to be - assigned AUXVAL. AUXNAME must match one of the auxiliary - variable names defined in the OPTIONS block. If AUXNAME does - not match one of the auxiliary variable names defined in the - OPTIONS block the data are ignored. - * auxval (double) value for the auxiliary variable. If the - Options block includes a TIMESERIESFILE entry (see the "Time- - Variable Input" section), values can be obtained from a time - series by entering the time-series name in place of a numeric - value. - filename : String - File name for this package. - pname : String - Package name for this package. - parent_file : MFPackage - Parent package file that references this package. Only needed for - utility packages (mfutl*). For example, mfutllaktab package must have - a mfgwflak package parent_file. 
- - """ - auxiliary = ListTemplateGenerator(('gwf6', 'sfr', 'options', - 'auxiliary')) - stage_filerecord = ListTemplateGenerator(('gwf6', 'sfr', 'options', - 'stage_filerecord')) - budget_filerecord = ListTemplateGenerator(('gwf6', 'sfr', 'options', - 'budget_filerecord')) - ts_filerecord = ListTemplateGenerator(('gwf6', 'sfr', 'options', - 'ts_filerecord')) - obs_filerecord = ListTemplateGenerator(('gwf6', 'sfr', 'options', - 'obs_filerecord')) - packagedata = ListTemplateGenerator(('gwf6', 'sfr', 'packagedata', - 'packagedata')) - connectiondata = ListTemplateGenerator(('gwf6', 'sfr', - 'connectiondata', - 'connectiondata')) - diversions = ListTemplateGenerator(('gwf6', 'sfr', 'diversions', - 'diversions')) - perioddata = ListTemplateGenerator(('gwf6', 'sfr', 'period', - 'perioddata')) - package_abbr = "gwfsfr" - _package_type = "sfr" - dfn_file_name = "gwf-sfr.dfn" - - dfn = [["block options", "name auxiliary", "type string", - "shape (naux)", "reader urword", "optional true"], - ["block options", "name boundnames", "type keyword", "shape", - "reader urword", "optional true"], - ["block options", "name print_input", "type keyword", - "reader urword", "optional true"], - ["block options", "name print_stage", "type keyword", - "reader urword", "optional true"], - ["block options", "name print_flows", "type keyword", - "reader urword", "optional true"], - ["block options", "name save_flows", "type keyword", - "reader urword", "optional true"], - ["block options", "name stage_filerecord", - "type record stage fileout stagefile", "shape", "reader urword", - "tagged true", "optional true"], - ["block options", "name stage", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name stagefile", "type string", - "preserve_case true", "shape", "in_record true", "reader urword", - "tagged false", "optional false"], - ["block options", "name budget_filerecord", - "type record budget fileout budgetfile", "shape", "reader urword", - "tagged true", "optional true"], - ["block options", "name budget", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name fileout", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name budgetfile", "type string", - "preserve_case true", "shape", "in_record true", "reader urword", - "tagged false", "optional false"], - ["block options", "name ts_filerecord", - "type record ts6 filein ts6_filename", "shape", "reader urword", - "tagged true", "optional true", "construct_package ts", - "construct_data timeseries", "parameter_name timeseries"], - ["block options", "name ts6", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name filein", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name ts6_filename", "type string", - "preserve_case true", "in_record true", "reader urword", - "optional false", "tagged false"], - ["block options", "name obs_filerecord", - "type record obs6 filein obs6_filename", "shape", "reader urword", - "tagged true", "optional true", "construct_package obs", - "construct_data continuous", "parameter_name observations"], - ["block options", "name obs6", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name obs6_filename", "type string", - 
"preserve_case true", "in_record true", "tagged false", - "reader urword", "optional false"], - ["block options", "name mover", "type keyword", "tagged true", - "reader urword", "optional true"], - ["block options", "name maximum_iterations", "type integer", - "reader urword", "optional true"], - ["block options", "name maximum_depth_change", - "type double precision", "reader urword", "optional true"], - ["block options", "name unit_conversion", - "type double precision", "reader urword", "optional true"], - ["block dimensions", "name nreaches", "type integer", - "reader urword", "optional false"], - ["block packagedata", "name packagedata", - "type recarray rno cellid rlen rwid rgrd rtp rbth rhk man ncon " - "ustrf ndv aux boundname", - "shape (maxbound)", "reader urword"], - ["block packagedata", "name rno", "type integer", "shape", - "tagged false", "in_record true", "reader urword", - "numeric_index true"], - ["block packagedata", "name cellid", "type integer", - "shape (ncelldim)", "tagged false", "in_record true", - "reader urword"], - ["block packagedata", "name rlen", "type double precision", - "shape", "tagged false", "in_record true", "reader urword"], - ["block packagedata", "name rwid", "type double precision", - "shape", "tagged false", "in_record true", "reader urword"], - ["block packagedata", "name rgrd", "type double precision", - "shape", "tagged false", "in_record true", "reader urword"], - ["block packagedata", "name rtp", "type double precision", - "shape", "tagged false", "in_record true", "reader urword"], - ["block packagedata", "name rbth", "type double precision", - "shape", "tagged false", "in_record true", "reader urword"], - ["block packagedata", "name rhk", "type double precision", - "shape", "tagged false", "in_record true", "reader urword"], - ["block packagedata", "name man", "type string", "shape", - "tagged false", "in_record true", "reader urword", - "time_series true"], - ["block packagedata", "name ncon", "type integer", "shape", - "tagged false", "in_record true", "reader urword"], - ["block packagedata", "name ustrf", "type double precision", - "shape", "tagged false", "in_record true", "reader urword"], - ["block packagedata", "name ndv", "type integer", "shape", - "tagged false", "in_record true", "reader urword"], - ["block packagedata", "name aux", "type double precision", - "in_record true", "tagged false", "shape (naux)", "reader urword", - "time_series true", "optional true"], - ["block packagedata", "name boundname", "type string", "shape", - "tagged false", "in_record true", "reader urword", - "optional true"], - ["block connectiondata", "name connectiondata", - "type recarray rno ic", "shape (maxbound)", "reader urword"], - ["block connectiondata", "name rno", "type integer", "shape", - "tagged false", "in_record true", "reader urword", - "numeric_index true"], - ["block connectiondata", "name ic", "type integer", - "shape (ncon(rno))", "tagged false", "in_record true", - "reader urword", "numeric_index true", - "support_negative_index true"], - ["block diversions", "name diversions", - "type recarray rno idv iconr cprior", "shape (maxbound)", - "reader urword"], - ["block diversions", "name rno", "type integer", "shape", - "tagged false", "in_record true", "reader urword", - "numeric_index true"], - ["block diversions", "name idv", "type integer", "shape", - "tagged false", "in_record true", "reader urword", - "numeric_index true"], - ["block diversions", "name iconr", "type integer", "shape", - "tagged false", "in_record true", "reader 
urword", - "numeric_index true"], - ["block diversions", "name cprior", "type string", "shape", - "tagged false", "in_record true", "reader urword"], - ["block period", "name iper", "type integer", - "block_variable True", "in_record true", "tagged false", "shape", - "valid", "reader urword", "optional false"], - ["block period", "name perioddata", - "type recarray rno sfrsetting", "shape", "reader urword"], - ["block period", "name rno", "type integer", "shape", - "tagged false", "in_record true", "reader urword", - "numeric_index true"], - ["block period", "name sfrsetting", - "type keystring status manning stage inflow rainfall evaporation " - "runoff diversionrecord upstream_fraction auxiliaryrecord", - "shape", "tagged false", "in_record true", "reader urword"], - ["block period", "name status", "type string", "shape", - "tagged true", "in_record true", "reader urword"], - ["block period", "name manning", "type string", "shape", - "tagged true", "in_record true", "reader urword", - "time_series true"], - ["block period", "name stage", "type string", "shape", - "tagged true", "in_record true", "reader urword", - "time_series true"], - ["block period", "name inflow", "type string", "shape", - "tagged true", "in_record true", "reader urword", - "time_series true"], - ["block period", "name rainfall", "type string", "shape", - "tagged true", "in_record true", "reader urword", - "time_series true"], - ["block period", "name evaporation", "type string", "shape", - "tagged true", "in_record true", "reader urword", - "time_series true"], - ["block period", "name runoff", "type string", "shape", - "tagged true", "in_record true", "reader urword", - "time_series true"], - ["block period", "name diversionrecord", - "type record diversion idv divrate", "shape", "tagged", - "in_record true", "reader urword"], - ["block period", "name diversion", "type keyword", "shape", - "in_record true", "reader urword"], - ["block period", "name idv", "type integer", "shape", - "tagged false", "in_record true", "reader urword", - "numeric_index true"], - ["block period", "name divrate", "type double precision", - "shape", "tagged false", "in_record true", "reader urword", - "time_series true"], - ["block period", "name upstream_fraction", - "type double precision", "shape", "tagged true", "in_record true", - "reader urword"], - ["block period", "name auxiliaryrecord", - "type record auxiliary auxname auxval", "shape", "tagged", - "in_record true", "reader urword"], - ["block period", "name auxiliary", "type keyword", "shape", - "in_record true", "reader urword"], - ["block period", "name auxname", "type string", "shape", - "tagged false", "in_record true", "reader urword"], - ["block period", "name auxval", "type double precision", "shape", - "tagged false", "in_record true", "reader urword", - "time_series true"]] - - def __init__(self, model, loading_package=False, auxiliary=None, - boundnames=None, print_input=None, print_stage=None, - print_flows=None, save_flows=None, stage_filerecord=None, - budget_filerecord=None, timeseries=None, observations=None, - mover=None, maximum_iterations=None, - maximum_depth_change=None, unit_conversion=None, - nreaches=None, packagedata=None, connectiondata=None, - diversions=None, perioddata=None, filename=None, pname=None, - parent_file=None): - super(ModflowGwfsfr, self).__init__(model, "sfr", filename, pname, - loading_package, parent_file) - - # set up variables - self.auxiliary = self.build_mfdata("auxiliary", auxiliary) - self.boundnames = 
self.build_mfdata("boundnames", boundnames) - self.print_input = self.build_mfdata("print_input", print_input) - self.print_stage = self.build_mfdata("print_stage", print_stage) - self.print_flows = self.build_mfdata("print_flows", print_flows) - self.save_flows = self.build_mfdata("save_flows", save_flows) - self.stage_filerecord = self.build_mfdata("stage_filerecord", - stage_filerecord) - self.budget_filerecord = self.build_mfdata("budget_filerecord", - budget_filerecord) - self._ts_filerecord = self.build_mfdata("ts_filerecord", - None) - self._ts_package = self.build_child_package("ts", timeseries, - "timeseries", - self._ts_filerecord) - self._obs_filerecord = self.build_mfdata("obs_filerecord", - None) - self._obs_package = self.build_child_package("obs", observations, - "continuous", - self._obs_filerecord) - self.mover = self.build_mfdata("mover", mover) - self.maximum_iterations = self.build_mfdata("maximum_iterations", - maximum_iterations) - self.maximum_depth_change = self.build_mfdata("maximum_depth_change", - maximum_depth_change) - self.unit_conversion = self.build_mfdata("unit_conversion", - unit_conversion) - self.nreaches = self.build_mfdata("nreaches", nreaches) - self.packagedata = self.build_mfdata("packagedata", packagedata) - self.connectiondata = self.build_mfdata("connectiondata", - connectiondata) - self.diversions = self.build_mfdata("diversions", diversions) - self.perioddata = self.build_mfdata("perioddata", perioddata) - self._init_complete = True +# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY +# mf6/utils/createpackages.py +from .. import mfpackage +from ..data.mfdatautil import ListTemplateGenerator + + +class ModflowGwfsfr(mfpackage.MFPackage): + """ + ModflowGwfsfr defines a sfr package within a gwf6 model. + + Parameters + ---------- + model : MFModel + Model that this package is a part of. Package is automatically + added to model when it is initialized. + loading_package : bool + Do not set this parameter. It is intended for debugging and internal + processing purposes only. + auxiliary : [string] + * auxiliary (string) defines an array of one or more auxiliary variable + names. There is no limit on the number of auxiliary variables that + can be provided on this line; however, lists of information provided + in subsequent blocks must have a column of data for each auxiliary + variable name defined here. The number of auxiliary variables + detected on this line determines the value for naux. Comments cannot + be provided anywhere on this line as they will be interpreted as + auxiliary variable names. Auxiliary variables may not be used by the + package, but they will be available for use by other parts of the + program. The program will terminate with an error if auxiliary + variables are specified on more than one line in the options block. + boundnames : boolean + * boundnames (boolean) keyword to indicate that boundary names may be + provided with the list of stream reach cells. + print_input : boolean + * print_input (boolean) keyword to indicate that the list of stream + reach information will be written to the listing file immediately + after it is read. + print_stage : boolean + * print_stage (boolean) keyword to indicate that the list of stream + reach stages will be printed to the listing file for every stress + period in which "HEAD PRINT" is specified in Output Control. If there + is no Output Control option and PRINT_STAGE is specified, then stages + are printed for the last time step of each stress period. 
+ print_flows : boolean
+ * print_flows (boolean) keyword to indicate that the list of stream
+ reach flow rates will be printed to the listing file for every stress
+ period time step in which "BUDGET PRINT" is specified in Output
+ Control. If there is no Output Control option and "PRINT_FLOWS" is
+ specified, then flow rates are printed for the last time step of each
+ stress period.
+ save_flows : boolean
+ * save_flows (boolean) keyword to indicate that stream reach flow terms
+ will be written to the file specified with "BUDGET FILEOUT" in Output
+ Control.
+ stage_filerecord : [stagefile]
+ * stagefile (string) name of the binary output file to write stage
+ information.
+ budget_filerecord : [budgetfile]
+ * budgetfile (string) name of the binary output file to write budget
+ information.
+ timeseries : {varname:data} or timeseries data
+ * Contains data for the ts package. Data can be stored in a dictionary
+ containing data for the ts package with variable names as keys and
+ package data as values. Data just for the timeseries variable is also
+ acceptable. See ts package documentation for more information.
+ observations : {varname:data} or continuous data
+ * Contains data for the obs package. Data can be stored in a dictionary
+ containing data for the obs package with variable names as keys and
+ package data as values. Data just for the observations variable is
+ also acceptable. See obs package documentation for more information.
+ mover : boolean
+ * mover (boolean) keyword to indicate that this instance of the SFR
+ Package can be used with the Water Mover (MVR) Package. When the
+ MOVER option is specified, additional memory is allocated within the
+ package to store the available, provided, and received water.
+ maximum_iterations : integer
+ * maximum_iterations (integer) value that defines the maximum number of
+ Streamflow Routing Newton-Raphson iterations allowed for a reach. By
+ default, MAXSFRIT is equal to 100.
+ maximum_depth_change : double
+ * maximum_depth_change (double) value that defines the depth closure
+ tolerance. By default, DMAXCHG is equal to :math:`1 \\times 10^{-5}`.
+ unit_conversion : double
+ * unit_conversion (double) value (or conversion factor) that is used in
+ calculating stream depth for a stream reach. A constant of 1.486 is
+ used for flow units of cubic feet per second, and a constant of 1.0
+ is used for units of cubic meters per second. The constant must be
+ multiplied by 86,400 when using time units of days in the simulation.
+ nreaches : integer
+ * nreaches (integer) integer value specifying the number of stream
+ reaches. There must be NREACHES entries in the PACKAGEDATA block.
+ packagedata : [rno, cellid, rlen, rwid, rgrd, rtp, rbth, rhk, man, ncon,
+ ustrf, ndv, aux, boundname]
+ * rno (integer) integer value that defines the reach number associated
+ with the specified PACKAGEDATA data on the line. RNO must be greater
+ than zero and less than or equal to NREACHES. Reach information must
+ be specified for every reach or the program will terminate with an
+ error. The program will also terminate with an error if information
+ for a reach is specified more than once. This argument is an index
+ variable, which means that it should be treated as zero-based when
+ working with FloPy and Python. Flopy will automatically subtract one
+ when loading index variables and add one when writing index
+ variables.
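The zero-based/one-based convention described for RNO can be seen by writing the files from the sketch above; the written file name "ex.sfr" assumes the default naming (model name plus the package type), which is an assumption here:

    sim.write_simulation()
    # On disk the package is one-based: the zero-based packagedata row
    # [0, (0, 0, 0), ...] from the sketch above is written with rno 1 and
    # cellid "1 1 1", because FloPy adds one to index variables on write
    # and subtracts one on load.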
+ * cellid ((integer, ...)) The keyword `NONE' must be specified for + reaches that are not connected to an underlying GWF cell. The keyword + `NONE' is used for reaches that are in cells that have IDOMAIN values + less than one or are in areas not covered by the GWF model grid. + Reach-aquifer flow is not calculated if the keyword `NONE' is + specified. This argument is an index variable, which means that it + should be treated as zero-based when working with FloPy and Python. + Flopy will automatically subtract one when loading index variables + and add one when writing index variables. + * rlen (double) real value that defines the reach length. RLEN must be + greater than zero. + * rwid (double) real value that defines the reach width. RWID must be + greater than zero. + * rgrd (double) real value that defines the stream gradient (slope) + across the reach. RGRD must be greater than zero. + * rtp (double) real value that defines the top elevation of the reach + streambed. + * rbth (double) real value that defines the thickness of the reach + streambed. RBTH can be any value if CELLID is `NONE'. Otherwise, RBTH + must be greater than zero. + * rhk (double) real value that defines the hydraulic conductivity of + the reach streambed. RHK can be any positive value if CELLID is + `NONE'. Otherwise, RHK must be greater than zero. + * man (string) real or character value that defines the Manning's + roughness coefficient for the reach. MAN must be greater than zero. + If the Options block includes a TIMESERIESFILE entry (see the "Time- + Variable Input" section), values can be obtained from a time series + by entering the time-series name in place of a numeric value. + * ncon (integer) integer value that defines the number of reaches + connected to the reach. + * ustrf (double) real value that defines the fraction of upstream flow + from each upstream reach that is applied as upstream inflow to the + reach. The sum of all USTRF values for all reaches connected to the + same upstream reach must be equal to one and USTRF must be greater + than or equal to zero. + * ndv (integer) integer value that defines the number of downstream + diversions for the reach. + * aux (double) represents the values of the auxiliary variables for + each stream reach. The values of auxiliary variables must be present + for each stream reach. The values must be specified in the order of + the auxiliary variables specified in the OPTIONS block. If the + package supports time series and the Options block includes a + TIMESERIESFILE entry (see the "Time-Variable Input" section), values + can be obtained from a time series by entering the time-series name + in place of a numeric value. + * boundname (string) name of the stream reach cell. BOUNDNAME is an + ASCII character variable that can contain as many as 40 characters. + If BOUNDNAME contains spaces in it, then the entire name must be + enclosed within single quotes. + connectiondata : [rno, ic] + * rno (integer) integer value that defines the reach number associated + with the specified CONNECTIONDATA data on the line. RNO must be + greater than zero and less than or equal to NREACHES. Reach + connection information must be specified for every reach or the + program will terminate with an error. The program will also terminate + with an error if connection information for a reach is specified more + than once. This argument is an index variable, which means that it + should be treated as zero-based when working with FloPy and Python. 
+ Flopy will automatically subtract one when loading index variables
+ and add one when writing index variables.
+ * ic (integer) integer value that defines the reach number of
+ the reach connected to the current reach and whether it is connected
+ to the upstream or downstream end of the reach. Negative IC numbers
+ indicate connected reaches are connected to the downstream end of the
+ current reach. Positive IC numbers indicate connected reaches are
+ connected to the upstream end of the current reach. The absolute
+ value of IC must be greater than zero and less than or equal to
+ NREACHES. This argument is an index variable, which means that it
+ should be treated as zero-based when working with FloPy and Python.
+ Flopy will automatically subtract one when loading index variables
+ and add one when writing index variables.
+ diversions : [rno, idv, iconr, cprior]
+ * rno (integer) integer value that defines the reach number associated
+ with the specified DIVERSIONS data on the line. RNO must be greater
+ than zero and less than or equal to NREACHES. Reach diversion
+ information must be specified for every reach with a NDV value
+ greater than 0 or the program will terminate with an error. The
+ program will also terminate with an error if diversion information
+ for a given reach diversion is specified more than once. This
+ argument is an index variable, which means that it should be treated
+ as zero-based when working with FloPy and Python. Flopy will
+ automatically subtract one when loading index variables and add one
+ when writing index variables.
+ * idv (integer) integer value that defines the downstream diversion
+ number for the diversion for reach RNO. IDV must be greater than zero
+ and less than or equal to NDV for reach RNO. This argument is an
+ index variable, which means that it should be treated as zero-based
+ when working with FloPy and Python. Flopy will automatically subtract
+ one when loading index variables and add one when writing index
+ variables.
+ * iconr (integer) integer value that defines the downstream reach that
+ will receive the diverted water. ICONR must be greater than zero and
+ less than or equal to NREACHES. Furthermore, reach ICONR must be a
+ downstream connection for reach RNO. This argument is an index
+ variable, which means that it should be treated as zero-based when
+ working with FloPy and Python. Flopy will automatically subtract one
+ when loading index variables and add one when writing index
+ variables.
+ * cprior (string) character string value that defines the
+ prioritization system for the diversion, such as when insufficient
+ water is available to meet all diversion stipulations, and is used in
+ conjunction with the value of FLOW specified in the
+ STRESS_PERIOD_DATA section. Available diversion options include: (1)
+ CPRIOR = `FRACTION', then the amount of the diversion is computed as
+ a fraction of the streamflow leaving reach RNO (:math:`Q_{DS}`); in
+ this case, 0.0 :math:`\\le` DIVFLOW :math:`\\le` 1.0. (2) CPRIOR =
+ `EXCESS', a diversion is made only if :math:`Q_{DS}` for reach RNO
+ exceeds the value of DIVFLOW. If this occurs, then the quantity of
+ water diverted is the excess flow (:math:`Q_{DS} -` DIVFLOW) and
+ :math:`Q_{DS}` from reach RNO is set equal to DIVFLOW. This
+ represents a flood-control type of diversion, as described by Danskin
+ and Hanson (2002).
(3) CPRIOR = `THRESHOLD', then if :math:`Q_{DS}`
+ in reach RNO is less than the specified diversion flow (DIVFLOW), no
+ water is diverted from reach RNO. If :math:`Q_{DS}` in reach RNO is
+ greater than or equal to (DIVFLOW), (DIVFLOW) is diverted and
+ :math:`Q_{DS}` is set to the remainder (:math:`Q_{DS} -` DIVFLOW).
+ This approach assumes that once flow in the stream is sufficiently
+ low, diversions from the stream cease, and is the `priority'
+ algorithm that originally was programmed into the STR1 Package
+ (Prudic, 1989). (4) CPRIOR = `UPTO' -- if :math:`Q_{DS}` in reach RNO
+ is greater than or equal to the specified diversion flow (DIVFLOW),
+ :math:`Q_{DS}` is reduced by DIVFLOW. If :math:`Q_{DS}` in reach RNO
+ is less than (DIVFLOW), DIVFLOW is set to :math:`Q_{DS}` and there
+ will be no flow available for reaches connected to the downstream end
+ of reach RNO.
+ perioddata : [rno, sfrsetting]
+ * rno (integer) integer value that defines the reach number associated
+ with the specified PERIOD data on the line. RNO must be greater than
+ zero and less than or equal to NREACHES. This argument is an index
+ variable, which means that it should be treated as zero-based when
+ working with FloPy and Python. Flopy will automatically subtract one
+ when loading index variables and add one when writing index
+ variables.
+ * sfrsetting (keystring) line of information that is parsed into a
+ keyword and values. Keyword values that can be used to start the
+ SFRSETTING string include: STATUS, MANNING, STAGE, INFLOW, RAINFALL,
+ EVAPORATION, RUNOFF, DIVERSION, UPSTREAM_FRACTION, and AUXILIARY.
+ status : [string]
+ * status (string) keyword option to define stream reach status.
+ STATUS can be ACTIVE, INACTIVE, or SIMPLE. The SIMPLE STATUS
+ option simulates streamflow using a user-specified stage for
+ a reach or a stage set to the top of the reach (depth = 0).
+ In cases where the simulated leakage calculated using the
+ specified stage exceeds the sum of inflows to the reach, the
+ stage is set to the top of the reach and leakage is set equal
+ to the sum of inflows. Upstream fractions should be changed
+ using the UPSTREAM_FRACTION SFRSETTING if the status for one
+ or more reaches is changed to ACTIVE or INACTIVE. For
+ example, if one of two downstream connections for a reach is
+ inactivated, the upstream fraction for the active and
+ inactive downstream reach should be changed to 1.0 and 0.0,
+ respectively, to ensure that the active reach receives all of
+ the downstream outflow from the upstream reach. By default,
+ STATUS is ACTIVE.
+ manning : [string]
+ * manning (string) real or character value that defines the
+ Manning's roughness coefficient for the reach. MANNING must
+ be greater than zero. If the Options block includes a
+ TIMESERIESFILE entry (see the "Time-Variable Input" section),
+ values can be obtained from a time series by entering the
+ time-series name in place of a numeric value.
+ stage : [string]
+ * stage (string) real or character value that defines the stage
+ for the reach. The specified STAGE is only applied if the
+ reach uses the simple routing option. If STAGE is not
+ specified for reaches that use the simple routing option, the
+ specified stage is set to the top of the reach. If the
+ Options block includes a TIMESERIESFILE entry (see the "Time-
+ Variable Input" section), values can be obtained from a time
+ series by entering the time-series name in place of a numeric
+ value.
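SFRSETTING keystrings such as STATUS, STAGE, and MANNING above are passed as [rno, keyword, value] rows in perioddata. A hypothetical update to the single stress period of the earlier SFR sketch (all values illustrative) might look like this:

    sfr.perioddata.set_data({0: [
        [0, "inflow", 1.0],       # keep the inflow from the earlier sketch
        [1, "status", "simple"],  # switch reach 2 to simple routing
        [1, "stage", 9.6],        # fixed stage used by the SIMPLE option
        [1, "manning", 0.030],    # updated roughness for reach 2
    ]})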
+ inflow : [string] + * inflow (string) real or character value that defines the + volumetric inflow rate for the streamflow routing reach. If + the Options block includes a TIMESERIESFILE entry (see the + "Time-Variable Input" section), values can be obtained from a + time series by entering the time-series name in place of a + numeric value. By default, inflow rates are zero for each + reach. + rainfall : [string] + * rainfall (string) real or character value that defines the + volumetric rate per unit area of water added by precipitation + directly on the streamflow routing reach. If the Options + block includes a TIMESERIESFILE entry (see the "Time-Variable + Input" section), values can be obtained from a time series by + entering the time-series name in place of a numeric value. By + default, rainfall rates are zero for each reach. + evaporation : [string] + * evaporation (string) real or character value that defines the + volumetric rate per unit area of water subtracted by + evaporation from the streamflow routing reach. A positive + evaporation rate should be provided. If the Options block + includes a TIMESERIESFILE entry (see the "Time-Variable + Input" section), values can be obtained from a time series by + entering the time-series name in place of a numeric value. If + the volumetric evaporation rate for a reach exceeds the + sources of water to the reach (upstream and specified + inflows, rainfall, and runoff but excluding groundwater + leakage into the reach) the volumetric evaporation rate is + limited to the sources of water to the reach. By default, + evaporation rates are zero for each reach. + runoff : [string] + * runoff (string) real or character value that defines the + volumetric rate of diffuse overland runoff that enters the + streamflow routing reach. If the Options block includes a + TIMESERIESFILE entry (see the "Time-Variable Input" section), + values can be obtained from a time series by entering the + time-series name in place of a numeric value. If the + volumetric runoff rate for a reach is negative and exceeds + inflows to the reach (upstream and specified inflows, and + rainfall but excluding groundwater leakage into the reach) + the volumetric runoff rate is limited to inflows to the reach + and the volumetric evaporation rate for the reach is set to + zero. By default, runoff rates are zero for each reach. + diversionrecord : [idv, divrate] + * idv (integer) diversion number. This argument is an index + variable, which means that it should be treated as zero-based + when working with FloPy and Python. Flopy will automatically + subtract one when loading index variables and add one when + writing index variables. + * divrate (double) real or character value that defines the + volumetric diversion (DIVFLOW) rate for the streamflow + routing reach. If the Options block includes a TIMESERIESFILE + entry (see the "Time-Variable Input" section), values can be + obtained from a time series by entering the time-series name + in place of a numeric value. + upstream_fraction : [double] + * upstream_fraction (double) real value that defines the + fraction of upstream flow (USTRF) from each upstream reach + that is applied as upstream inflow to the reach. The sum of + all USTRF values for all reaches connected to the same + upstream reach must be equal to one. + auxiliaryrecord : [auxname, auxval] + * auxname (string) name for the auxiliary variable to be + assigned AUXVAL. AUXNAME must match one of the auxiliary + variable names defined in the OPTIONS block. 
If AUXNAME does + not match one of the auxiliary variable names defined in the + OPTIONS block the data are ignored. + * auxval (double) value for the auxiliary variable. If the + Options block includes a TIMESERIESFILE entry (see the "Time- + Variable Input" section), values can be obtained from a time + series by entering the time-series name in place of a numeric + value. + filename : String + File name for this package. + pname : String + Package name for this package. + parent_file : MFPackage + Parent package file that references this package. Only needed for + utility packages (mfutl*). For example, mfutllaktab package must have + a mfgwflak package parent_file. + + """ + auxiliary = ListTemplateGenerator(('gwf6', 'sfr', 'options', + 'auxiliary')) + stage_filerecord = ListTemplateGenerator(('gwf6', 'sfr', 'options', + 'stage_filerecord')) + budget_filerecord = ListTemplateGenerator(('gwf6', 'sfr', 'options', + 'budget_filerecord')) + ts_filerecord = ListTemplateGenerator(('gwf6', 'sfr', 'options', + 'ts_filerecord')) + obs_filerecord = ListTemplateGenerator(('gwf6', 'sfr', 'options', + 'obs_filerecord')) + packagedata = ListTemplateGenerator(('gwf6', 'sfr', 'packagedata', + 'packagedata')) + connectiondata = ListTemplateGenerator(('gwf6', 'sfr', + 'connectiondata', + 'connectiondata')) + diversions = ListTemplateGenerator(('gwf6', 'sfr', 'diversions', + 'diversions')) + perioddata = ListTemplateGenerator(('gwf6', 'sfr', 'period', + 'perioddata')) + package_abbr = "gwfsfr" + _package_type = "sfr" + dfn_file_name = "gwf-sfr.dfn" + + dfn = [["block options", "name auxiliary", "type string", + "shape (naux)", "reader urword", "optional true"], + ["block options", "name boundnames", "type keyword", "shape", + "reader urword", "optional true"], + ["block options", "name print_input", "type keyword", + "reader urword", "optional true"], + ["block options", "name print_stage", "type keyword", + "reader urword", "optional true"], + ["block options", "name print_flows", "type keyword", + "reader urword", "optional true"], + ["block options", "name save_flows", "type keyword", + "reader urword", "optional true"], + ["block options", "name stage_filerecord", + "type record stage fileout stagefile", "shape", "reader urword", + "tagged true", "optional true"], + ["block options", "name stage", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name stagefile", "type string", + "preserve_case true", "shape", "in_record true", "reader urword", + "tagged false", "optional false"], + ["block options", "name budget_filerecord", + "type record budget fileout budgetfile", "shape", "reader urword", + "tagged true", "optional true"], + ["block options", "name budget", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name fileout", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name budgetfile", "type string", + "preserve_case true", "shape", "in_record true", "reader urword", + "tagged false", "optional false"], + ["block options", "name ts_filerecord", + "type record ts6 filein ts6_filename", "shape", "reader urword", + "tagged true", "optional true", "construct_package ts", + "construct_data timeseries", "parameter_name timeseries"], + ["block options", "name ts6", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name filein", 
"type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name ts6_filename", "type string", + "preserve_case true", "in_record true", "reader urword", + "optional false", "tagged false"], + ["block options", "name obs_filerecord", + "type record obs6 filein obs6_filename", "shape", "reader urword", + "tagged true", "optional true", "construct_package obs", + "construct_data continuous", "parameter_name observations"], + ["block options", "name obs6", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name obs6_filename", "type string", + "preserve_case true", "in_record true", "tagged false", + "reader urword", "optional false"], + ["block options", "name mover", "type keyword", "tagged true", + "reader urword", "optional true"], + ["block options", "name maximum_iterations", "type integer", + "reader urword", "optional true"], + ["block options", "name maximum_depth_change", + "type double precision", "reader urword", "optional true"], + ["block options", "name unit_conversion", + "type double precision", "reader urword", "optional true"], + ["block dimensions", "name nreaches", "type integer", + "reader urword", "optional false"], + ["block packagedata", "name packagedata", + "type recarray rno cellid rlen rwid rgrd rtp rbth rhk man ncon " + "ustrf ndv aux boundname", + "shape (maxbound)", "reader urword"], + ["block packagedata", "name rno", "type integer", "shape", + "tagged false", "in_record true", "reader urword", + "numeric_index true"], + ["block packagedata", "name cellid", "type integer", + "shape (ncelldim)", "tagged false", "in_record true", + "reader urword"], + ["block packagedata", "name rlen", "type double precision", + "shape", "tagged false", "in_record true", "reader urword"], + ["block packagedata", "name rwid", "type double precision", + "shape", "tagged false", "in_record true", "reader urword"], + ["block packagedata", "name rgrd", "type double precision", + "shape", "tagged false", "in_record true", "reader urword"], + ["block packagedata", "name rtp", "type double precision", + "shape", "tagged false", "in_record true", "reader urword"], + ["block packagedata", "name rbth", "type double precision", + "shape", "tagged false", "in_record true", "reader urword"], + ["block packagedata", "name rhk", "type double precision", + "shape", "tagged false", "in_record true", "reader urword"], + ["block packagedata", "name man", "type string", "shape", + "tagged false", "in_record true", "reader urword", + "time_series true"], + ["block packagedata", "name ncon", "type integer", "shape", + "tagged false", "in_record true", "reader urword"], + ["block packagedata", "name ustrf", "type double precision", + "shape", "tagged false", "in_record true", "reader urword"], + ["block packagedata", "name ndv", "type integer", "shape", + "tagged false", "in_record true", "reader urword"], + ["block packagedata", "name aux", "type double precision", + "in_record true", "tagged false", "shape (naux)", "reader urword", + "time_series true", "optional true"], + ["block packagedata", "name boundname", "type string", "shape", + "tagged false", "in_record true", "reader urword", + "optional true"], + ["block connectiondata", "name connectiondata", + "type recarray rno ic", "shape (maxbound)", "reader urword"], + ["block connectiondata", "name rno", "type integer", "shape", + "tagged false", "in_record true", "reader urword", + "numeric_index true"], + ["block 
connectiondata", "name ic", "type integer", + "shape (ncon(rno))", "tagged false", "in_record true", + "reader urword", "numeric_index true", + "support_negative_index true"], + ["block diversions", "name diversions", + "type recarray rno idv iconr cprior", "shape (maxbound)", + "reader urword"], + ["block diversions", "name rno", "type integer", "shape", + "tagged false", "in_record true", "reader urword", + "numeric_index true"], + ["block diversions", "name idv", "type integer", "shape", + "tagged false", "in_record true", "reader urword", + "numeric_index true"], + ["block diversions", "name iconr", "type integer", "shape", + "tagged false", "in_record true", "reader urword", + "numeric_index true"], + ["block diversions", "name cprior", "type string", "shape", + "tagged false", "in_record true", "reader urword"], + ["block period", "name iper", "type integer", + "block_variable True", "in_record true", "tagged false", "shape", + "valid", "reader urword", "optional false"], + ["block period", "name perioddata", + "type recarray rno sfrsetting", "shape", "reader urword"], + ["block period", "name rno", "type integer", "shape", + "tagged false", "in_record true", "reader urword", + "numeric_index true"], + ["block period", "name sfrsetting", + "type keystring status manning stage inflow rainfall evaporation " + "runoff diversionrecord upstream_fraction auxiliaryrecord", + "shape", "tagged false", "in_record true", "reader urword"], + ["block period", "name status", "type string", "shape", + "tagged true", "in_record true", "reader urword"], + ["block period", "name manning", "type string", "shape", + "tagged true", "in_record true", "reader urword", + "time_series true"], + ["block period", "name stage", "type string", "shape", + "tagged true", "in_record true", "reader urword", + "time_series true"], + ["block period", "name inflow", "type string", "shape", + "tagged true", "in_record true", "reader urword", + "time_series true"], + ["block period", "name rainfall", "type string", "shape", + "tagged true", "in_record true", "reader urword", + "time_series true"], + ["block period", "name evaporation", "type string", "shape", + "tagged true", "in_record true", "reader urword", + "time_series true"], + ["block period", "name runoff", "type string", "shape", + "tagged true", "in_record true", "reader urword", + "time_series true"], + ["block period", "name diversionrecord", + "type record diversion idv divrate", "shape", "tagged", + "in_record true", "reader urword"], + ["block period", "name diversion", "type keyword", "shape", + "in_record true", "reader urword"], + ["block period", "name idv", "type integer", "shape", + "tagged false", "in_record true", "reader urword", + "numeric_index true"], + ["block period", "name divrate", "type double precision", + "shape", "tagged false", "in_record true", "reader urword", + "time_series true"], + ["block period", "name upstream_fraction", + "type double precision", "shape", "tagged true", "in_record true", + "reader urword"], + ["block period", "name auxiliaryrecord", + "type record auxiliary auxname auxval", "shape", "tagged", + "in_record true", "reader urword"], + ["block period", "name auxiliary", "type keyword", "shape", + "in_record true", "reader urword"], + ["block period", "name auxname", "type string", "shape", + "tagged false", "in_record true", "reader urword"], + ["block period", "name auxval", "type double precision", "shape", + "tagged false", "in_record true", "reader urword", + "time_series true"]] + + def __init__(self, model, 
loading_package=False, auxiliary=None, + boundnames=None, print_input=None, print_stage=None, + print_flows=None, save_flows=None, stage_filerecord=None, + budget_filerecord=None, timeseries=None, observations=None, + mover=None, maximum_iterations=None, + maximum_depth_change=None, unit_conversion=None, + nreaches=None, packagedata=None, connectiondata=None, + diversions=None, perioddata=None, filename=None, pname=None, + parent_file=None): + super(ModflowGwfsfr, self).__init__(model, "sfr", filename, pname, + loading_package, parent_file) + + # set up variables + self.auxiliary = self.build_mfdata("auxiliary", auxiliary) + self.boundnames = self.build_mfdata("boundnames", boundnames) + self.print_input = self.build_mfdata("print_input", print_input) + self.print_stage = self.build_mfdata("print_stage", print_stage) + self.print_flows = self.build_mfdata("print_flows", print_flows) + self.save_flows = self.build_mfdata("save_flows", save_flows) + self.stage_filerecord = self.build_mfdata("stage_filerecord", + stage_filerecord) + self.budget_filerecord = self.build_mfdata("budget_filerecord", + budget_filerecord) + self._ts_filerecord = self.build_mfdata("ts_filerecord", + None) + self._ts_package = self.build_child_package("ts", timeseries, + "timeseries", + self._ts_filerecord) + self._obs_filerecord = self.build_mfdata("obs_filerecord", + None) + self._obs_package = self.build_child_package("obs", observations, + "continuous", + self._obs_filerecord) + self.mover = self.build_mfdata("mover", mover) + self.maximum_iterations = self.build_mfdata("maximum_iterations", + maximum_iterations) + self.maximum_depth_change = self.build_mfdata("maximum_depth_change", + maximum_depth_change) + self.unit_conversion = self.build_mfdata("unit_conversion", + unit_conversion) + self.nreaches = self.build_mfdata("nreaches", nreaches) + self.packagedata = self.build_mfdata("packagedata", packagedata) + self.connectiondata = self.build_mfdata("connectiondata", + connectiondata) + self.diversions = self.build_mfdata("diversions", diversions) + self.perioddata = self.build_mfdata("perioddata", perioddata) + self._init_complete = True diff --git a/flopy/mf6/modflow/mfgwfuzf.py b/flopy/mf6/modflow/mfgwfuzf.py index 7869d8391b..7cc2eaa3a8 100644 --- a/flopy/mf6/modflow/mfgwfuzf.py +++ b/flopy/mf6/modflow/mfgwfuzf.py @@ -1,430 +1,430 @@ -# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY -# mf6/utils/createpackages.py -from .. import mfpackage -from ..data.mfdatautil import ListTemplateGenerator - - -class ModflowGwfuzf(mfpackage.MFPackage): - """ - ModflowGwfuzf defines a uzf package within a gwf6 model. - - Parameters - ---------- - model : MFModel - Model that this package is a part of. Package is automatically - added to model when it is initialized. - loading_package : bool - Do not set this parameter. It is intended for debugging and internal - processing purposes only. - auxiliary : [string] - * auxiliary (string) defines an array of one or more auxiliary variable - names. There is no limit on the number of auxiliary variables that - can be provided on this line; however, lists of information provided - in subsequent blocks must have a column of data for each auxiliary - variable name defined here. The number of auxiliary variables - detected on this line determines the value for naux. Comments cannot - be provided anywhere on this line as they will be interpreted as - auxiliary variable names. 
Auxiliary variables may not be used by the - package, but they will be available for use by other parts of the - program. The program will terminate with an error if auxiliary - variables are specified on more than one line in the options block. - auxmultname : string - * auxmultname (string) name of auxiliary variable to be used as - multiplier of GWF cell area used by UZF cell. - boundnames : boolean - * boundnames (boolean) keyword to indicate that boundary names may be - provided with the list of UZF cells. - print_input : boolean - * print_input (boolean) keyword to indicate that the list of UZF - information will be written to the listing file immediately after it - is read. - print_flows : boolean - * print_flows (boolean) keyword to indicate that the list of UZF flow - rates will be printed to the listing file for every stress period - time step in which "BUDGET PRINT" is specified in Output Control. If - there is no Output Control option and "PRINT_FLOWS" is specified, - then flow rates are printed for the last time step of each stress - period. - save_flows : boolean - * save_flows (boolean) keyword to indicate that UZF flow terms will be - written to the file specified with "BUDGET FILEOUT" in Output - Control. - budget_filerecord : [budgetfile] - * budgetfile (string) name of the binary output file to write budget - information. - timeseries : {varname:data} or timeseries data - * Contains data for the ts package. Data can be stored in a dictionary - containing data for the ts package with variable names as keys and - package data as values. Data just for the timeseries variable is also - acceptable. See ts package documentation for more information. - observations : {varname:data} or continuous data - * Contains data for the obs package. Data can be stored in a dictionary - containing data for the obs package with variable names as keys and - package data as values. Data just for the observations variable is - also acceptable. See obs package documentation for more information. - mover : boolean - * mover (boolean) keyword to indicate that this instance of the UZF - Package can be used with the Water Mover (MVR) Package. When the - MOVER option is specified, additional memory is allocated within the - package to store the available, provided, and received water. - simulate_et : boolean - * simulate_et (boolean) keyword specifying that ET in the unsaturated - (UZF) and saturated zones (GWF) will be simulated. ET can be - simulated in the UZF cell and not the GWF cell by omitting keywords - LINEAR_GWET and SQUARE_GWET. - linear_gwet : boolean - * linear_gwet (boolean) keyword specifying that groundwater ET will be - simulated using the original ET formulation of MODFLOW-2005. - square_gwet : boolean - * square_gwet (boolean) keyword specifying that groundwater ET will be - simulated by assuming a constant ET rate for groundwater levels - between land surface (TOP) and land surface minus the ET extinction - depth (TOP-EXTDP). Groundwater ET is smoothly reduced from the PET - rate to zero over a nominal interval at TOP-EXTDP. - simulate_gwseep : boolean - * simulate_gwseep (boolean) keyword specifying that groundwater - discharge (GWSEEP) to land surface will be simulated. Groundwater - discharge is nonzero when groundwater head is greater than land - surface. 
- unsat_etwc : boolean - * unsat_etwc (boolean) keyword specifying that ET in the unsaturated - zone will be simulated as a function of the specified PET rate while - the water content (THETA) is greater than the ET extinction water - content (EXTWC). - unsat_etae : boolean - * unsat_etae (boolean) keyword specifying that ET in the unsaturated - zone will be simulated simulated using a capillary pressure based - formulation. Capillary pressure is calculated using the Brooks-Corey - retention function. - nuzfcells : integer - * nuzfcells (integer) is the number of UZF cells. More than one UZF - cell can be assigned to a GWF cell; however, only one GWF cell can be - assigned to a single UZF cell. If more than one UZF cell is assigned - to a GWF cell, then an auxiliary variable should be used to reduce - the surface area of the UZF cell with the AUXMULTNAME option. - ntrailwaves : integer - * ntrailwaves (integer) is the number of trailing waves. A recommended - value of 7 can be used for NTRAILWAVES. This value can be increased - to lower mass balance error in the unsaturated zone. - nwavesets : integer - * nwavesets (integer) is the number of wave sets. A recommended value - of 40 can be used for NWAVESETS. This value can be increased if more - waves are required to resolve variations in water content within the - unsaturated zone. - packagedata : [iuzno, cellid, landflag, ivertcon, surfdep, vks, thtr, thts, - thti, eps, boundname] - * iuzno (integer) integer value that defines the UZF cell number - associated with the specified PACKAGEDATA data on the line. IUZNO - must be greater than zero and less than or equal to NUZFCELLS. UZF - information must be specified for every UZF cell or the program will - terminate with an error. The program will also terminate with an - error if information for a UZF cell is specified more than once. This - argument is an index variable, which means that it should be treated - as zero-based when working with FloPy and Python. Flopy will - automatically subtract one when loading index variables and add one - when writing index variables. - * cellid ((integer, ...)) is the cell identifier, and depends on the - type of grid that is used for the simulation. For a structured grid - that uses the DIS input file, CELLID is the layer, row, and column. - For a grid that uses the DISV input file, CELLID is the layer and - CELL2D number. If the model uses the unstructured discretization - (DISU) input file, CELLID is the node number for the cell. This - argument is an index variable, which means that it should be treated - as zero-based when working with FloPy and Python. Flopy will - automatically subtract one when loading index variables and add one - when writing index variables. - * landflag (integer) integer value set to one for land surface cells - indicating that boundary conditions can be applied and data can be - specified in the PERIOD block. A value of 0 specifies a non-land - surface cell. - * ivertcon (integer) integer value set to specify underlying UZF cell - that receives water flowing to bottom of cell. If unsaturated zone - flow reaches the water table before the cell bottom, then water is - added to the GWF cell instead of flowing to the underlying UZF cell. - A value of 0 indicates the UZF cell is not connected to an underlying - UZF cell. This argument is an index variable, which means that it - should be treated as zero-based when working with FloPy and Python. 
- Flopy will automatically subtract one when loading index variables - and add one when writing index variables. - * surfdep (double) is the surface depression depth of the UZF cell. - * vks (double) is the vertical saturated hydraulic conductivity of the - UZF cell. - * thtr (double) is the residual (irreducible) water content of the UZF - cell. - * thts (double) is the saturated water content of the UZF cell. - * thti (double) is the initial water content of the UZF cell. - * eps (double) is the epsilon exponent of the UZF cell. - * boundname (string) name of the UZF cell cell. BOUNDNAME is an ASCII - character variable that can contain as many as 40 characters. If - BOUNDNAME contains spaces in it, then the entire name must be - enclosed within single quotes. - perioddata : [iuzno, finf, pet, extdp, extwc, ha, hroot, rootact, aux] - * iuzno (integer) integer value that defines the UZF cell number - associated with the specified PERIOD data on the line. This argument - is an index variable, which means that it should be treated as zero- - based when working with FloPy and Python. Flopy will automatically - subtract one when loading index variables and add one when writing - index variables. - * finf (string) real or character value that defines the applied - infiltration rate of the UZF cell (:math:`LT^{-1}`). If the Options - block includes a TIMESERIESFILE entry (see the "Time-Variable Input" - section), values can be obtained from a time series by entering the - time-series name in place of a numeric value. - * pet (string) real or character value that defines the potential - evapotranspiration rate of the UZF cell and specified GWF cell. - Evapotranspiration is first removed from the unsaturated zone and any - remaining potential evapotranspiration is applied to the saturated - zone. If IVERTCON is greater than zero then residual potential - evapotranspiration not satisfied in the UZF cell is applied to the - underlying UZF and GWF cells. PET is always specified, but is only - used if SIMULATE_ET is specified in the OPTIONS block. If the Options - block includes a TIMESERIESFILE entry (see the "Time-Variable Input" - section), values can be obtained from a time series by entering the - time-series name in place of a numeric value. - * extdp (string) real or character value that defines the - evapotranspiration extinction depth of the UZF cell. If IVERTCON is - greater than zero and EXTDP extends below the GWF cell bottom then - remaining potential evapotranspiration is applied to the underlying - UZF and GWF cells. EXTDP is always specified, but is only used if - SIMULATE_ET is specified in the OPTIONS block. If the Options block - includes a TIMESERIESFILE entry (see the "Time-Variable Input" - section), values can be obtained from a time series by entering the - time-series name in place of a numeric value. - * extwc (string) real or character value that defines the - evapotranspiration extinction water content of the UZF cell. EXTWC is - always specified, but is only used if SIMULATE_ET and UNSAT_ETWC are - specified in the OPTIONS block. If the Options block includes a - TIMESERIESFILE entry (see the "Time-Variable Input" section), values - can be obtained from a time series by entering the time-series name - in place of a numeric value. - * ha (string) real or character value that defines the air entry - potential (head) of the UZF cell. HA is always specified, but is only - used if SIMULATE_ET and UNSAT_ETAE are specified in the OPTIONS - block. 
If the Options block includes a TIMESERIESFILE entry (see the - "Time-Variable Input" section), values can be obtained from a time - series by entering the time-series name in place of a numeric value. - * hroot (string) real or character value that defines the root - potential (head) of the UZF cell. HROOT is always specified, but is - only used if SIMULATE_ET and UNSAT_ETAE are specified in the OPTIONS - block. If the Options block includes a TIMESERIESFILE entry (see the - "Time-Variable Input" section), values can be obtained from a time - series by entering the time-series name in place of a numeric value. - * rootact (string) real or character value that defines the root - activity function of the UZF cell. ROOTACT is the length of roots in - a given volume of soil divided by that volume. Values range from 0 to - about 3 :math:`cm^{-2}`, depending on the plant community and its - stage of development. ROOTACT is always specified, but is only used - if SIMULATE\_ET and UNSAT\_ETAE are specified in the OPTIONS block. - If the Options block includes a TIMESERIESFILE entry (see the "Time- - Variable Input" section), values can be obtained from a time series - by entering the time-series name in place of a numeric value. - * aux (double) represents the values of the auxiliary variables for - each UZF. The values of auxiliary variables must be present for each - UZF. The values must be specified in the order of the auxiliary - variables specified in the OPTIONS block. If the package supports - time series and the Options block includes a TIMESERIESFILE entry - (see the "Time-Variable Input" section), values can be obtained from - a time series by entering the time-series name in place of a numeric - value. - filename : String - File name for this package. - pname : String - Package name for this package. - parent_file : MFPackage - Parent package file that references this package. Only needed for - utility packages (mfutl*). For example, mfutllaktab package must have - a mfgwflak package parent_file. 
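A note on the "index variable" convention that recurs throughout these
docstrings (RNO, IUZNO, IVERTCON, CELLID, and similar): the sketch below
pins down what zero-based handling means in practice. It is illustrative
only and not part of the generated file; the specific values are
assumptions.

# One UZF PACKAGEDATA row as FloPy expects it, with zero-based indices:
#   (iuzno, cellid, landflag, ivertcon, surfdep, vks, thtr, thts, thti, eps)
row = (0, (0, 0, 0), 1, -1, 0.1, 1.0e-4, 0.05, 0.35, 0.1, 4.0)
# On write, FloPy adds one to each index variable, so iuzno 0 becomes 1 and
# cellid (0, 0, 0) becomes "1 1 1" in the UZF input file. Assuming the
# add-one rule applies uniformly, ivertcon -1 is written as 0, the value
# MODFLOW 6 uses for "not connected to an underlying UZF cell".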
- - """ - auxiliary = ListTemplateGenerator(('gwf6', 'uzf', 'options', - 'auxiliary')) - budget_filerecord = ListTemplateGenerator(('gwf6', 'uzf', 'options', - 'budget_filerecord')) - ts_filerecord = ListTemplateGenerator(('gwf6', 'uzf', 'options', - 'ts_filerecord')) - obs_filerecord = ListTemplateGenerator(('gwf6', 'uzf', 'options', - 'obs_filerecord')) - packagedata = ListTemplateGenerator(('gwf6', 'uzf', 'packagedata', - 'packagedata')) - perioddata = ListTemplateGenerator(('gwf6', 'uzf', 'period', - 'perioddata')) - package_abbr = "gwfuzf" - _package_type = "uzf" - dfn_file_name = "gwf-uzf.dfn" - - dfn = [["block options", "name auxiliary", "type string", - "shape (naux)", "reader urword", "optional true"], - ["block options", "name auxmultname", "type string", "shape", - "reader urword", "optional true"], - ["block options", "name boundnames", "type keyword", "shape", - "reader urword", "optional true"], - ["block options", "name print_input", "type keyword", - "reader urword", "optional true"], - ["block options", "name print_flows", "type keyword", - "reader urword", "optional true"], - ["block options", "name save_flows", "type keyword", - "reader urword", "optional true"], - ["block options", "name budget_filerecord", - "type record budget fileout budgetfile", "shape", "reader urword", - "tagged true", "optional true"], - ["block options", "name budget", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name fileout", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name budgetfile", "preserve_case true", - "type string", "shape", "in_record true", "reader urword", - "tagged false", "optional false"], - ["block options", "name ts_filerecord", - "type record ts6 filein ts6_filename", "shape", "reader urword", - "tagged true", "optional true", "construct_package ts", - "construct_data timeseries", "parameter_name timeseries"], - ["block options", "name ts6", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name filein", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name ts6_filename", "type string", - "preserve_case true", "in_record true", "reader urword", - "optional false", "tagged false"], - ["block options", "name obs_filerecord", - "type record obs6 filein obs6_filename", "shape", "reader urword", - "tagged true", "optional true", "construct_package obs", - "construct_data continuous", "parameter_name observations"], - ["block options", "name obs6", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name obs6_filename", "type string", - "preserve_case true", "in_record true", "tagged false", - "reader urword", "optional false"], - ["block options", "name mover", "type keyword", "tagged true", - "reader urword", "optional true"], - ["block options", "name simulate_et", "type keyword", - "tagged true", "reader urword", "optional true"], - ["block options", "name linear_gwet", "type keyword", - "tagged true", "reader urword", "optional true"], - ["block options", "name square_gwet", "type keyword", - "tagged true", "reader urword", "optional true"], - ["block options", "name simulate_gwseep", "type keyword", - "tagged true", "reader urword", "optional true"], - ["block options", "name unsat_etwc", "type keyword", - "tagged true", 
"reader urword", "optional true"], - ["block options", "name unsat_etae", "type keyword", - "tagged true", "reader urword", "optional true"], - ["block dimensions", "name nuzfcells", "type integer", - "reader urword", "optional false"], - ["block dimensions", "name ntrailwaves", "type integer", - "reader urword", "optional false"], - ["block dimensions", "name nwavesets", "type integer", - "reader urword", "optional false"], - ["block packagedata", "name packagedata", - "type recarray iuzno cellid landflag ivertcon surfdep vks thtr " - "thts thti eps boundname", - "shape (nuzfcells)", "reader urword"], - ["block packagedata", "name iuzno", "type integer", "shape", - "tagged false", "in_record true", "reader urword", - "numeric_index true"], - ["block packagedata", "name cellid", "type integer", - "shape (ncelldim)", "tagged false", "in_record true", - "reader urword"], - ["block packagedata", "name landflag", "type integer", "shape", - "tagged false", "in_record true", "reader urword"], - ["block packagedata", "name ivertcon", "type integer", "shape", - "tagged false", "in_record true", "reader urword", - "numeric_index true"], - ["block packagedata", "name surfdep", "type double precision", - "shape", "tagged false", "in_record true", "reader urword"], - ["block packagedata", "name vks", "type double precision", - "shape", "tagged false", "in_record true", "reader urword"], - ["block packagedata", "name thtr", "type double precision", - "shape", "tagged false", "in_record true", "reader urword"], - ["block packagedata", "name thts", "type double precision", - "shape", "tagged false", "in_record true", "reader urword"], - ["block packagedata", "name thti", "type double precision", - "shape", "tagged false", "in_record true", "reader urword"], - ["block packagedata", "name eps", "type double precision", - "shape", "tagged false", "in_record true", "reader urword"], - ["block packagedata", "name boundname", "type string", "shape", - "tagged false", "in_record true", "reader urword", - "optional true"], - ["block period", "name iper", "type integer", - "block_variable True", "in_record true", "tagged false", "shape", - "valid", "reader urword", "optional false"], - ["block period", "name perioddata", - "type recarray iuzno finf pet extdp extwc ha hroot rootact aux", - "shape", "reader urword"], - ["block period", "name iuzno", "type integer", "shape", - "tagged false", "in_record true", "reader urword", - "numeric_index true"], - ["block period", "name finf", "type string", "shape", - "tagged false", "in_record true", "time_series true", - "reader urword"], - ["block period", "name pet", "type string", "shape", - "tagged false", "in_record true", "reader urword", - "time_series true"], - ["block period", "name extdp", "type string", "shape", - "tagged false", "in_record true", "reader urword", - "time_series true"], - ["block period", "name extwc", "type string", "shape", - "tagged false", "in_record true", "reader urword", - "time_series true"], - ["block period", "name ha", "type string", "shape", - "tagged false", "in_record true", "time_series true", - "reader urword"], - ["block period", "name hroot", "type string", "shape", - "tagged false", "in_record true", "reader urword", - "time_series true"], - ["block period", "name rootact", "type string", "shape", - "tagged false", "in_record true", "reader urword", - "time_series true"], - ["block period", "name aux", "type double precision", - "in_record true", "tagged false", "shape (naux)", "reader urword", - "time_series true", "optional 
true"]] - - def __init__(self, model, loading_package=False, auxiliary=None, - auxmultname=None, boundnames=None, print_input=None, - print_flows=None, save_flows=None, budget_filerecord=None, - timeseries=None, observations=None, mover=None, - simulate_et=None, linear_gwet=None, square_gwet=None, - simulate_gwseep=None, unsat_etwc=None, unsat_etae=None, - nuzfcells=None, ntrailwaves=None, nwavesets=None, - packagedata=None, perioddata=None, filename=None, pname=None, - parent_file=None): - super(ModflowGwfuzf, self).__init__(model, "uzf", filename, pname, - loading_package, parent_file) - - # set up variables - self.auxiliary = self.build_mfdata("auxiliary", auxiliary) - self.auxmultname = self.build_mfdata("auxmultname", auxmultname) - self.boundnames = self.build_mfdata("boundnames", boundnames) - self.print_input = self.build_mfdata("print_input", print_input) - self.print_flows = self.build_mfdata("print_flows", print_flows) - self.save_flows = self.build_mfdata("save_flows", save_flows) - self.budget_filerecord = self.build_mfdata("budget_filerecord", - budget_filerecord) - self._ts_filerecord = self.build_mfdata("ts_filerecord", - None) - self._ts_package = self.build_child_package("ts", timeseries, - "timeseries", - self._ts_filerecord) - self._obs_filerecord = self.build_mfdata("obs_filerecord", - None) - self._obs_package = self.build_child_package("obs", observations, - "continuous", - self._obs_filerecord) - self.mover = self.build_mfdata("mover", mover) - self.simulate_et = self.build_mfdata("simulate_et", simulate_et) - self.linear_gwet = self.build_mfdata("linear_gwet", linear_gwet) - self.square_gwet = self.build_mfdata("square_gwet", square_gwet) - self.simulate_gwseep = self.build_mfdata("simulate_gwseep", - simulate_gwseep) - self.unsat_etwc = self.build_mfdata("unsat_etwc", unsat_etwc) - self.unsat_etae = self.build_mfdata("unsat_etae", unsat_etae) - self.nuzfcells = self.build_mfdata("nuzfcells", nuzfcells) - self.ntrailwaves = self.build_mfdata("ntrailwaves", ntrailwaves) - self.nwavesets = self.build_mfdata("nwavesets", nwavesets) - self.packagedata = self.build_mfdata("packagedata", packagedata) - self.perioddata = self.build_mfdata("perioddata", perioddata) - self._init_complete = True +# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY +# mf6/utils/createpackages.py +from .. import mfpackage +from ..data.mfdatautil import ListTemplateGenerator + + +class ModflowGwfuzf(mfpackage.MFPackage): + """ + ModflowGwfuzf defines a uzf package within a gwf6 model. + + Parameters + ---------- + model : MFModel + Model that this package is a part of. Package is automatically + added to model when it is initialized. + loading_package : bool + Do not set this parameter. It is intended for debugging and internal + processing purposes only. + auxiliary : [string] + * auxiliary (string) defines an array of one or more auxiliary variable + names. There is no limit on the number of auxiliary variables that + can be provided on this line; however, lists of information provided + in subsequent blocks must have a column of data for each auxiliary + variable name defined here. The number of auxiliary variables + detected on this line determines the value for naux. Comments cannot + be provided anywhere on this line as they will be interpreted as + auxiliary variable names. Auxiliary variables may not be used by the + package, but they will be available for use by other parts of the + program. 
The program will terminate with an error if auxiliary + variables are specified on more than one line in the options block. + auxmultname : string + * auxmultname (string) name of auxiliary variable to be used as + multiplier of GWF cell area used by UZF cell. + boundnames : boolean + * boundnames (boolean) keyword to indicate that boundary names may be + provided with the list of UZF cells. + print_input : boolean + * print_input (boolean) keyword to indicate that the list of UZF + information will be written to the listing file immediately after it + is read. + print_flows : boolean + * print_flows (boolean) keyword to indicate that the list of UZF flow + rates will be printed to the listing file for every stress period + time step in which "BUDGET PRINT" is specified in Output Control. If + there is no Output Control option and "PRINT_FLOWS" is specified, + then flow rates are printed for the last time step of each stress + period. + save_flows : boolean + * save_flows (boolean) keyword to indicate that UZF flow terms will be + written to the file specified with "BUDGET FILEOUT" in Output + Control. + budget_filerecord : [budgetfile] + * budgetfile (string) name of the binary output file to write budget + information. + timeseries : {varname:data} or timeseries data + * Contains data for the ts package. Data can be stored in a dictionary + containing data for the ts package with variable names as keys and + package data as values. Data just for the timeseries variable is also + acceptable. See ts package documentation for more information. + observations : {varname:data} or continuous data + * Contains data for the obs package. Data can be stored in a dictionary + containing data for the obs package with variable names as keys and + package data as values. Data just for the observations variable is + also acceptable. See obs package documentation for more information. + mover : boolean + * mover (boolean) keyword to indicate that this instance of the UZF + Package can be used with the Water Mover (MVR) Package. When the + MOVER option is specified, additional memory is allocated within the + package to store the available, provided, and received water. + simulate_et : boolean + * simulate_et (boolean) keyword specifying that ET in the unsaturated + (UZF) and saturated zones (GWF) will be simulated. ET can be + simulated in the UZF cell and not the GWF cell by omitting keywords + LINEAR_GWET and SQUARE_GWET. + linear_gwet : boolean + * linear_gwet (boolean) keyword specifying that groundwater ET will be + simulated using the original ET formulation of MODFLOW-2005. + square_gwet : boolean + * square_gwet (boolean) keyword specifying that groundwater ET will be + simulated by assuming a constant ET rate for groundwater levels + between land surface (TOP) and land surface minus the ET extinction + depth (TOP-EXTDP). Groundwater ET is smoothly reduced from the PET + rate to zero over a nominal interval at TOP-EXTDP. + simulate_gwseep : boolean + * simulate_gwseep (boolean) keyword specifying that groundwater + discharge (GWSEEP) to land surface will be simulated. Groundwater + discharge is nonzero when groundwater head is greater than land + surface. + unsat_etwc : boolean + * unsat_etwc (boolean) keyword specifying that ET in the unsaturated + zone will be simulated as a function of the specified PET rate while + the water content (THETA) is greater than the ET extinction water + content (EXTWC). 
+ unsat_etae : boolean
+ * unsat_etae (boolean) keyword specifying that ET in the unsaturated
+ zone will be simulated using a capillary pressure based
+ formulation. Capillary pressure is calculated using the Brooks-Corey
+ retention function.
+ nuzfcells : integer
+ * nuzfcells (integer) is the number of UZF cells. More than one UZF
+ cell can be assigned to a GWF cell; however, only one GWF cell can be
+ assigned to a single UZF cell. If more than one UZF cell is assigned
+ to a GWF cell, then an auxiliary variable should be used to reduce
+ the surface area of the UZF cell with the AUXMULTNAME option.
+ ntrailwaves : integer
+ * ntrailwaves (integer) is the number of trailing waves. A recommended
+ value of 7 can be used for NTRAILWAVES. This value can be increased
+ to lower mass balance error in the unsaturated zone.
+ nwavesets : integer
+ * nwavesets (integer) is the number of wave sets. A recommended value
+ of 40 can be used for NWAVESETS. This value can be increased if more
+ waves are required to resolve variations in water content within the
+ unsaturated zone.
+ packagedata : [iuzno, cellid, landflag, ivertcon, surfdep, vks, thtr, thts,
+ thti, eps, boundname]
+ * iuzno (integer) integer value that defines the UZF cell number
+ associated with the specified PACKAGEDATA data on the line. IUZNO
+ must be greater than zero and less than or equal to NUZFCELLS. UZF
+ information must be specified for every UZF cell or the program will
+ terminate with an error. The program will also terminate with an
+ error if information for a UZF cell is specified more than once. This
+ argument is an index variable, which means that it should be treated
+ as zero-based when working with FloPy and Python. Flopy will
+ automatically subtract one when loading index variables and add one
+ when writing index variables.
+ * cellid ((integer, ...)) is the cell identifier, and depends on the
+ type of grid that is used for the simulation. For a structured grid
+ that uses the DIS input file, CELLID is the layer, row, and column.
+ For a grid that uses the DISV input file, CELLID is the layer and
+ CELL2D number. If the model uses the unstructured discretization
+ (DISU) input file, CELLID is the node number for the cell. This
+ argument is an index variable, which means that it should be treated
+ as zero-based when working with FloPy and Python. Flopy will
+ automatically subtract one when loading index variables and add one
+ when writing index variables.
+ * landflag (integer) integer value set to one for land surface cells
+ indicating that boundary conditions can be applied and data can be
+ specified in the PERIOD block. A value of 0 specifies a non-land
+ surface cell.
+ * ivertcon (integer) integer value set to specify underlying UZF cell
+ that receives water flowing to bottom of cell. If unsaturated zone
+ flow reaches the water table before the cell bottom, then water is
+ added to the GWF cell instead of flowing to the underlying UZF cell.
+ A value of 0 indicates the UZF cell is not connected to an underlying
+ UZF cell. This argument is an index variable, which means that it
+ should be treated as zero-based when working with FloPy and Python.
+ Flopy will automatically subtract one when loading index variables
+ and add one when writing index variables.
+ * surfdep (double) is the surface depression depth of the UZF cell.
+ * vks (double) is the vertical saturated hydraulic conductivity of the
+ UZF cell.
+ * thtr (double) is the residual (irreducible) water content of the UZF
+ cell.
+ * thts (double) is the saturated water content of the UZF cell.
+ * thti (double) is the initial water content of the UZF cell.
+ * eps (double) is the epsilon exponent of the UZF cell.
+ * boundname (string) name of the UZF cell. BOUNDNAME is an ASCII
+ character variable that can contain as many as 40 characters. If
+ BOUNDNAME contains spaces in it, then the entire name must be
+ enclosed within single quotes.
+ perioddata : [iuzno, finf, pet, extdp, extwc, ha, hroot, rootact, aux]
+ * iuzno (integer) integer value that defines the UZF cell number
+ associated with the specified PERIOD data on the line. This argument
+ is an index variable, which means that it should be treated as zero-
+ based when working with FloPy and Python. Flopy will automatically
+ subtract one when loading index variables and add one when writing
+ index variables.
+ * finf (string) real or character value that defines the applied
+ infiltration rate of the UZF cell (:math:`LT^{-1}`). If the Options
+ block includes a TIMESERIESFILE entry (see the "Time-Variable Input"
+ section), values can be obtained from a time series by entering the
+ time-series name in place of a numeric value.
+ * pet (string) real or character value that defines the potential
+ evapotranspiration rate of the UZF cell and specified GWF cell.
+ Evapotranspiration is first removed from the unsaturated zone and any
+ remaining potential evapotranspiration is applied to the saturated
+ zone. If IVERTCON is greater than zero then residual potential
+ evapotranspiration not satisfied in the UZF cell is applied to the
+ underlying UZF and GWF cells. PET is always specified, but is only
+ used if SIMULATE_ET is specified in the OPTIONS block. If the Options
+ block includes a TIMESERIESFILE entry (see the "Time-Variable Input"
+ section), values can be obtained from a time series by entering the
+ time-series name in place of a numeric value.
+ * extdp (string) real or character value that defines the
+ evapotranspiration extinction depth of the UZF cell. If IVERTCON is
+ greater than zero and EXTDP extends below the GWF cell bottom then
+ remaining potential evapotranspiration is applied to the underlying
+ UZF and GWF cells. EXTDP is always specified, but is only used if
+ SIMULATE_ET is specified in the OPTIONS block. If the Options block
+ includes a TIMESERIESFILE entry (see the "Time-Variable Input"
+ section), values can be obtained from a time series by entering the
+ time-series name in place of a numeric value.
+ * extwc (string) real or character value that defines the
+ evapotranspiration extinction water content of the UZF cell. EXTWC is
+ always specified, but is only used if SIMULATE_ET and UNSAT_ETWC are
+ specified in the OPTIONS block. If the Options block includes a
+ TIMESERIESFILE entry (see the "Time-Variable Input" section), values
+ can be obtained from a time series by entering the time-series name
+ in place of a numeric value.
+ * ha (string) real or character value that defines the air entry
+ potential (head) of the UZF cell. HA is always specified, but is only
+ used if SIMULATE_ET and UNSAT_ETAE are specified in the OPTIONS
+ block. If the Options block includes a TIMESERIESFILE entry (see the
+ "Time-Variable Input" section), values can be obtained from a time
+ series by entering the time-series name in place of a numeric value.
+ * hroot (string) real or character value that defines the root
+ potential (head) of the UZF cell. HROOT is always specified, but is
+ only used if SIMULATE_ET and UNSAT_ETAE are specified in the OPTIONS
+ block. If the Options block includes a TIMESERIESFILE entry (see the
+ "Time-Variable Input" section), values can be obtained from a time
+ series by entering the time-series name in place of a numeric value.
+ * rootact (string) real or character value that defines the root
+ activity function of the UZF cell. ROOTACT is the length of roots in
+ a given volume of soil divided by that volume. Values range from 0 to
+ about 3 :math:`cm^{-2}`, depending on the plant community and its
+ stage of development. ROOTACT is always specified, but is only used
+ if SIMULATE_ET and UNSAT_ETAE are specified in the OPTIONS block.
+ If the Options block includes a TIMESERIESFILE entry (see the "Time-
+ Variable Input" section), values can be obtained from a time series
+ by entering the time-series name in place of a numeric value.
+ * aux (double) represents the values of the auxiliary variables for
+ each UZF. The values of auxiliary variables must be present for each
+ UZF. The values must be specified in the order of the auxiliary
+ variables specified in the OPTIONS block. If the package supports
+ time series and the Options block includes a TIMESERIESFILE entry
+ (see the "Time-Variable Input" section), values can be obtained from
+ a time series by entering the time-series name in place of a numeric
+ value.
+ filename : String
+ File name for this package.
+ pname : String
+ Package name for this package.
+ parent_file : MFPackage
+ Parent package file that references this package. Only needed for
+ utility packages (mfutl*). For example, mfutllaktab package must have
+ a mfgwflak package parent_file.
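Putting the parameters above together, a minimal construction sketch for
this class might look as follows. The workspace, model name, grid, and all
numeric values are placeholder assumptions; the NTRAILWAVES and NWAVESETS
values are the recommended defaults noted in the docstring above. This is
only enough to write the UZF input file, not a complete runnable model.

import flopy

sim = flopy.mf6.MFSimulation(sim_name="uzfdemo", sim_ws="uzfdemo")
flopy.mf6.ModflowTdis(sim, nper=1, perioddata=[(1.0, 1, 1.0)])
gwf = flopy.mf6.ModflowGwf(sim, modelname="uzfdemo")
flopy.mf6.ModflowGwfdis(gwf, nlay=1, nrow=1, ncol=1, top=10.0, botm=0.0)

uzf = flopy.mf6.ModflowGwfuzf(
    gwf,
    simulate_et=True,
    linear_gwet=True,
    nuzfcells=1,
    ntrailwaves=7,   # recommended value (see NTRAILWAVES above)
    nwavesets=40,    # recommended value (see NWAVESETS above)
    # (iuzno, cellid, landflag, ivertcon, surfdep, vks, thtr, thts, thti, eps)
    packagedata=[(0, (0, 0, 0), 1, -1, 0.1, 1.0e-4, 0.05, 0.35, 0.1, 4.0)],
    # stress period 0: (iuzno, finf, pet, extdp, extwc, ha, hroot, rootact)
    perioddata={0: [(0, 1.0e-5, 5.0e-6, 2.0, 0.1, 0.0, 0.0, 0.0)]},
)
sim.write_simulation()

A model that actually runs would also need IC, NPF, and an IMS solver
registered with the simulation.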
+ + """ + auxiliary = ListTemplateGenerator(('gwf6', 'uzf', 'options', + 'auxiliary')) + budget_filerecord = ListTemplateGenerator(('gwf6', 'uzf', 'options', + 'budget_filerecord')) + ts_filerecord = ListTemplateGenerator(('gwf6', 'uzf', 'options', + 'ts_filerecord')) + obs_filerecord = ListTemplateGenerator(('gwf6', 'uzf', 'options', + 'obs_filerecord')) + packagedata = ListTemplateGenerator(('gwf6', 'uzf', 'packagedata', + 'packagedata')) + perioddata = ListTemplateGenerator(('gwf6', 'uzf', 'period', + 'perioddata')) + package_abbr = "gwfuzf" + _package_type = "uzf" + dfn_file_name = "gwf-uzf.dfn" + + dfn = [["block options", "name auxiliary", "type string", + "shape (naux)", "reader urword", "optional true"], + ["block options", "name auxmultname", "type string", "shape", + "reader urword", "optional true"], + ["block options", "name boundnames", "type keyword", "shape", + "reader urword", "optional true"], + ["block options", "name print_input", "type keyword", + "reader urword", "optional true"], + ["block options", "name print_flows", "type keyword", + "reader urword", "optional true"], + ["block options", "name save_flows", "type keyword", + "reader urword", "optional true"], + ["block options", "name budget_filerecord", + "type record budget fileout budgetfile", "shape", "reader urword", + "tagged true", "optional true"], + ["block options", "name budget", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name fileout", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name budgetfile", "preserve_case true", + "type string", "shape", "in_record true", "reader urword", + "tagged false", "optional false"], + ["block options", "name ts_filerecord", + "type record ts6 filein ts6_filename", "shape", "reader urword", + "tagged true", "optional true", "construct_package ts", + "construct_data timeseries", "parameter_name timeseries"], + ["block options", "name ts6", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name filein", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name ts6_filename", "type string", + "preserve_case true", "in_record true", "reader urword", + "optional false", "tagged false"], + ["block options", "name obs_filerecord", + "type record obs6 filein obs6_filename", "shape", "reader urword", + "tagged true", "optional true", "construct_package obs", + "construct_data continuous", "parameter_name observations"], + ["block options", "name obs6", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name obs6_filename", "type string", + "preserve_case true", "in_record true", "tagged false", + "reader urword", "optional false"], + ["block options", "name mover", "type keyword", "tagged true", + "reader urword", "optional true"], + ["block options", "name simulate_et", "type keyword", + "tagged true", "reader urword", "optional true"], + ["block options", "name linear_gwet", "type keyword", + "tagged true", "reader urword", "optional true"], + ["block options", "name square_gwet", "type keyword", + "tagged true", "reader urword", "optional true"], + ["block options", "name simulate_gwseep", "type keyword", + "tagged true", "reader urword", "optional true"], + ["block options", "name unsat_etwc", "type keyword", + "tagged true", 
"reader urword", "optional true"], + ["block options", "name unsat_etae", "type keyword", + "tagged true", "reader urword", "optional true"], + ["block dimensions", "name nuzfcells", "type integer", + "reader urword", "optional false"], + ["block dimensions", "name ntrailwaves", "type integer", + "reader urword", "optional false"], + ["block dimensions", "name nwavesets", "type integer", + "reader urword", "optional false"], + ["block packagedata", "name packagedata", + "type recarray iuzno cellid landflag ivertcon surfdep vks thtr " + "thts thti eps boundname", + "shape (nuzfcells)", "reader urword"], + ["block packagedata", "name iuzno", "type integer", "shape", + "tagged false", "in_record true", "reader urword", + "numeric_index true"], + ["block packagedata", "name cellid", "type integer", + "shape (ncelldim)", "tagged false", "in_record true", + "reader urword"], + ["block packagedata", "name landflag", "type integer", "shape", + "tagged false", "in_record true", "reader urword"], + ["block packagedata", "name ivertcon", "type integer", "shape", + "tagged false", "in_record true", "reader urword", + "numeric_index true"], + ["block packagedata", "name surfdep", "type double precision", + "shape", "tagged false", "in_record true", "reader urword"], + ["block packagedata", "name vks", "type double precision", + "shape", "tagged false", "in_record true", "reader urword"], + ["block packagedata", "name thtr", "type double precision", + "shape", "tagged false", "in_record true", "reader urword"], + ["block packagedata", "name thts", "type double precision", + "shape", "tagged false", "in_record true", "reader urword"], + ["block packagedata", "name thti", "type double precision", + "shape", "tagged false", "in_record true", "reader urword"], + ["block packagedata", "name eps", "type double precision", + "shape", "tagged false", "in_record true", "reader urword"], + ["block packagedata", "name boundname", "type string", "shape", + "tagged false", "in_record true", "reader urword", + "optional true"], + ["block period", "name iper", "type integer", + "block_variable True", "in_record true", "tagged false", "shape", + "valid", "reader urword", "optional false"], + ["block period", "name perioddata", + "type recarray iuzno finf pet extdp extwc ha hroot rootact aux", + "shape", "reader urword"], + ["block period", "name iuzno", "type integer", "shape", + "tagged false", "in_record true", "reader urword", + "numeric_index true"], + ["block period", "name finf", "type string", "shape", + "tagged false", "in_record true", "time_series true", + "reader urword"], + ["block period", "name pet", "type string", "shape", + "tagged false", "in_record true", "reader urword", + "time_series true"], + ["block period", "name extdp", "type string", "shape", + "tagged false", "in_record true", "reader urword", + "time_series true"], + ["block period", "name extwc", "type string", "shape", + "tagged false", "in_record true", "reader urword", + "time_series true"], + ["block period", "name ha", "type string", "shape", + "tagged false", "in_record true", "time_series true", + "reader urword"], + ["block period", "name hroot", "type string", "shape", + "tagged false", "in_record true", "reader urword", + "time_series true"], + ["block period", "name rootact", "type string", "shape", + "tagged false", "in_record true", "reader urword", + "time_series true"], + ["block period", "name aux", "type double precision", + "in_record true", "tagged false", "shape (naux)", "reader urword", + "time_series true", "optional 
true"]] + + def __init__(self, model, loading_package=False, auxiliary=None, + auxmultname=None, boundnames=None, print_input=None, + print_flows=None, save_flows=None, budget_filerecord=None, + timeseries=None, observations=None, mover=None, + simulate_et=None, linear_gwet=None, square_gwet=None, + simulate_gwseep=None, unsat_etwc=None, unsat_etae=None, + nuzfcells=None, ntrailwaves=None, nwavesets=None, + packagedata=None, perioddata=None, filename=None, pname=None, + parent_file=None): + super(ModflowGwfuzf, self).__init__(model, "uzf", filename, pname, + loading_package, parent_file) + + # set up variables + self.auxiliary = self.build_mfdata("auxiliary", auxiliary) + self.auxmultname = self.build_mfdata("auxmultname", auxmultname) + self.boundnames = self.build_mfdata("boundnames", boundnames) + self.print_input = self.build_mfdata("print_input", print_input) + self.print_flows = self.build_mfdata("print_flows", print_flows) + self.save_flows = self.build_mfdata("save_flows", save_flows) + self.budget_filerecord = self.build_mfdata("budget_filerecord", + budget_filerecord) + self._ts_filerecord = self.build_mfdata("ts_filerecord", + None) + self._ts_package = self.build_child_package("ts", timeseries, + "timeseries", + self._ts_filerecord) + self._obs_filerecord = self.build_mfdata("obs_filerecord", + None) + self._obs_package = self.build_child_package("obs", observations, + "continuous", + self._obs_filerecord) + self.mover = self.build_mfdata("mover", mover) + self.simulate_et = self.build_mfdata("simulate_et", simulate_et) + self.linear_gwet = self.build_mfdata("linear_gwet", linear_gwet) + self.square_gwet = self.build_mfdata("square_gwet", square_gwet) + self.simulate_gwseep = self.build_mfdata("simulate_gwseep", + simulate_gwseep) + self.unsat_etwc = self.build_mfdata("unsat_etwc", unsat_etwc) + self.unsat_etae = self.build_mfdata("unsat_etae", unsat_etae) + self.nuzfcells = self.build_mfdata("nuzfcells", nuzfcells) + self.ntrailwaves = self.build_mfdata("ntrailwaves", ntrailwaves) + self.nwavesets = self.build_mfdata("nwavesets", nwavesets) + self.packagedata = self.build_mfdata("packagedata", packagedata) + self.perioddata = self.build_mfdata("perioddata", perioddata) + self._init_complete = True diff --git a/flopy/mf6/modflow/mfgwfwel.py b/flopy/mf6/modflow/mfgwfwel.py index c132816ef5..de673003bd 100644 --- a/flopy/mf6/modflow/mfgwfwel.py +++ b/flopy/mf6/modflow/mfgwfwel.py @@ -1,223 +1,223 @@ -# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY -# mf6/utils/createpackages.py -from .. import mfpackage -from ..data.mfdatautil import ListTemplateGenerator - - -class ModflowGwfwel(mfpackage.MFPackage): - """ - ModflowGwfwel defines a wel package within a gwf6 model. - - Parameters - ---------- - model : MFModel - Model that this package is a part of. Package is automatically - added to model when it is initialized. - loading_package : bool - Do not set this parameter. It is intended for debugging and internal - processing purposes only. - auxiliary : [string] - * auxiliary (string) defines an array of one or more auxiliary variable - names. There is no limit on the number of auxiliary variables that - can be provided on this line; however, lists of information provided - in subsequent blocks must have a column of data for each auxiliary - variable name defined here. The number of auxiliary variables - detected on this line determines the value for naux. Comments cannot - be provided anywhere on this line as they will be interpreted as - auxiliary variable names. 
Auxiliary variables may not be used by the - package, but they will be available for use by other parts of the - program. The program will terminate with an error if auxiliary - variables are specified on more than one line in the options block. - auxmultname : string - * auxmultname (string) name of auxiliary variable to be used as - multiplier of well flow rate. - boundnames : boolean - * boundnames (boolean) keyword to indicate that boundary names may be - provided with the list of well cells. - print_input : boolean - * print_input (boolean) keyword to indicate that the list of well - information will be written to the listing file immediately after it - is read. - print_flows : boolean - * print_flows (boolean) keyword to indicate that the list of well flow - rates will be printed to the listing file for every stress period - time step in which "BUDGET PRINT" is specified in Output Control. If - there is no Output Control option and "PRINT_FLOWS" is specified, - then flow rates are printed for the last time step of each stress - period. - save_flows : boolean - * save_flows (boolean) keyword to indicate that well flow terms will be - written to the file specified with "BUDGET FILEOUT" in Output - Control. - auto_flow_reduce : double - * auto_flow_reduce (double) keyword and real value that defines the - fraction of the cell thickness used as an interval for smoothly - adjusting negative pumping rates to 0 in cells with head values less - than or equal to the bottom of the cell. Negative pumping rates are - adjusted to 0 or a smaller negative value when the head in the cell - is equal to or less than the calculated interval above the cell - bottom. AUTO_FLOW_REDUCE is set to 0.1 if the specified value is less - than or equal to zero. By default, negative pumping rates are not - reduced during a simulation. - timeseries : {varname:data} or timeseries data - * Contains data for the ts package. Data can be stored in a dictionary - containing data for the ts package with variable names as keys and - package data as values. Data just for the timeseries variable is also - acceptable. See ts package documentation for more information. - observations : {varname:data} or continuous data - * Contains data for the obs package. Data can be stored in a dictionary - containing data for the obs package with variable names as keys and - package data as values. Data just for the observations variable is - also acceptable. See obs package documentation for more information. - mover : boolean - * mover (boolean) keyword to indicate that this instance of the Well - Package can be used with the Water Mover (MVR) Package. When the - MOVER option is specified, additional memory is allocated within the - package to store the available, provided, and received water. - maxbound : integer - * maxbound (integer) integer value specifying the maximum number of - wells cells that will be specified for use during any stress period. - stress_period_data : [cellid, q, aux, boundname] - * cellid ((integer, ...)) is the cell identifier, and depends on the - type of grid that is used for the simulation. For a structured grid - that uses the DIS input file, CELLID is the layer, row, and column. - For a grid that uses the DISV input file, CELLID is the layer and - CELL2D number. If the model uses the unstructured discretization - (DISU) input file, CELLID is the node number for the cell. This - argument is an index variable, which means that it should be treated - as zero-based when working with FloPy and Python. 
Flopy will - automatically subtract one when loading index variables and add one - when writing index variables. - * q (double) is the volumetric well rate. A positive value indicates - recharge (injection) and a negative value indicates discharge - (extraction). If the Options block includes a TIMESERIESFILE entry - (see the "Time-Variable Input" section), values can be obtained from - a time series by entering the time-series name in place of a numeric - value. - * aux (double) represents the values of the auxiliary variables for - each well. The values of auxiliary variables must be present for each - well. The values must be specified in the order of the auxiliary - variables specified in the OPTIONS block. If the package supports - time series and the Options block includes a TIMESERIESFILE entry - (see the "Time-Variable Input" section), values can be obtained from - a time series by entering the time-series name in place of a numeric - value. - * boundname (string) name of the well cell. BOUNDNAME is an ASCII - character variable that can contain as many as 40 characters. If - BOUNDNAME contains spaces in it, then the entire name must be - enclosed within single quotes. - filename : String - File name for this package. - pname : String - Package name for this package. - parent_file : MFPackage - Parent package file that references this package. Only needed for - utility packages (mfutl*). For example, mfutllaktab package must have - a mfgwflak package parent_file. - - """ - auxiliary = ListTemplateGenerator(('gwf6', 'wel', 'options', - 'auxiliary')) - ts_filerecord = ListTemplateGenerator(('gwf6', 'wel', 'options', - 'ts_filerecord')) - obs_filerecord = ListTemplateGenerator(('gwf6', 'wel', 'options', - 'obs_filerecord')) - stress_period_data = ListTemplateGenerator(('gwf6', 'wel', 'period', - 'stress_period_data')) - package_abbr = "gwfwel" - _package_type = "wel" - dfn_file_name = "gwf-wel.dfn" - - dfn = [["block options", "name auxiliary", "type string", - "shape (naux)", "reader urword", "optional true"], - ["block options", "name auxmultname", "type string", "shape", - "reader urword", "optional true"], - ["block options", "name boundnames", "type keyword", "shape", - "reader urword", "optional true"], - ["block options", "name print_input", "type keyword", - "reader urword", "optional true"], - ["block options", "name print_flows", "type keyword", - "reader urword", "optional true"], - ["block options", "name save_flows", "type keyword", - "reader urword", "optional true"], - ["block options", "name auto_flow_reduce", - "type double precision", "reader urword", "optional true"], - ["block options", "name ts_filerecord", - "type record ts6 filein ts6_filename", "shape", "reader urword", - "tagged true", "optional true", "construct_package ts", - "construct_data timeseries", "parameter_name timeseries"], - ["block options", "name ts6", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name filein", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name ts6_filename", "type string", - "preserve_case true", "in_record true", "reader urword", - "optional false", "tagged false"], - ["block options", "name obs_filerecord", - "type record obs6 filein obs6_filename", "shape", "reader urword", - "tagged true", "optional true", "construct_package obs", - "construct_data continuous", "parameter_name observations"], - ["block options", "name obs6", "type 
keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name obs6_filename", "type string", - "preserve_case true", "in_record true", "tagged false", - "reader urword", "optional false"], - ["block options", "name mover", "type keyword", "tagged true", - "reader urword", "optional true"], - ["block dimensions", "name maxbound", "type integer", - "reader urword", "optional false"], - ["block period", "name iper", "type integer", - "block_variable True", "in_record true", "tagged false", "shape", - "valid", "reader urword", "optional false"], - ["block period", "name stress_period_data", - "type recarray cellid q aux boundname", "shape (maxbound)", - "reader urword"], - ["block period", "name cellid", "type integer", - "shape (ncelldim)", "tagged false", "in_record true", - "reader urword"], - ["block period", "name q", "type double precision", "shape", - "tagged false", "in_record true", "reader urword", - "time_series true"], - ["block period", "name aux", "type double precision", - "in_record true", "tagged false", "shape (naux)", "reader urword", - "optional true", "time_series true"], - ["block period", "name boundname", "type string", "shape", - "tagged false", "in_record true", "reader urword", - "optional true"]] - - def __init__(self, model, loading_package=False, auxiliary=None, - auxmultname=None, boundnames=None, print_input=None, - print_flows=None, save_flows=None, auto_flow_reduce=None, - timeseries=None, observations=None, mover=None, maxbound=None, - stress_period_data=None, filename=None, pname=None, - parent_file=None): - super(ModflowGwfwel, self).__init__(model, "wel", filename, pname, - loading_package, parent_file) - - # set up variables - self.auxiliary = self.build_mfdata("auxiliary", auxiliary) - self.auxmultname = self.build_mfdata("auxmultname", auxmultname) - self.boundnames = self.build_mfdata("boundnames", boundnames) - self.print_input = self.build_mfdata("print_input", print_input) - self.print_flows = self.build_mfdata("print_flows", print_flows) - self.save_flows = self.build_mfdata("save_flows", save_flows) - self.auto_flow_reduce = self.build_mfdata("auto_flow_reduce", - auto_flow_reduce) - self._ts_filerecord = self.build_mfdata("ts_filerecord", - None) - self._ts_package = self.build_child_package("ts", timeseries, - "timeseries", - self._ts_filerecord) - self._obs_filerecord = self.build_mfdata("obs_filerecord", - None) - self._obs_package = self.build_child_package("obs", observations, - "continuous", - self._obs_filerecord) - self.mover = self.build_mfdata("mover", mover) - self.maxbound = self.build_mfdata("maxbound", maxbound) - self.stress_period_data = self.build_mfdata("stress_period_data", - stress_period_data) - self._init_complete = True +# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY +# mf6/utils/createpackages.py +from .. import mfpackage +from ..data.mfdatautil import ListTemplateGenerator + + +class ModflowGwfwel(mfpackage.MFPackage): + """ + ModflowGwfwel defines a wel package within a gwf6 model. + + Parameters + ---------- + model : MFModel + Model that this package is a part of. Package is automatically + added to model when it is initialized. + loading_package : bool + Do not set this parameter. It is intended for debugging and internal + processing purposes only. + auxiliary : [string] + * auxiliary (string) defines an array of one or more auxiliary variable + names. 
There is no limit on the number of auxiliary variables that + can be provided on this line; however, lists of information provided + in subsequent blocks must have a column of data for each auxiliary + variable name defined here. The number of auxiliary variables + detected on this line determines the value for naux. Comments cannot + be provided anywhere on this line as they will be interpreted as + auxiliary variable names. Auxiliary variables may not be used by the + package, but they will be available for use by other parts of the + program. The program will terminate with an error if auxiliary + variables are specified on more than one line in the options block. + auxmultname : string + * auxmultname (string) name of auxiliary variable to be used as + multiplier of well flow rate. + boundnames : boolean + * boundnames (boolean) keyword to indicate that boundary names may be + provided with the list of well cells. + print_input : boolean + * print_input (boolean) keyword to indicate that the list of well + information will be written to the listing file immediately after it + is read. + print_flows : boolean + * print_flows (boolean) keyword to indicate that the list of well flow + rates will be printed to the listing file for every stress period + time step in which "BUDGET PRINT" is specified in Output Control. If + there is no Output Control option and "PRINT_FLOWS" is specified, + then flow rates are printed for the last time step of each stress + period. + save_flows : boolean + * save_flows (boolean) keyword to indicate that well flow terms will be + written to the file specified with "BUDGET FILEOUT" in Output + Control. + auto_flow_reduce : double + * auto_flow_reduce (double) keyword and real value that defines the + fraction of the cell thickness used as an interval for smoothly + adjusting negative pumping rates to 0 in cells with head values less + than or equal to the bottom of the cell. Negative pumping rates are + adjusted to 0 or a smaller negative value when the head in the cell + is equal to or less than the calculated interval above the cell + bottom. AUTO_FLOW_REDUCE is set to 0.1 if the specified value is less + than or equal to zero. By default, negative pumping rates are not + reduced during a simulation. + timeseries : {varname:data} or timeseries data + * Contains data for the ts package. Data can be stored in a dictionary + containing data for the ts package with variable names as keys and + package data as values. Data just for the timeseries variable is also + acceptable. See ts package documentation for more information. + observations : {varname:data} or continuous data + * Contains data for the obs package. Data can be stored in a dictionary + containing data for the obs package with variable names as keys and + package data as values. Data just for the observations variable is + also acceptable. See obs package documentation for more information. + mover : boolean + * mover (boolean) keyword to indicate that this instance of the Well + Package can be used with the Water Mover (MVR) Package. When the + MOVER option is specified, additional memory is allocated within the + package to store the available, provided, and received water. + maxbound : integer + * maxbound (integer) integer value specifying the maximum number of + wells cells that will be specified for use during any stress period. 
+ stress_period_data : [cellid, q, aux, boundname] + * cellid ((integer, ...)) is the cell identifier, and depends on the + type of grid that is used for the simulation. For a structured grid + that uses the DIS input file, CELLID is the layer, row, and column. + For a grid that uses the DISV input file, CELLID is the layer and + CELL2D number. If the model uses the unstructured discretization + (DISU) input file, CELLID is the node number for the cell. This + argument is an index variable, which means that it should be treated + as zero-based when working with FloPy and Python. Flopy will + automatically subtract one when loading index variables and add one + when writing index variables. + * q (double) is the volumetric well rate. A positive value indicates + recharge (injection) and a negative value indicates discharge + (extraction). If the Options block includes a TIMESERIESFILE entry + (see the "Time-Variable Input" section), values can be obtained from + a time series by entering the time-series name in place of a numeric + value. + * aux (double) represents the values of the auxiliary variables for + each well. The values of auxiliary variables must be present for each + well. The values must be specified in the order of the auxiliary + variables specified in the OPTIONS block. If the package supports + time series and the Options block includes a TIMESERIESFILE entry + (see the "Time-Variable Input" section), values can be obtained from + a time series by entering the time-series name in place of a numeric + value. + * boundname (string) name of the well cell. BOUNDNAME is an ASCII + character variable that can contain as many as 40 characters. If + BOUNDNAME contains spaces in it, then the entire name must be + enclosed within single quotes. + filename : String + File name for this package. + pname : String + Package name for this package. + parent_file : MFPackage + Parent package file that references this package. Only needed for + utility packages (mfutl*). For example, mfutllaktab package must have + a mfgwflak package parent_file. 
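# For orientation, a minimal usage sketch of the options and period data
# documented above. This is illustrative only: `sim` and `gwf` are assumed,
# hypothetical MFSimulation and ModflowGwf objects, and the cellids below
# are zero-based (layer, row, column) tuples for a structured DIS grid.
import flopy

wel = flopy.mf6.ModflowGwfwel(
    gwf,
    print_input=True,
    print_flows=True,
    save_flows=True,
    auto_flow_reduce=0.1,  # smoothly reduce negative rates near the cell bottom
    boundnames=True,
    maxbound=2,
    # stress period 0: negative q is extraction, positive q is injection
    stress_period_data={0: [((0, 4, 4), -500.0, "well-1"),
                            ((0, 9, 9), -250.0, "well-2")]},
)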
+ + """ + auxiliary = ListTemplateGenerator(('gwf6', 'wel', 'options', + 'auxiliary')) + ts_filerecord = ListTemplateGenerator(('gwf6', 'wel', 'options', + 'ts_filerecord')) + obs_filerecord = ListTemplateGenerator(('gwf6', 'wel', 'options', + 'obs_filerecord')) + stress_period_data = ListTemplateGenerator(('gwf6', 'wel', 'period', + 'stress_period_data')) + package_abbr = "gwfwel" + _package_type = "wel" + dfn_file_name = "gwf-wel.dfn" + + dfn = [["block options", "name auxiliary", "type string", + "shape (naux)", "reader urword", "optional true"], + ["block options", "name auxmultname", "type string", "shape", + "reader urword", "optional true"], + ["block options", "name boundnames", "type keyword", "shape", + "reader urword", "optional true"], + ["block options", "name print_input", "type keyword", + "reader urword", "optional true"], + ["block options", "name print_flows", "type keyword", + "reader urword", "optional true"], + ["block options", "name save_flows", "type keyword", + "reader urword", "optional true"], + ["block options", "name auto_flow_reduce", + "type double precision", "reader urword", "optional true"], + ["block options", "name ts_filerecord", + "type record ts6 filein ts6_filename", "shape", "reader urword", + "tagged true", "optional true", "construct_package ts", + "construct_data timeseries", "parameter_name timeseries"], + ["block options", "name ts6", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name filein", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name ts6_filename", "type string", + "preserve_case true", "in_record true", "reader urword", + "optional false", "tagged false"], + ["block options", "name obs_filerecord", + "type record obs6 filein obs6_filename", "shape", "reader urword", + "tagged true", "optional true", "construct_package obs", + "construct_data continuous", "parameter_name observations"], + ["block options", "name obs6", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name obs6_filename", "type string", + "preserve_case true", "in_record true", "tagged false", + "reader urword", "optional false"], + ["block options", "name mover", "type keyword", "tagged true", + "reader urword", "optional true"], + ["block dimensions", "name maxbound", "type integer", + "reader urword", "optional false"], + ["block period", "name iper", "type integer", + "block_variable True", "in_record true", "tagged false", "shape", + "valid", "reader urword", "optional false"], + ["block period", "name stress_period_data", + "type recarray cellid q aux boundname", "shape (maxbound)", + "reader urword"], + ["block period", "name cellid", "type integer", + "shape (ncelldim)", "tagged false", "in_record true", + "reader urword"], + ["block period", "name q", "type double precision", "shape", + "tagged false", "in_record true", "reader urword", + "time_series true"], + ["block period", "name aux", "type double precision", + "in_record true", "tagged false", "shape (naux)", "reader urword", + "optional true", "time_series true"], + ["block period", "name boundname", "type string", "shape", + "tagged false", "in_record true", "reader urword", + "optional true"]] + + def __init__(self, model, loading_package=False, auxiliary=None, + auxmultname=None, boundnames=None, print_input=None, + print_flows=None, save_flows=None, auto_flow_reduce=None, + 
timeseries=None, observations=None, mover=None, maxbound=None, + stress_period_data=None, filename=None, pname=None, + parent_file=None): + super(ModflowGwfwel, self).__init__(model, "wel", filename, pname, + loading_package, parent_file) + + # set up variables + self.auxiliary = self.build_mfdata("auxiliary", auxiliary) + self.auxmultname = self.build_mfdata("auxmultname", auxmultname) + self.boundnames = self.build_mfdata("boundnames", boundnames) + self.print_input = self.build_mfdata("print_input", print_input) + self.print_flows = self.build_mfdata("print_flows", print_flows) + self.save_flows = self.build_mfdata("save_flows", save_flows) + self.auto_flow_reduce = self.build_mfdata("auto_flow_reduce", + auto_flow_reduce) + self._ts_filerecord = self.build_mfdata("ts_filerecord", + None) + self._ts_package = self.build_child_package("ts", timeseries, + "timeseries", + self._ts_filerecord) + self._obs_filerecord = self.build_mfdata("obs_filerecord", + None) + self._obs_package = self.build_child_package("obs", observations, + "continuous", + self._obs_filerecord) + self.mover = self.build_mfdata("mover", mover) + self.maxbound = self.build_mfdata("maxbound", maxbound) + self.stress_period_data = self.build_mfdata("stress_period_data", + stress_period_data) + self._init_complete = True diff --git a/flopy/mf6/modflow/mfmvr.py b/flopy/mf6/modflow/mfmvr.py index daf1f8e6b8..1be6078749 100644 --- a/flopy/mf6/modflow/mfmvr.py +++ b/flopy/mf6/modflow/mfmvr.py @@ -1,184 +1,184 @@ -# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY -# mf6/utils/createpackages.py -from .. import mfpackage -from ..data.mfdatautil import ListTemplateGenerator - - -class ModflowMvr(mfpackage.MFPackage): - """ - ModflowMvr defines a mvr package. - - Parameters - ---------- - simulation : MFSimulation - Simulation that this package is a part of. Package is automatically - added to simulation when it is initialized. - loading_package : bool - Do not set this parameter. It is intended for debugging and internal - processing purposes only. - print_input : boolean - * print_input (boolean) keyword to indicate that the list of MVR - information will be written to the listing file immediately after it - is read. - print_flows : boolean - * print_flows (boolean) keyword to indicate that the list of MVR flow - rates will be printed to the listing file for every stress period - time step in which "BUDGET PRINT" is specified in Output Control. If - there is no Output Control option and "PRINT_FLOWS" is specified, - then flow rates are printed for the last time step of each stress - period. - modelnames : boolean - * modelnames (boolean) keyword to indicate that all package names will - be preceded by the model name for the package. Model names are - required when the Mover Package is used with a GWF-GWF Exchange. The - MODELNAME keyword should not be used for a Mover Package that is for - a single GWF Model. - budget_filerecord : [budgetfile] - * budgetfile (string) name of the output file to write budget - information. - maxmvr : integer - * maxmvr (integer) integer value specifying the maximum number of water - mover entries that will specified for any stress period. - maxpackages : integer - * maxpackages (integer) integer value specifying the number of unique - packages that are included in this water mover input file. - packages : [mname, pname] - * mname (string) name of model containing the package. Model names are - assigned by the user in the simulation name file. 
- * pname (string) is the name of a package that may be included in a - subsequent stress period block. The package name is assigned in the - name file for the GWF Model. Package names are optionally provided in - the name file. If they are not provided by the user, then packages - are assigned a default value, which is the package acronym followed - by a hyphen and the package number. For example, the first Drain - Package is named DRN-1. The second Drain Package is named DRN-2, and - so forth. - perioddata : [mname1, pname1, id1, mname2, pname2, id2, mvrtype, value] - * mname1 (string) name of model containing the package, PNAME1. - * pname1 (string) is the package name for the provider. The package - PNAME1 must be designated to provide water through the MVR Package by - specifying the keyword "MOVER" in its OPTIONS block. - * id1 (integer) is the identifier for the provider. For the standard - boundary packages, the provider identifier is the number of the - boundary as it is listed in the package input file. (Note that the - order of these boundaries may change by stress period, which must be - accounted for in the Mover Package.) So the first well has an - identifier of one. The second is two, and so forth. For the advanced - packages, the identifier is the reach number (SFR Package), well - number (MAW Package), or UZF cell number. For the Lake Package, ID1 - is the lake outlet number. Thus, outflows from a single lake can be - routed to different streams, for example. This argument is an index - variable, which means that it should be treated as zero-based when - working with FloPy and Python. Flopy will automatically subtract one - when loading index variables and add one when writing index - variables. - * mname2 (string) name of model containing the package, PNAME2. - * pname2 (string) is the package name for the receiver. The package - PNAME2 must be designated to receive water from the MVR Package by - specifying the keyword "MOVER" in its OPTIONS block. - * id2 (integer) is the identifier for the receiver. The receiver - identifier is the reach number (SFR Package), Lake number (LAK - Package), well number (MAW Package), or UZF cell number. This - argument is an index variable, which means that it should be treated - as zero-based when working with FloPy and Python. Flopy will - automatically subtract one when loading index variables and add one - when writing index variables. - * mvrtype (string) is the character string signifying the method for - determining how much water will be moved. Supported values are - "FACTOR" "EXCESS" "THRESHOLD" and "UPTO". These four options - determine how the receiver flow rate, :math:`Q_R`, is calculated. - These options are based the options available in the SFR2 Package for - diverting stream flow. - * value (double) is the value to be used in the equation for - calculating the amount of water to move. For the "FACTOR" option, - VALUE is the :math:`\\alpha` factor. For the remaining options, VALUE - is the specified flow rate, :math:`Q_S`. - filename : String - File name for this package. - pname : String - Package name for this package. - parent_file : MFPackage - Parent package file that references this package. Only needed for - utility packages (mfutl*). For example, mfutllaktab package must have - a mfgwflak package parent_file. 
- - """ - budget_filerecord = ListTemplateGenerator(('mvr', 'options', - 'budget_filerecord')) - packages = ListTemplateGenerator(('mvr', 'packages', 'packages')) - perioddata = ListTemplateGenerator(('mvr', 'period', 'perioddata')) - package_abbr = "mvr" - _package_type = "mvr" - dfn_file_name = "gwf-mvr.dfn" - - dfn = [["block options", "name print_input", "type keyword", - "reader urword", "optional true"], - ["block options", "name print_flows", "type keyword", - "reader urword", "optional true"], - ["block options", "name modelnames", "type keyword", - "reader urword", "optional true"], - ["block options", "name budget_filerecord", - "type record budget fileout budgetfile", "shape", "reader urword", - "tagged true", "optional true"], - ["block options", "name budget", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name fileout", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block options", "name budgetfile", "type string", - "preserve_case true", "shape", "in_record true", "reader urword", - "tagged false", "optional false"], - ["block dimensions", "name maxmvr", "type integer", - "reader urword", "optional false"], - ["block dimensions", "name maxpackages", "type integer", - "reader urword", "optional false"], - ["block packages", "name packages", "type recarray mname pname", - "reader urword", "shape (npackages)", "optional false"], - ["block packages", "name mname", "type string", "reader urword", - "shape", "tagged false", "in_record true", "optional true"], - ["block packages", "name pname", "type string", "reader urword", - "shape", "tagged false", "in_record true", "optional false"], - ["block period", "name iper", "type integer", - "block_variable True", "in_record true", "tagged false", "shape", - "valid", "reader urword", "optional false"], - ["block period", "name perioddata", - "type recarray mname1 pname1 id1 mname2 pname2 id2 mvrtype value", - "shape (maxbound)", "reader urword"], - ["block period", "name mname1", "type string", "reader urword", - "shape", "tagged false", "in_record true", "optional true"], - ["block period", "name pname1", "type string", "shape", - "tagged false", "in_record true", "reader urword"], - ["block period", "name id1", "type integer", "shape", - "tagged false", "in_record true", "reader urword", - "numeric_index true"], - ["block period", "name mname2", "type string", "reader urword", - "shape", "tagged false", "in_record true", "optional true"], - ["block period", "name pname2", "type string", "shape", - "tagged false", "in_record true", "reader urword"], - ["block period", "name id2", "type integer", "shape", - "tagged false", "in_record true", "reader urword", - "numeric_index true"], - ["block period", "name mvrtype", "type string", "shape", - "tagged false", "in_record true", "reader urword"], - ["block period", "name value", "type double precision", "shape", - "tagged false", "in_record true", "reader urword"]] - - def __init__(self, simulation, loading_package=False, print_input=None, - print_flows=None, modelnames=None, budget_filerecord=None, - maxmvr=None, maxpackages=None, packages=None, perioddata=None, - filename=None, pname=None, parent_file=None): - super(ModflowMvr, self).__init__(simulation, "mvr", filename, pname, - loading_package, parent_file) - - # set up variables - self.print_input = self.build_mfdata("print_input", print_input) - self.print_flows = self.build_mfdata("print_flows", print_flows) 
- self.modelnames = self.build_mfdata("modelnames", modelnames) - self.budget_filerecord = self.build_mfdata("budget_filerecord", - budget_filerecord) - self.maxmvr = self.build_mfdata("maxmvr", maxmvr) - self.maxpackages = self.build_mfdata("maxpackages", maxpackages) - self.packages = self.build_mfdata("packages", packages) - self.perioddata = self.build_mfdata("perioddata", perioddata) - self._init_complete = True +# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY +# mf6/utils/createpackages.py +from .. import mfpackage +from ..data.mfdatautil import ListTemplateGenerator + + +class ModflowMvr(mfpackage.MFPackage): + """ + ModflowMvr defines a mvr package. + + Parameters + ---------- + simulation : MFSimulation + Simulation that this package is a part of. Package is automatically + added to simulation when it is initialized. + loading_package : bool + Do not set this parameter. It is intended for debugging and internal + processing purposes only. + print_input : boolean + * print_input (boolean) keyword to indicate that the list of MVR + information will be written to the listing file immediately after it + is read. + print_flows : boolean + * print_flows (boolean) keyword to indicate that the list of MVR flow + rates will be printed to the listing file for every stress period + time step in which "BUDGET PRINT" is specified in Output Control. If + there is no Output Control option and "PRINT_FLOWS" is specified, + then flow rates are printed for the last time step of each stress + period. + modelnames : boolean + * modelnames (boolean) keyword to indicate that all package names will + be preceded by the model name for the package. Model names are + required when the Mover Package is used with a GWF-GWF Exchange. The + MODELNAME keyword should not be used for a Mover Package that is for + a single GWF Model. + budget_filerecord : [budgetfile] + * budgetfile (string) name of the output file to write budget + information. + maxmvr : integer + * maxmvr (integer) integer value specifying the maximum number of water + mover entries that will specified for any stress period. + maxpackages : integer + * maxpackages (integer) integer value specifying the number of unique + packages that are included in this water mover input file. + packages : [mname, pname] + * mname (string) name of model containing the package. Model names are + assigned by the user in the simulation name file. + * pname (string) is the name of a package that may be included in a + subsequent stress period block. The package name is assigned in the + name file for the GWF Model. Package names are optionally provided in + the name file. If they are not provided by the user, then packages + are assigned a default value, which is the package acronym followed + by a hyphen and the package number. For example, the first Drain + Package is named DRN-1. The second Drain Package is named DRN-2, and + so forth. + perioddata : [mname1, pname1, id1, mname2, pname2, id2, mvrtype, value] + * mname1 (string) name of model containing the package, PNAME1. + * pname1 (string) is the package name for the provider. The package + PNAME1 must be designated to provide water through the MVR Package by + specifying the keyword "MOVER" in its OPTIONS block. + * id1 (integer) is the identifier for the provider. For the standard + boundary packages, the provider identifier is the number of the + boundary as it is listed in the package input file. 
(Note that the + order of these boundaries may change by stress period, which must be + accounted for in the Mover Package.) So the first well has an + identifier of one. The second is two, and so forth. For the advanced + packages, the identifier is the reach number (SFR Package), well + number (MAW Package), or UZF cell number. For the Lake Package, ID1 + is the lake outlet number. Thus, outflows from a single lake can be + routed to different streams, for example. This argument is an index + variable, which means that it should be treated as zero-based when + working with FloPy and Python. Flopy will automatically subtract one + when loading index variables and add one when writing index + variables. + * mname2 (string) name of model containing the package, PNAME2. + * pname2 (string) is the package name for the receiver. The package + PNAME2 must be designated to receive water from the MVR Package by + specifying the keyword "MOVER" in its OPTIONS block. + * id2 (integer) is the identifier for the receiver. The receiver + identifier is the reach number (SFR Package), Lake number (LAK + Package), well number (MAW Package), or UZF cell number. This + argument is an index variable, which means that it should be treated + as zero-based when working with FloPy and Python. Flopy will + automatically subtract one when loading index variables and add one + when writing index variables. + * mvrtype (string) is the character string signifying the method for + determining how much water will be moved. Supported values are + "FACTOR" "EXCESS" "THRESHOLD" and "UPTO". These four options + determine how the receiver flow rate, :math:`Q_R`, is calculated. + These options are based the options available in the SFR2 Package for + diverting stream flow. + * value (double) is the value to be used in the equation for + calculating the amount of water to move. For the "FACTOR" option, + VALUE is the :math:`\\alpha` factor. For the remaining options, VALUE + is the specified flow rate, :math:`Q_S`. + filename : String + File name for this package. + pname : String + Package name for this package. + parent_file : MFPackage + Parent package file that references this package. Only needed for + utility packages (mfutl*). For example, mfutllaktab package must have + a mfgwflak package parent_file. 
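# For orientation, a minimal usage sketch of the packages and perioddata
# records documented above. `sim` is an assumed, hypothetical MFSimulation;
# "gwf-1", "SFR-1", and "LAK-1" are assumed model and package names, and both
# packages are assumed to specify MOVER in their OPTIONS blocks. The id
# columns are zero-based when working with FloPy.
import flopy

mvr = flopy.mf6.ModflowMvr(
    sim,
    modelnames=True,
    maxmvr=1,
    maxpackages=2,
    packages=[("gwf-1", "SFR-1"), ("gwf-1", "LAK-1")],
    # stress period 0: move half of the available flow from SFR reach 3
    # (zero-based id 2) to LAK lake 1 (zero-based id 0) using FACTOR
    perioddata={0: [("gwf-1", "SFR-1", 2, "gwf-1", "LAK-1", 0,
                     "FACTOR", 0.5)]},
)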
+ + """ + budget_filerecord = ListTemplateGenerator(('mvr', 'options', + 'budget_filerecord')) + packages = ListTemplateGenerator(('mvr', 'packages', 'packages')) + perioddata = ListTemplateGenerator(('mvr', 'period', 'perioddata')) + package_abbr = "mvr" + _package_type = "mvr" + dfn_file_name = "gwf-mvr.dfn" + + dfn = [["block options", "name print_input", "type keyword", + "reader urword", "optional true"], + ["block options", "name print_flows", "type keyword", + "reader urword", "optional true"], + ["block options", "name modelnames", "type keyword", + "reader urword", "optional true"], + ["block options", "name budget_filerecord", + "type record budget fileout budgetfile", "shape", "reader urword", + "tagged true", "optional true"], + ["block options", "name budget", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name fileout", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block options", "name budgetfile", "type string", + "preserve_case true", "shape", "in_record true", "reader urword", + "tagged false", "optional false"], + ["block dimensions", "name maxmvr", "type integer", + "reader urword", "optional false"], + ["block dimensions", "name maxpackages", "type integer", + "reader urword", "optional false"], + ["block packages", "name packages", "type recarray mname pname", + "reader urword", "shape (npackages)", "optional false"], + ["block packages", "name mname", "type string", "reader urword", + "shape", "tagged false", "in_record true", "optional true"], + ["block packages", "name pname", "type string", "reader urword", + "shape", "tagged false", "in_record true", "optional false"], + ["block period", "name iper", "type integer", + "block_variable True", "in_record true", "tagged false", "shape", + "valid", "reader urword", "optional false"], + ["block period", "name perioddata", + "type recarray mname1 pname1 id1 mname2 pname2 id2 mvrtype value", + "shape (maxbound)", "reader urword"], + ["block period", "name mname1", "type string", "reader urword", + "shape", "tagged false", "in_record true", "optional true"], + ["block period", "name pname1", "type string", "shape", + "tagged false", "in_record true", "reader urword"], + ["block period", "name id1", "type integer", "shape", + "tagged false", "in_record true", "reader urword", + "numeric_index true"], + ["block period", "name mname2", "type string", "reader urword", + "shape", "tagged false", "in_record true", "optional true"], + ["block period", "name pname2", "type string", "shape", + "tagged false", "in_record true", "reader urword"], + ["block period", "name id2", "type integer", "shape", + "tagged false", "in_record true", "reader urword", + "numeric_index true"], + ["block period", "name mvrtype", "type string", "shape", + "tagged false", "in_record true", "reader urword"], + ["block period", "name value", "type double precision", "shape", + "tagged false", "in_record true", "reader urword"]] + + def __init__(self, simulation, loading_package=False, print_input=None, + print_flows=None, modelnames=None, budget_filerecord=None, + maxmvr=None, maxpackages=None, packages=None, perioddata=None, + filename=None, pname=None, parent_file=None): + super(ModflowMvr, self).__init__(simulation, "mvr", filename, pname, + loading_package, parent_file) + + # set up variables + self.print_input = self.build_mfdata("print_input", print_input) + self.print_flows = self.build_mfdata("print_flows", print_flows) 
+ self.modelnames = self.build_mfdata("modelnames", modelnames) + self.budget_filerecord = self.build_mfdata("budget_filerecord", + budget_filerecord) + self.maxmvr = self.build_mfdata("maxmvr", maxmvr) + self.maxpackages = self.build_mfdata("maxpackages", maxpackages) + self.packages = self.build_mfdata("packages", packages) + self.perioddata = self.build_mfdata("perioddata", perioddata) + self._init_complete = True diff --git a/flopy/mf6/modflow/mfutlobs.py b/flopy/mf6/modflow/mfutlobs.py index e71e27e467..d06ed2b7c4 100644 --- a/flopy/mf6/modflow/mfutlobs.py +++ b/flopy/mf6/modflow/mfutlobs.py @@ -1,137 +1,137 @@ -# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY -# mf6/utils/createpackages.py -from .. import mfpackage -from ..data.mfdatautil import ListTemplateGenerator - - -class ModflowUtlobs(mfpackage.MFPackage): - """ - ModflowUtlobs defines a obs package within a utl model. - - Parameters - ---------- - model : MFModel - Model that this package is a part of. Package is automatically - added to model when it is initialized. - loading_package : bool - Do not set this parameter. It is intended for debugging and internal - processing purposes only. - digits : integer - * digits (integer) Keyword and an integer digits specifier used for - conversion of simulated values to text on output. The default is 5 - digits. When simulated values are written to a file specified as file - type DATA in the Name File, the digits specifier controls the number - of significant digits with which simulated values are written to the - output file. The digits specifier has no effect on the number of - significant digits with which the simulation time is written for - continuous observations. - print_input : boolean - * print_input (boolean) keyword to indicate that the list of - observation information will be written to the listing file - immediately after it is read. - continuous : [obsname, obstype, id, id2] - * obsname (string) string of 1 to 40 nonblank characters used to - identify the observation. The identifier need not be unique; however, - identification and post-processing of observations in the output - files are facilitated if each observation is given a unique name. - * obstype (string) a string of characters used to identify the - observation type. - * id (string) Text identifying cell where observation is located. For - packages other than NPF, if boundary names are defined in the - corresponding package input file, ID can be a boundary name. - Otherwise ID is a cellid. If the model discretization is type DIS, - cellid is three integers (layer, row, column). If the discretization - is DISV, cellid is two integers (layer, cell number). If the - discretization is DISU, cellid is one integer (node number). This - argument is an index variable, which means that it should be treated - as zero-based when working with FloPy and Python. Flopy will - automatically subtract one when loading index variables and add one - when writing index variables. - * id2 (string) Text identifying cell adjacent to cell identified by ID. - The form of ID2 is as described for ID. ID2 is used for intercell- - flow observations of a GWF model, for three observation types of the - LAK Package, for two observation types of the MAW Package, and one - observation type of the UZF Package. This argument is an index - variable, which means that it should be treated as zero-based when - working with FloPy and Python. 
Flopy will automatically subtract one - when loading index variables and add one when writing index - variables. - filename : String - File name for this package. - pname : String - Package name for this package. - parent_file : MFPackage - Parent package file that references this package. Only needed for - utility packages (mfutl*). For example, mfutllaktab package must have - a mfgwflak package parent_file. - - """ - continuous = ListTemplateGenerator(('obs', 'continuous', - 'continuous')) - package_abbr = "utlobs" - _package_type = "obs" - dfn_file_name = "utl-obs.dfn" - - dfn = [["block options", "name digits", "type integer", "shape", - "reader urword", "optional true"], - ["block options", "name print_input", "type keyword", - "reader urword", "optional true"], - ["block continuous", "name output", - "type record fileout obs_output_file_name binary", "shape", - "block_variable true", "in_record false", "reader urword", - "optional false"], - ["block continuous", "name fileout", "type keyword", "shape", - "in_record true", "reader urword", "tagged true", - "optional false"], - ["block continuous", "name obs_output_file_name", "type string", - "preserve_case true", "in_record true", "shape", "tagged false", - "reader urword"], - ["block continuous", "name binary", "type keyword", - "in_record true", "shape", "reader urword", "optional true"], - ["block continuous", "name continuous", - "type recarray obsname obstype id id2", "shape", "reader urword", - "optional false"], - ["block continuous", "name obsname", "type string", "shape", - "tagged false", "in_record true", "reader urword"], - ["block continuous", "name obstype", "type string", "shape", - "tagged false", "in_record true", "reader urword"], - ["block continuous", "name id", "type string", "shape", - "tagged false", "in_record true", "reader urword", - "numeric_index true"], - ["block continuous", "name id2", "type string", "shape", - "tagged false", "in_record true", "reader urword", - "optional true", "numeric_index true"]] - - def __init__(self, model, loading_package=False, digits=None, - print_input=None, continuous=None, filename=None, pname=None, - parent_file=None): - super(ModflowUtlobs, self).__init__(model, "obs", filename, pname, - loading_package, parent_file) - - # set up variables - self.digits = self.build_mfdata("digits", digits) - self.print_input = self.build_mfdata("print_input", print_input) - self.continuous = self.build_mfdata("continuous", continuous) - self._init_complete = True - - -class UtlobsPackages(mfpackage.MFChildPackages): - """ - UtlobsPackages is a container class for the ModflowUtlobs class. - - Methods - ---------- - initialize - Initializes a new ModflowUtlobs package removing any sibling child - packages attached to the same parent package. See ModflowUtlobs init - documentation for definition of parameters. - """ - package_abbr = "utlobspackages" - - def initialize(self, digits=None, print_input=None, continuous=None, - filename=None, pname=None): - new_package = ModflowUtlobs(self._model, digits=digits, - print_input=print_input, - continuous=continuous, filename=filename, - pname=pname, parent_file=self._cpparent) - self._init_package(new_package, filename) +# DO NOT MODIFY THIS FILE DIRECTLY. THIS FILE MUST BE CREATED BY +# mf6/utils/createpackages.py +from .. import mfpackage +from ..data.mfdatautil import ListTemplateGenerator + + +class ModflowUtlobs(mfpackage.MFPackage): + """ + ModflowUtlobs defines a obs package within a utl model. 
+ + Parameters + ---------- + model : MFModel + Model that this package is a part of. Package is automatically + added to model when it is initialized. + loading_package : bool + Do not set this parameter. It is intended for debugging and internal + processing purposes only. + digits : integer + * digits (integer) Keyword and an integer digits specifier used for + conversion of simulated values to text on output. The default is 5 + digits. When simulated values are written to a file specified as file + type DATA in the Name File, the digits specifier controls the number + of significant digits with which simulated values are written to the + output file. The digits specifier has no effect on the number of + significant digits with which the simulation time is written for + continuous observations. + print_input : boolean + * print_input (boolean) keyword to indicate that the list of + observation information will be written to the listing file + immediately after it is read. + continuous : [obsname, obstype, id, id2] + * obsname (string) string of 1 to 40 nonblank characters used to + identify the observation. The identifier need not be unique; however, + identification and post-processing of observations in the output + files are facilitated if each observation is given a unique name. + * obstype (string) a string of characters used to identify the + observation type. + * id (string) Text identifying cell where observation is located. For + packages other than NPF, if boundary names are defined in the + corresponding package input file, ID can be a boundary name. + Otherwise ID is a cellid. If the model discretization is type DIS, + cellid is three integers (layer, row, column). If the discretization + is DISV, cellid is two integers (layer, cell number). If the + discretization is DISU, cellid is one integer (node number). This + argument is an index variable, which means that it should be treated + as zero-based when working with FloPy and Python. Flopy will + automatically subtract one when loading index variables and add one + when writing index variables. + * id2 (string) Text identifying cell adjacent to cell identified by ID. + The form of ID2 is as described for ID. ID2 is used for intercell- + flow observations of a GWF model, for three observation types of the + LAK Package, for two observation types of the MAW Package, and one + observation type of the UZF Package. This argument is an index + variable, which means that it should be treated as zero-based when + working with FloPy and Python. Flopy will automatically subtract one + when loading index variables and add one when writing index + variables. + filename : String + File name for this package. + pname : String + Package name for this package. + parent_file : MFPackage + Parent package file that references this package. Only needed for + utility packages (mfutl*). For example, mfutllaktab package must have + a mfgwflak package parent_file. 
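# For orientation, a minimal usage sketch of the continuous block documented
# above. `gwf` is an assumed, hypothetical ModflowGwf model on a DIS grid;
# the continuous dict is keyed by the output file each record block writes to.
import flopy

obs = flopy.mf6.ModflowUtlobs(
    gwf,
    digits=10,
    print_input=True,
    continuous={"heads.obs.csv": [
        # (obsname, obstype, id); id is a zero-based (layer, row, column)
        ("h-1-5-5", "HEAD", (0, 4, 4)),
    ]},
)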
+ + """ + continuous = ListTemplateGenerator(('obs', 'continuous', + 'continuous')) + package_abbr = "utlobs" + _package_type = "obs" + dfn_file_name = "utl-obs.dfn" + + dfn = [["block options", "name digits", "type integer", "shape", + "reader urword", "optional true"], + ["block options", "name print_input", "type keyword", + "reader urword", "optional true"], + ["block continuous", "name output", + "type record fileout obs_output_file_name binary", "shape", + "block_variable true", "in_record false", "reader urword", + "optional false"], + ["block continuous", "name fileout", "type keyword", "shape", + "in_record true", "reader urword", "tagged true", + "optional false"], + ["block continuous", "name obs_output_file_name", "type string", + "preserve_case true", "in_record true", "shape", "tagged false", + "reader urword"], + ["block continuous", "name binary", "type keyword", + "in_record true", "shape", "reader urword", "optional true"], + ["block continuous", "name continuous", + "type recarray obsname obstype id id2", "shape", "reader urword", + "optional false"], + ["block continuous", "name obsname", "type string", "shape", + "tagged false", "in_record true", "reader urword"], + ["block continuous", "name obstype", "type string", "shape", + "tagged false", "in_record true", "reader urword"], + ["block continuous", "name id", "type string", "shape", + "tagged false", "in_record true", "reader urword", + "numeric_index true"], + ["block continuous", "name id2", "type string", "shape", + "tagged false", "in_record true", "reader urword", + "optional true", "numeric_index true"]] + + def __init__(self, model, loading_package=False, digits=None, + print_input=None, continuous=None, filename=None, pname=None, + parent_file=None): + super(ModflowUtlobs, self).__init__(model, "obs", filename, pname, + loading_package, parent_file) + + # set up variables + self.digits = self.build_mfdata("digits", digits) + self.print_input = self.build_mfdata("print_input", print_input) + self.continuous = self.build_mfdata("continuous", continuous) + self._init_complete = True + + +class UtlobsPackages(mfpackage.MFChildPackages): + """ + UtlobsPackages is a container class for the ModflowUtlobs class. + + Methods + ---------- + initialize + Initializes a new ModflowUtlobs package removing any sibling child + packages attached to the same parent package. See ModflowUtlobs init + documentation for definition of parameters. + """ + package_abbr = "utlobspackages" + + def initialize(self, digits=None, print_input=None, continuous=None, + filename=None, pname=None): + new_package = ModflowUtlobs(self._model, digits=digits, + print_input=print_input, + continuous=continuous, filename=filename, + pname=pname, parent_file=self._cpparent) + self._init_package(new_package, filename) diff --git a/flopy/mf6/utils/binaryfile_utils.py b/flopy/mf6/utils/binaryfile_utils.py index 797a102cef..44558dc92b 100644 --- a/flopy/mf6/utils/binaryfile_utils.py +++ b/flopy/mf6/utils/binaryfile_utils.py @@ -1,382 +1,382 @@ -import os -import numpy as np -from ...utils import binaryfile as bf - - -class MFOutput: - """ - Wrapper class for Binary Arrays. This class enables directly getting slices - from the binary output. It is intended to be called from the __getitem__ - method of the SimulationDict() class. Implemented to conserve memory. - - Parameters - ---------- - path: binary file path location - mfdict: SimulationDict() object - key: OrderedDictionary key ex. 
('flow15','CBC','FLOW RIGHT FACE') - - Returns - ------- - Xarray of [n,n,n,n] dimension - - Usage: - ----- - >>> val = MFOutput(mfdict, path, key) - >>> return val.data - - User interaction: - ----------------- - >>> data[('flow15','CBC','FLOW RIGHT FACE')][:,0,1,:] - or - >>> data[('flow15','CBC','FLOW RIGHT FACE')] - """ - def __init__(self, mfdict, path, key): - self.mfdict = mfdict - data = MFOutputRequester(mfdict, path, key) - try: - self.data = data.querybinarydata - except AttributeError: - self.data = np.array([[[[]]]]) - - def __iter__(self): - yield self.data - - def __getitem__(self, index): - self.data = self.data[index] - return self.data - - -class MFOutputRequester: - """ - MFOutputRequest class is a helper function to enable the user to query - binary data from the SimulationDict() object on the fly without - actually storing it in the SimulationDict() object. - - Parameters: - ---------- - mfdict: OrderedDict - local instance of the SimulationDict() object - path: - pointer to the MFSimulationPath object - key: tuple - user requested data key - - Methods: - ------- - MFOutputRequester.querybinarydata - returns: Xarray object - - Examples: - -------- - >>> data = MFOutputRequester(mfdict, path, key) - >>> data.querybinarydata - """ - - def __init__(self, mfdict, path, key): - self.path = path - self.mfdict = mfdict - self.dataDict = {} - # get the binary file locations, create a dictionary key to look them - # up from, store in self.dataDict - self._getbinaryfilepaths() - - # check if supplied key exists, and model grid type - if key in self.dataDict: - if (key[0], 'disv', 'dimensions', 'nvert') in self.mfdict: - self.querybinarydata = \ - self._querybinarydata_vertices(self.mfdict, key) - elif (key[0], 'disu', 'connectiondata', 'iac') in self.mfdict: - self.querybinarydata = self._querybinarydata_unstructured(key) - else: - self.querybinarydata = self._querybinarydata(key) - elif key == ('model', 'HDS', 'IamAdummy'): - pass - else: - print('\nValid Keys Are:\n') - for valid_key in self.dataDict: - print(valid_key) - raise KeyError('Invalid key {}'.format(key)) - - def _querybinarydata(self, key): - # Basic definition to get output from modflow binary files for - # simulations using a structured grid - path = self.dataDict[key] - bintype = key[1] - - bindata = self._get_binary_file_object(path, bintype, key) - - if bintype == 'CBC': - try: - return np.array(bindata.get_data(text=key[-1], full3D=True)) - except ValueError: - # imeth == 6 - return np.array(bindata.get_data(text=key[-1], full3D=False)) - else: - return np.array(bindata.get_alldata()) - - def _querybinarydata_vertices(self, mfdict, key): - # Basic definition to get output data from binary output files for - # simulations that define grid by vertices - path = self.dataDict[key] - bintype = key[1] - - bindata = self._get_binary_file_object(path, bintype, key) - - if bintype == 'CBC': - if key[-1] == 'FLOW-JA-FACE': - data = np.array(bindata.get_data(text=key[-1])) - # uncomment line to remove extra dimensions from data - # data data.shape = (len(times), -1) - return data - - else: - try: - data = np.array(bindata.get_data(text=key[-1], - full3D=True)) - except ValueError: - # imeth == 6 - data = np.array(bindata.get_data(text=key[-1], - full3D=False)) - else: - data = np.array(bindata.get_alldata()) - - # uncomment line to remove extra dimensions from data - # data = _reshape_binary_data(data, 'V') - return data - - def _querybinarydata_unstructured(self, key): - # get unstructured binary data in numpy array 
format. - path = self.dataDict[key] - bintype = key[1] - - bindata = self._get_binary_file_object(path, bintype, key) - - if bintype == 'CBC': - try: - data = np.array(bindata.get_data(text=key[-1], full3D=True)) - except ValueError: - data = np.array(bindata.get_data(text=key[-1], full3D=False)) - else: - data = bindata.get_alldata() - - # remove un-needed dimensions - data = _reshape_binary_data(data, 'U') - - if key[-1] == "FLOW-JA-FACE": - return data - - else: - return data - - def _get_binary_file_object(self, path, bintype, key): - # simple method that trys to open the binary file object using Flopy - if bintype == 'CBC': - try: - return bf.CellBudgetFile(path, precision='double') - except AssertionError: - raise AssertionError('{} does not ' - 'exist'.format(self.dataDict[key])) - - elif bintype == 'HDS': - try: - return bf.HeadFile(path, precision='double') - except AssertionError: - raise AssertionError('{} does not ' - 'exist'.format(self.dataDict[key])) - - elif bintype == 'DDN': - try: - return bf.HeadFile(path, text='drawdown', precision='double') - except AssertionError: - raise AssertionError('{} does not ' - 'exist'.format(self.dataDict[key])) - - elif bintype == 'UCN': - try: - return bf.UcnFile(path, precision="single") - except AssertionError: - raise AssertionError('{} does not ' - 'exist'.format(self.dataDict[key])) - - else: - raise AssertionError() - - @staticmethod - def _get_vertices(mfdict, key): - """ - Depreciated! Consider removing from code. - - Parameters - ---------- - key: binary query dictionary key - - Returns - ------- - information defining specified vertices for all model cells to be added - to xarray as coordinates. - cellid: (list) corresponds to the modflow CELL2d cell number - xcyc: (n x 2) dimensional Pandas object of tuples defining the CELL2d - center coordinates - nverts: (list) number of xy vertices corresponding to a cell - xv: (n x nverts) dimensional Pandas object of tuples. Contains x - vertices for a cell - yv: (n x nverts) dimensional Pandas object of tuples. 
Contains y - vertices for a cell - topv: (n x nlayers) dimensional Pandas object of cell top elevations - corresponding to a row column location - botmv: (n x nlayers) dimensional Pandas object of cell bottom - elevations corresponding to a row column location - """ - - try: - import pandas as pd - except Exception as e: - msg = 'MFOutputRequester._get_vertices(): requires pandas' - raise ImportError(msg) - - mname = key[0] - cellid = mfdict[(mname, 'DISV8', 'CELL2D', 'cell2d_num')] - - cellxc = mfdict[(mname, 'DISV8', 'CELL2D', 'xc')] - cellyc = mfdict[(mname, 'DISV8', 'CELL2D', 'yc')] - xcyc = [(cellxc[i], cellyc[i]) for i in range(len(cellxc))] - xcyc = pd.Series(xcyc, dtype='object') - - nverts = mfdict[(mname, 'DISV8', 'CELL2D', 'nvert')] - vertnums = mfdict[(mname, 'DISV8', 'CELL2D', 'iv')] - vertid = mfdict[(mname, 'DISV8', 'VERTICES', 'vert_num')] - vertx = mfdict[(mname, 'DISV8', 'VERTICES', 'x')] - verty = mfdict[(mname, 'DISV8', 'VERTICES', 'y')] - # get vertices that correspond to CellID list - xv = [] - yv = [] - for line in vertnums: - tempx = [] - tempy = [] - for vert in line: - idx = vertid.index(vert) - tempx.append(vertx[idx]) - tempy.append(verty[idx]) - xv.append(tempx) - yv.append(tempy) - xv = pd.Series(xv, dtype='object') - yv = pd.Series(yv, dtype='object') - - top = np.array(mfdict[(mname, 'DISV8', 'CELLDATA', 'top')]) - botm = np.array(mfdict[(mname, 'DISV8', 'CELLDATA', 'botm')]) - top = top.tolist() - botm = botm.tolist() - # get cell top and bottom by layer - topv = list(zip(top, *botm[:-1])) - botmv = list(zip(*botm)) - topv = pd.Series(topv, dtype='object') - botmv = pd.Series(botmv, dtype='object') - - return cellid, xcyc, nverts, xv, yv, topv, botmv - - def _getbinaryfilepaths(self): - # model paths - self.modelpathdict = {} - for i in self.path.model_relative_path: - self.modelpathdict[i] = self.path.get_model_path(i) - sim_path = self.path.get_sim_path() - self.binarypathdict = {} - # check output control to see if a binary file is supposed to exist. - # Get path to that file - for i in self.modelpathdict: - if (i, 'oc', 'options', 'budget_filerecord') in self.mfdict: - cbc = self.mfdict[(i, 'oc', 'options', 'budget_filerecord')] - if cbc.get_data() is not None: - self.binarypathdict[(i, 'CBC')] = \ - os.path.join(sim_path, cbc.get_data()[0][0]) - - if (i, 'oc', 'options', 'head_filerecord') in self.mfdict: - hds = self.mfdict[(i, 'oc', 'options', 'head_filerecord')] - if hds.get_data() is not None: - self.binarypathdict[(i, 'HDS')] = \ - os.path.join(sim_path, hds.get_data()[0][0]) - - if (i, 'oc', 'options', 'drawdown_filerecord') in self.mfdict: - ddn = self.mfdict[(i, 'oc', 'options', 'drawdown_filerecord')] - if ddn.get_data() is not None: - self.binarypathdict[(i, 'DDN')] = \ - os.path.join(sim_path, ddn.get_data()[0][0]) - - self._setbinarykeys(self.binarypathdict) - - def _setbinarykeys(self, binarypathdict): - # check that if a binary file is supposed to exist, it does, and create - # a dictionary key to access that data - for key in binarypathdict: - path = binarypathdict[key] - if key[1] == 'CBC': - try: - readcbc = bf.CellBudgetFile(path, precision='double') - for record in readcbc.get_unique_record_names(): - name = record.decode("utf-8").strip(' ') - # store keys along with model name in ordered dict? 
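To make the key layout above concrete: each entry that _setbinarykeys adds to dataDict is a (model name, file type, record name) tuple mapped to a file path, and the getkeys helper defined just below probes the output files with a dummy key to enumerate them. A minimal sketch, assuming `sim` is a loaded flopy.mf6.MFSimulation and that its simulation_data object exposes the SimulationDict as `mfdata` and the path manager as `mfpath` (both attribute names are assumptions here, not guaranteed by this module):

    mfdata = sim.simulation_data.mfdata    # the SimulationDict instance
    mfpath = sim.simulation_data.mfpath    # the simulation path object
    # prints every valid (model, filetype, record) key, e.g.
    # ('flow15', 'CBC', 'FLOW RIGHT FACE'), and returns the requester
    requester = MFOutputRequester.getkeys(mfdata, mfpath, print_keys=True)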
-                        self.dataDict[(key[0], key[1], name)] = path
-                    readcbc.close()
-
-                except:
-                    pass
-
-            elif key[1] == 'HDS':
-                try:
-                    readhead = bf.HeadFile(path, precision='double')
-                    self.dataDict[(key[0], key[1], 'HEAD')] = path
-                    readhead.close()
-
-                except:
-                    pass
-
-            elif key[1] == 'DDN':
-                try:
-                    readddn = bf.HeadFile(path, text='drawdown',
-                                          precision='double')
-                    self.dataDict[(key[0], key[1], 'DRAWDOWN')] = path
-                    readddn.close()
-
-                except:
-                    pass
-
-            elif key[1] == 'UCN':
-                try:
-                    readucn = bf.UcnFile(path, precision='single')
-                    self.dataDict[(key[0], key[1], 'CONCENTRATION')] = path
-                    readucn.close()
-
-                except:
-                    pass
-
-            else:
-                pass
-
-    @staticmethod
-    def getkeys(mfdict, path, print_keys=True):
-        # use a dummy key to get valid binary output keys
-        dummy_key = ('model', 'HDS', 'IamAdummy')
-        x = MFOutputRequester(mfdict, path, dummy_key)
-        keys = [i for i in x.dataDict]
-        if print_keys is True:
-            for key in keys:
-                print(key)
-        return x
-
-
-def _reshape_binary_data(data, dtype=None):
-    # removes unnecessary dimensions from data returned by
-    # flopy.utils.binaryfile
-    time = len(data)
-    data = np.array(data)
-    if dtype is None:
-        return data
-    elif dtype == 'V':
-        nodes = len(data[0][0][0])
-        data.shape = (time, -1, nodes)
-    elif dtype == 'U':
-        data.shape = (time, -1)
-    else:
-        err = "Invalid dtype flag supplied, valid are dtype='U', dtype='V'"
-        raise Exception(err)
-    return data
+import os
+import numpy as np
+from ...utils import binaryfile as bf
+
+
+class MFOutput:
+    """
+    Wrapper class for Binary Arrays. This class enables directly getting slices
+    from the binary output. It is intended to be called from the __getitem__
+    method of the SimulationDict() class. Implemented to conserve memory.
+
+    Parameters
+    ----------
+    mfdict: SimulationDict() object
+    path: binary file path location
+    key: OrderedDictionary key ex. ('flow15','CBC','FLOW RIGHT FACE')
+
+    Returns
+    -------
+    np.ndarray of up to four dimensions
+
+    Usage:
+    -----
+    >>> val = MFOutput(mfdict, path, key)
+    >>> val.data
+
+    User interaction:
+    -----------------
+    >>> data[('flow15','CBC','FLOW RIGHT FACE')][:,0,1,:]
+    or
+    >>> data[('flow15','CBC','FLOW RIGHT FACE')]
+    """
+    def __init__(self, mfdict, path, key):
+        self.mfdict = mfdict
+        data = MFOutputRequester(mfdict, path, key)
+        try:
+            self.data = data.querybinarydata
+        except AttributeError:
+            self.data = np.array([[[[]]]])
+
+    def __iter__(self):
+        yield self.data
+
+    def __getitem__(self, index):
+        self.data = self.data[index]
+        return self.data
+
+
+class MFOutputRequester:
+    """
+    MFOutputRequester is a helper class that enables the user to query
+    binary data from the SimulationDict() object on the fly without
+    actually storing it in the SimulationDict() object.
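The grid-type dispatch this class performs (see __init__ below) hinges entirely on which discretization entries exist in the simulation dictionary. A standalone sketch of the same test, with `model_name` a hypothetical string such as 'flow15':

    def grid_type(mfdict, model_name):
        # presence of DISV/DISU entries identifies the discretization;
        # anything else is treated as a structured (DIS) grid
        if (model_name, 'disv', 'dimensions', 'nvert') in mfdict:
            return 'vertex'
        elif (model_name, 'disu', 'connectiondata', 'iac') in mfdict:
            return 'unstructured'
        return 'structured'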
+
+    Parameters:
+    ----------
+    mfdict: OrderedDict
+        local instance of the SimulationDict() object
+    path:
+        pointer to the MFSimulationPath object
+    key: tuple
+        user requested data key
+
+    Methods:
+    -------
+    MFOutputRequester.querybinarydata
+        returns: np.ndarray
+
+    Examples:
+    --------
+    >>> data = MFOutputRequester(mfdict, path, key)
+    >>> data.querybinarydata
+    """
+
+    def __init__(self, mfdict, path, key):
+        self.path = path
+        self.mfdict = mfdict
+        self.dataDict = {}
+        # get the binary file locations, create a dictionary key to look them
+        # up from, store in self.dataDict
+        self._getbinaryfilepaths()
+
+        # check if supplied key exists, and model grid type
+        if key in self.dataDict:
+            if (key[0], 'disv', 'dimensions', 'nvert') in self.mfdict:
+                self.querybinarydata = \
+                    self._querybinarydata_vertices(self.mfdict, key)
+            elif (key[0], 'disu', 'connectiondata', 'iac') in self.mfdict:
+                self.querybinarydata = self._querybinarydata_unstructured(key)
+            else:
+                self.querybinarydata = self._querybinarydata(key)
+        elif key == ('model', 'HDS', 'IamAdummy'):
+            pass
+        else:
+            print('\nValid Keys Are:\n')
+            for valid_key in self.dataDict:
+                print(valid_key)
+            raise KeyError('Invalid key {}'.format(key))
+
+    def _querybinarydata(self, key):
+        # Basic definition to get output from modflow binary files for
+        # simulations using a structured grid
+        path = self.dataDict[key]
+        bintype = key[1]
+
+        bindata = self._get_binary_file_object(path, bintype, key)
+
+        if bintype == 'CBC':
+            try:
+                return np.array(bindata.get_data(text=key[-1], full3D=True))
+            except ValueError:
+                # imeth == 6
+                return np.array(bindata.get_data(text=key[-1], full3D=False))
+        else:
+            return np.array(bindata.get_alldata())
+
+    def _querybinarydata_vertices(self, mfdict, key):
+        # Basic definition to get output data from binary output files for
+        # simulations that define grid by vertices
+        path = self.dataDict[key]
+        bintype = key[1]
+
+        bindata = self._get_binary_file_object(path, bintype, key)
+
+        if bintype == 'CBC':
+            if key[-1] == 'FLOW-JA-FACE':
+                data = np.array(bindata.get_data(text=key[-1]))
+                # uncomment line to remove extra dimensions from data
+                # data.shape = (len(times), -1)
+                return data
+
+            else:
+                try:
+                    data = np.array(bindata.get_data(text=key[-1],
+                                                     full3D=True))
+                except ValueError:
+                    # imeth == 6
+                    data = np.array(bindata.get_data(text=key[-1],
+                                                     full3D=False))
+        else:
+            data = np.array(bindata.get_alldata())
+
+        # uncomment line to remove extra dimensions from data
+        # data = _reshape_binary_data(data, 'V')
+        return data
+
+    def _querybinarydata_unstructured(self, key):
+        # get unstructured binary data in numpy array format.
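The full3D fallback used by both query methods above can also be applied with flopy.utils.binaryfile directly; this module treats the ValueError as the imeth == 6 (compact list) case. A minimal sketch, with the file name 'flow15.cbc' purely illustrative:

    from flopy.utils import binaryfile as bf

    cbc = bf.CellBudgetFile('flow15.cbc', precision='double')
    try:
        frf = cbc.get_data(text='FLOW RIGHT FACE', full3D=True)
    except ValueError:
        # record cannot be expanded to a full 3D array; keep raw records
        frf = cbc.get_data(text='FLOW RIGHT FACE', full3D=False)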
+        path = self.dataDict[key]
+        bintype = key[1]
+
+        bindata = self._get_binary_file_object(path, bintype, key)
+
+        if bintype == 'CBC':
+            try:
+                data = np.array(bindata.get_data(text=key[-1], full3D=True))
+            except ValueError:
+                data = np.array(bindata.get_data(text=key[-1], full3D=False))
+        else:
+            data = bindata.get_alldata()
+
+        # remove unneeded dimensions
+        data = _reshape_binary_data(data, 'U')
+
+        return data
+
+    def _get_binary_file_object(self, path, bintype, key):
+        # simple method that tries to open the binary file object using FloPy
+        if bintype == 'CBC':
+            try:
+                return bf.CellBudgetFile(path, precision='double')
+            except AssertionError:
+                raise AssertionError('{} does not '
+                                     'exist'.format(self.dataDict[key]))
+
+        elif bintype == 'HDS':
+            try:
+                return bf.HeadFile(path, precision='double')
+            except AssertionError:
+                raise AssertionError('{} does not '
+                                     'exist'.format(self.dataDict[key]))
+
+        elif bintype == 'DDN':
+            try:
+                return bf.HeadFile(path, text='drawdown', precision='double')
+            except AssertionError:
+                raise AssertionError('{} does not '
+                                     'exist'.format(self.dataDict[key]))
+
+        elif bintype == 'UCN':
+            try:
+                return bf.UcnFile(path, precision="single")
+            except AssertionError:
+                raise AssertionError('{} does not '
+                                     'exist'.format(self.dataDict[key]))
+
+        else:
+            raise AssertionError()
+
+    @staticmethod
+    def _get_vertices(mfdict, key):
+        """
+        Deprecated! Consider removing from code.
+
+        Parameters
+        ----------
+        key: binary query dictionary key
+
+        Returns
+        -------
+        information defining specified vertices for all model cells to be added
+        to xarray as coordinates.
+        cellid: (list) corresponds to the modflow CELL2d cell number
+        xcyc: (n x 2) dimensional Pandas object of tuples defining the CELL2d
+            center coordinates
+        nverts: (list) number of xy vertices corresponding to a cell
+        xv: (n x nverts) dimensional Pandas object of tuples. Contains x
+            vertices for a cell
+        yv: (n x nverts) dimensional Pandas object of tuples.
Contains y + vertices for a cell + topv: (n x nlayers) dimensional Pandas object of cell top elevations + corresponding to a row column location + botmv: (n x nlayers) dimensional Pandas object of cell bottom + elevations corresponding to a row column location + """ + + try: + import pandas as pd + except Exception as e: + msg = 'MFOutputRequester._get_vertices(): requires pandas' + raise ImportError(msg) + + mname = key[0] + cellid = mfdict[(mname, 'DISV8', 'CELL2D', 'cell2d_num')] + + cellxc = mfdict[(mname, 'DISV8', 'CELL2D', 'xc')] + cellyc = mfdict[(mname, 'DISV8', 'CELL2D', 'yc')] + xcyc = [(cellxc[i], cellyc[i]) for i in range(len(cellxc))] + xcyc = pd.Series(xcyc, dtype='object') + + nverts = mfdict[(mname, 'DISV8', 'CELL2D', 'nvert')] + vertnums = mfdict[(mname, 'DISV8', 'CELL2D', 'iv')] + vertid = mfdict[(mname, 'DISV8', 'VERTICES', 'vert_num')] + vertx = mfdict[(mname, 'DISV8', 'VERTICES', 'x')] + verty = mfdict[(mname, 'DISV8', 'VERTICES', 'y')] + # get vertices that correspond to CellID list + xv = [] + yv = [] + for line in vertnums: + tempx = [] + tempy = [] + for vert in line: + idx = vertid.index(vert) + tempx.append(vertx[idx]) + tempy.append(verty[idx]) + xv.append(tempx) + yv.append(tempy) + xv = pd.Series(xv, dtype='object') + yv = pd.Series(yv, dtype='object') + + top = np.array(mfdict[(mname, 'DISV8', 'CELLDATA', 'top')]) + botm = np.array(mfdict[(mname, 'DISV8', 'CELLDATA', 'botm')]) + top = top.tolist() + botm = botm.tolist() + # get cell top and bottom by layer + topv = list(zip(top, *botm[:-1])) + botmv = list(zip(*botm)) + topv = pd.Series(topv, dtype='object') + botmv = pd.Series(botmv, dtype='object') + + return cellid, xcyc, nverts, xv, yv, topv, botmv + + def _getbinaryfilepaths(self): + # model paths + self.modelpathdict = {} + for i in self.path.model_relative_path: + self.modelpathdict[i] = self.path.get_model_path(i) + sim_path = self.path.get_sim_path() + self.binarypathdict = {} + # check output control to see if a binary file is supposed to exist. + # Get path to that file + for i in self.modelpathdict: + if (i, 'oc', 'options', 'budget_filerecord') in self.mfdict: + cbc = self.mfdict[(i, 'oc', 'options', 'budget_filerecord')] + if cbc.get_data() is not None: + self.binarypathdict[(i, 'CBC')] = \ + os.path.join(sim_path, cbc.get_data()[0][0]) + + if (i, 'oc', 'options', 'head_filerecord') in self.mfdict: + hds = self.mfdict[(i, 'oc', 'options', 'head_filerecord')] + if hds.get_data() is not None: + self.binarypathdict[(i, 'HDS')] = \ + os.path.join(sim_path, hds.get_data()[0][0]) + + if (i, 'oc', 'options', 'drawdown_filerecord') in self.mfdict: + ddn = self.mfdict[(i, 'oc', 'options', 'drawdown_filerecord')] + if ddn.get_data() is not None: + self.binarypathdict[(i, 'DDN')] = \ + os.path.join(sim_path, ddn.get_data()[0][0]) + + self._setbinarykeys(self.binarypathdict) + + def _setbinarykeys(self, binarypathdict): + # check that if a binary file is supposed to exist, it does, and create + # a dictionary key to access that data + for key in binarypathdict: + path = binarypathdict[key] + if key[1] == 'CBC': + try: + readcbc = bf.CellBudgetFile(path, precision='double') + for record in readcbc.get_unique_record_names(): + name = record.decode("utf-8").strip(' ') + # store keys along with model name in ordered dict? 
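For reference, the OC file records that _getbinaryfilepaths interrogates store the output file name as the first field of their first record, which is why the code reads get_data()[0][0]. A hedged sketch of the same lookup from a loaded simulation (the model name 'flow15' and the simulation_data attribute are assumptions):

    mfdata = sim.simulation_data.mfdata
    budget_rec = mfdata[('flow15', 'oc', 'options', 'budget_filerecord')]
    if budget_rec.get_data() is not None:
        print('budget file:', budget_rec.get_data()[0][0])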
+ self.dataDict[(key[0], key[1], name)] = path + readcbc.close() + + except: + pass + + elif key[1] == 'HDS': + try: + readhead = bf.HeadFile(path, precision='double') + self.dataDict[(key[0], key[1], 'HEAD')] = path + readhead.close() + + except: + pass + + elif key[1] == 'DDN': + try: + readddn = bf.HeadFile(path, text='drawdown', + precision='double') + self.dataDict[(key[0], key[1], 'DRAWDOWN')] = path + readddn.close() + + except: + pass + + elif key[1] == 'UCN': + try: + readucn = bf.UcnFile(path, precision='single') + self.dataDict[(key[0], key[1], 'CONCENTRATION')] = path + readucn.close() + + except: + pass + + else: + pass + + @staticmethod + def getkeys(mfdict, path, print_keys=True): + # use a dummy key to get valid binary output keys + dummy_key = ('model', 'HDS', 'IamAdummy') + x = MFOutputRequester(mfdict, path, dummy_key) + keys = [i for i in x.dataDict] + if print_keys is True: + for key in keys: + print(key) + return x + + +def _reshape_binary_data(data, dtype=None): + # removes unnecessary dimensions from data returned by + # flopy.utils.binaryfile + time = len(data) + data = np.array(data) + if dtype is None: + return data + elif dtype == 'V': + nodes = len(data[0][0][0]) + data.shape = (time, -1, nodes) + elif dtype == 'U': + data.shape = (time, -1) + else: + err = "Invalid dtype flag supplied, valid are dtype='U', dtype='V'" + raise Exception(err) + return data diff --git a/flopy/mf6/utils/createpackages.py b/flopy/mf6/utils/createpackages.py index 41443982f8..bd626f1b66 100644 --- a/flopy/mf6/utils/createpackages.py +++ b/flopy/mf6/utils/createpackages.py @@ -1,631 +1,631 @@ -import os -import textwrap -from enum import Enum -from flopy.mf6.data import mfstructure, mfdatautil -from flopy.utils import datautil - -""" -createpackages.py is a utility script that reads in the file definition -metadata in the .dfn files to create the package classes in the modflow folder. -Run this script any time changes are made to the .dfn files. 
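In practice, regenerating the package classes after a .dfn change amounts to one call; a minimal sketch, assuming a FloPy source checkout so the rewritten flopy/mf6/modflow/mf*.py files land in the working tree:

    from flopy.mf6.utils import createpackages

    createpackages.create_packages()   # rewrites flopy/mf6/modflow/mf*.py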
-""" - - -class PackageLevel(Enum): - sim_level = 0 - model_level = 1 - - -def build_doc_string(param_name, param_type, param_desc, indent): - return '{}{} : {}\n{}* {}'.format(indent, param_name, param_type, indent*2, - param_desc) - - -def generator_type(data_type): - if data_type == mfstructure.DataType.scalar_keyword or \ - data_type == mfstructure.DataType.scalar: - # regular scalar - return 'ScalarTemplateGenerator' - elif data_type == mfstructure.DataType.scalar_keyword_transient or \ - data_type == mfstructure.DataType.scalar_transient: - # transient scalar - return 'ScalarTemplateGenerator' - elif data_type == mfstructure.DataType.array: - # array - return 'ArrayTemplateGenerator' - elif data_type == mfstructure.DataType.array_transient: - # transient array - return 'ArrayTemplateGenerator' - elif data_type == mfstructure.DataType.list: - # list - return 'ListTemplateGenerator' - elif data_type == mfstructure.DataType.list_transient or \ - data_type == mfstructure.DataType.list_multiple: - # transient or multiple list - return 'ListTemplateGenerator' - - -def clean_class_string(name): - if len(name) > 0: - clean_string = name.replace(' ', '_') - clean_string = clean_string.replace('-', '_') - version = mfstructure.MFStructure().get_version_string() - # FIX: remove all numbers - if clean_string[-1] == version: - clean_string = clean_string[:-1] - return clean_string - return name - - -def build_dfn_string(dfn_list): - dfn_string = ' dfn = [' - line_length = len(dfn_string) - leading_spaces = ' ' * line_length - first_di = True - # process all data items - for data_item in dfn_list: - line_length += 1 - if not first_di: - dfn_string = '{},\n{}'.format(dfn_string, leading_spaces) - line_length = len(leading_spaces) - else: - first_di = False - dfn_string = '{}{}'.format(dfn_string, '[') - first_line = True - # process each line in a data item - for line in data_item: - line = line.strip() - # do not include the description of longname - if not line.lower().startswith('description') and \ - not line.lower().startswith('longname'): - line = line.replace('"', "'") - line_length += len(line) + 4 - if not first_line: - dfn_string = '{},'.format(dfn_string) - if line_length < 77: - # added text fits on the current line - if first_line: - dfn_string = '{}"{}"'.format(dfn_string, line) - else: - dfn_string = '{} "{}"'.format(dfn_string, line) - else: - # added text does not fit on the current line - line_length = len(line) + len(leading_spaces) + 2 - if line_length > 79: - # added text too long to fit on a single line, wrap - # text as needed - line = '"{}"'.format(line) - lines = textwrap.wrap(line, 75 - len(leading_spaces), - drop_whitespace = True) - lines[0] = '{} {}'.format(leading_spaces, lines[0]) - line_join = ' "\n{} "'.format(leading_spaces) - dfn_string = '{}\n{}'.format(dfn_string, - line_join.join(lines)) - else: - dfn_string = '{}\n{} "{}"'.format(dfn_string, - leading_spaces, line) - first_line = False - - dfn_string = '{}{}'.format(dfn_string, ']') - dfn_string = '{}{}'.format(dfn_string, ']') - return dfn_string - - -def create_init_var(clean_ds_name, data_structure_name, init_val=None): - if init_val is None: - init_val = clean_ds_name - - init_var = ' self.{} = self.build_mfdata('.format(clean_ds_name) - leading_spaces = ' ' * len(init_var) - if len(init_var) + len(data_structure_name) + 2 > 79: - second_line = '\n "{}",'.format(data_structure_name) - if len(second_line) + len(clean_ds_name) + 2 > 79: - init_var = '{}{}\n {})'.format(init_var, second_line, - init_val) - 
else: - init_var = '{}{} {})'.format(init_var, second_line, init_val) - else: - init_var = '{}"{}",'.format(init_var, data_structure_name) - if len(init_var) + len(clean_ds_name) + 2 > 79: - init_var = '{}\n{}{})'.format(init_var, leading_spaces, - init_val) - else: - init_var = '{} {})'.format(init_var, init_val) - return init_var - - -def create_basic_init(clean_ds_name): - return ' self.{} = {}\n'.format(clean_ds_name, clean_ds_name) - - -def create_property(clean_ds_name): - return " {} = property(get_{}, set_{}" \ - ")".format(clean_ds_name, - clean_ds_name, - clean_ds_name) - - -def format_var_list(base_string, var_list, is_tuple=False): - if is_tuple: - base_string = '{}('.format(base_string) - extra_chars = 4 - else: - extra_chars = 2 - line_length = len(base_string) - leading_spaces = ' ' * line_length - # determine if any variable name is too long to fit - for item in var_list: - if line_length + len(item) + extra_chars > 80: - leading_spaces = ' ' - base_string = '{}\n{}'.format(base_string, leading_spaces) - line_length = len(leading_spaces) - break - - for index, item in enumerate(var_list): - if is_tuple: - item = "'{}'".format(item) - if index == len(var_list) - 1: - next_var_str = item - else: - next_var_str = '{},'.format(item) - line_length += len(item) + extra_chars - if line_length > 80: - base_string = '{}\n{}{}'.format(base_string, leading_spaces, - next_var_str) - else: - if base_string[-1] == ',': - base_string = '{} '.format(base_string) - base_string = '{}{}'.format(base_string, next_var_str) - if is_tuple: - return '{}))'.format(base_string) - else: - return '{})'.format(base_string) - - -def create_package_init_var(parameter_name, package_abbr, data_name): - one_line = ' self._{}_package = self.build_child_package('\ - .format(package_abbr) - one_line_b = '"{}", {},'.format(package_abbr, parameter_name) - leading_spaces = ' ' * len(one_line) - two_line = '\n{}"{}",'.format(leading_spaces, data_name) - three_line = '\n{}self._{}_filerecord)'.format(leading_spaces, package_abbr) - return '{}{}{}{}'.format(one_line, one_line_b, two_line, three_line) - - -def add_var(init_vars, class_vars, init_param_list, package_properties, - doc_string, data_structure_dict, default_value, name, - python_name, description, path, data_type, - basic_init=False, construct_package=None, construct_data=None, - parameter_name=None, set_param_list=None): - if set_param_list is None: - set_param_list = [] - clean_ds_name = datautil.clean_name(python_name) - if construct_package is None: - # add variable initialization lines - if basic_init: - init_vars.append(create_basic_init(clean_ds_name)) - else: - init_vars.append(create_init_var(clean_ds_name, name)) - # add to parameter list - if default_value is None: - default_value = 'None' - init_param_list.append('{}={}'.format(clean_ds_name, default_value)) - # add to set parameter list - set_param_list.append('{}={}'.format(clean_ds_name, - clean_ds_name)) - else: - clean_parameter_name = datautil.clean_name(parameter_name) - # init hidden variable - init_vars.append(create_init_var('_{}'.format(clean_ds_name), name, - 'None')) - # init child package - init_vars.append(create_package_init_var(clean_parameter_name, - construct_package, - construct_data)) - # add to parameter list - init_param_list.append('{}=None'.format(clean_parameter_name)) - # add to set parameter list - set_param_list.append('{}={}'.format(clean_parameter_name, - clean_parameter_name)) - - package_properties.append(create_property(clean_ds_name)) - 
doc_string.add_parameter(description, model_parameter=True) - data_structure_dict[python_name] = 0 - if class_vars is not None: - gen_type = generator_type(data_type) - if gen_type != 'ScalarTemplateGenerator': - new_class_var = ' {} = {}('.format(clean_ds_name, - gen_type) - class_vars.append(format_var_list(new_class_var, path, True)) - return gen_type - return None - - -def build_init_string(init_string, init_param_list, - whitespace=' '): - line_chars = len(init_string) - for index, param in enumerate(init_param_list): - if index + 1 < len(init_param_list): - line_chars += len(param) + 2 - else: - line_chars += len(param) + 3 - if line_chars > 79: - if len(param) + len(whitespace) + 1 > 79: - # try to break apart at = sign - param_list = param.split('=') - if len(param_list) == 2: - init_string = '{},\n{}{}=\n{}{}'.format( - init_string, whitespace, param_list[0], whitespace, - param_list[1]) - line_chars = len(param_list[1]) + len(whitespace) + 1 - continue - init_string = '{},\n{}{}'.format( - init_string, whitespace, param) - line_chars = len(param) + len(whitespace) + 1 - else: - init_string = '{}, {}'.format(init_string, param) - return '{}):\n'.format(init_string) - - -def build_model_load(model_type): - model_load_c = ' Methods\n -------\n' \ - ' load : (simulation : MFSimulationData, model_name : ' \ - 'string,\n namfile : string, ' \ - 'version : string, exe_name : string,\n model_ws : '\ - 'string, strict : boolean) : MFSimulation\n' \ - ' a class method that loads a model from files' \ - '\n """' - - model_load = " @classmethod\n def load(cls, simulation, structure, "\ - "modelname='NewModel',\n " \ - "model_nam_file='modflowtest.nam', version='mf6',\n" \ - " exe_name='mf6.exe', strict=True, " \ - "model_rel_path='.',\n" \ - " load_only=None):\n " \ - "return mfmodel.MFModel.load_base(simulation, structure, " \ - "modelname,\n " \ - "model_nam_file, '{}', version,\n" \ - " exe_name, strict, "\ - "model_rel_path,\n" \ - " load_only)" \ - "\n".format(model_type) - return model_load, model_load_c - - -def build_model_init_vars(param_list): - init_var_list = [] - for param in param_list: - param_parts = param.split('=') - init_var_list.append(' self.name_file.{}.set_data({}' - ')'.format(param_parts[0], param_parts[0])) - return '\n'.join(init_var_list) - - -def create_packages(): - indent = ' ' - init_string_def = ' def __init__(self' - - # load JSON file - file_structure = mfstructure.MFStructure(load_from_dfn_files=True) - sim_struct = file_structure.sim_struct - - # assemble package list of buildable packages - package_list = [] - package_list.append( - (sim_struct.name_file_struct_obj, PackageLevel.sim_level, '', - sim_struct.name_file_struct_obj.dfn_list, - sim_struct.name_file_struct_obj.file_type)) - for package in sim_struct.package_struct_objs.values(): - # add simulation level package to list - package_list.append((package, PackageLevel.sim_level, '', - package.dfn_list, package.file_type)) - for package in sim_struct.utl_struct_objs.values(): - # add utility packages to list - package_list.append((package, PackageLevel.model_level, 'utl', - package.dfn_list, package.file_type)) - for model_key, model in sim_struct.model_struct_objs.items(): - package_list.append( - (model.name_file_struct_obj, PackageLevel.model_level, model_key, - model.name_file_struct_obj.dfn_list, - model.name_file_struct_obj.file_type)) - for package in model.package_struct_objs.values(): - package_list.append((package, PackageLevel.model_level, - model_key, package.dfn_list, - 
package.file_type)) - - util_path, tail = os.path.split(os.path.realpath(__file__)) - init_file = open(os.path.join(util_path, '..', 'modflow', '__init__.py'), - 'w') - init_file.write('# imports\n') - init_file.write('from .mfsimulation import MFSimulation\n') - - nam_import_string = 'from .. import mfmodel\nfrom ..data.mfdatautil ' \ - 'import ListTemplateGenerator, ArrayTemplateGenerator' - - # loop through packages list - for package in package_list: - data_structure_dict = {} - package_properties = [] - init_vars = [] - init_param_list = [] - set_param_list = [] - class_vars = [] - template_gens = [] - dfn_string = build_dfn_string(package[3]) - package_abbr = clean_class_string( - '{}{}'.format(clean_class_string(package[2]), - package[0].file_type)).lower() - package_name = clean_class_string( - '{}{}{}'.format(clean_class_string(package[2]), - package[0].file_prefix, - package[0].file_type)).lower() - if package[0].description: - doc_string = mfdatautil.MFDocString(package[0].description) - else: - if package[2]: - package_container_text = ' within a {} model'.format( - package[2]) - else: - package_container_text = '' - doc_string = mfdatautil.MFDocString( - 'Modflow{} defines a {} package' - '{}.'.format(package_name.title(), - package[0].file_type, - package_container_text)) - - if package[0].dfn_type == mfstructure.DfnType.exch_file: - add_var(init_vars, None, init_param_list, package_properties, - doc_string, data_structure_dict, None, - 'exgtype', 'exgtype', - build_doc_string('exgtype', '', - 'is the exchange type (GWF-GWF or ' - 'GWF-GWT).', indent), None, None, True) - add_var(init_vars, None, init_param_list, package_properties, - doc_string, data_structure_dict, None, - 'exgmnamea', 'exgmnamea', - build_doc_string('exgmnamea', '', - 'is the name of the first model that is ' - 'part of this exchange.', indent), - None, None, True) - add_var(init_vars, None, init_param_list, package_properties, - doc_string, data_structure_dict, None, - 'exgmnameb', 'exgmnameb', - build_doc_string('exgmnameb', '', - 'is the name of the second model that is ' - 'part of this exchange.', indent), - None, None, True) - init_vars.append( - ' simulation.register_exchange_file(self)\n') - - # loop through all blocks - for block in package[0].blocks.values(): - for data_structure in block.data_structures.values(): - # only create one property for each unique data structure name - if data_structure.name not in data_structure_dict: - tg = add_var( - init_vars, class_vars, init_param_list, - package_properties, doc_string, data_structure_dict, - data_structure.default_value, data_structure.name, - data_structure.python_name, - data_structure.get_doc_string(79, indent, indent), - data_structure.path, data_structure.get_datatype(), - False, data_structure.construct_package, - data_structure.construct_data, - data_structure.parameter_name, set_param_list) - if tg is not None and tg not in template_gens: - template_gens.append(tg) - - import_string = 'from .. 
import mfpackage' - if template_gens: - import_string = '{}\nfrom ..data.mfdatautil import' \ - ' '.format(import_string) - first_string = True - for template in template_gens: - if first_string: - import_string = '{}{}'.format(import_string, template) - first_string = False - else: - import_string = '{}, {}'.format(import_string, template) - # add extra docstrings for additional variables - doc_string.add_parameter(' filename : String\n ' - 'File name for this package.') - doc_string.add_parameter(' pname : String\n ' - 'Package name for this package.') - doc_string.add_parameter(' parent_file : MFPackage\n ' - 'Parent package file that references this ' - 'package. Only needed for\n utility ' - 'packages (mfutl*). For example, mfutllaktab ' - 'package must have \n a mfgwflak ' - 'package parent_file.') - - # build package builder class string - init_vars.append(' self._init_complete = True') - init_vars = '\n'.join(init_vars) - package_short_name = clean_class_string(package[0].file_type).lower() - class_def_string = 'class Modflow{}(mfpackage.MFPackage):\n'.format( - package_name.title()) - class_def_string = class_def_string.replace('-', '_') - class_var_string = '{}\n package_abbr = "{}"\n _package_type = ' \ - '"{}"\n dfn_file_name = "{}"' \ - '\n'.format('\n'.join(class_vars), package_abbr, - package[4], package[0].dfn_file_name) - init_string_full = init_string_def - init_string_model = '{}, simulation'.format(init_string_def) - # add variables to init string - doc_string.add_parameter(' loading_package : bool\n ' - 'Do not set this parameter. It is intended ' - 'for debugging and internal\n ' - 'processing purposes only.', - beginning_of_list=True) - if package[1] == PackageLevel.sim_level: - doc_string.add_parameter(' simulation : MFSimulation\n ' - 'Simulation that this package is a part ' - 'of. Package is automatically\n ' - 'added to simulation when it is ' - 'initialized.', beginning_of_list=True) - init_string_full = '{}, simulation, loading_package=' \ - 'False'.format(init_string_full) - else: - doc_string.add_parameter(' model : MFModel\n ' - 'Model that this package is a part of. ' - 'Package is automatically\n added ' - 'to model when it is initialized.', - beginning_of_list=True) - init_string_full = '{}, model, loading_package=False'.format( - init_string_full) - init_param_list.append('filename=None') - init_param_list.append('pname=None') - init_param_list.append('parent_file=None') - init_string_full = build_init_string(init_string_full, init_param_list) - - # build init code - if package[1] == PackageLevel.sim_level: - init_var = 'simulation' - else: - init_var = 'model' - parent_init_string = ' super(Modflow{}, self)' \ - '.__init__('.format(package_name.title()) - spaces = ' ' * len(parent_init_string) - parent_init_string = '{}{}, "{}", filename, pname,\n{}' \ - 'loading_package, parent_file)\n\n' \ - ' # set up variables'.format( - parent_init_string, init_var, package_short_name, spaces) - comment_string = '# DO NOT MODIFY THIS FILE DIRECTLY. 
THIS FILE ' \ - 'MUST BE CREATED BY\n# mf6/utils/createpackages.py' - # assemble full package string - package_string = '{}\n{}\n\n\n{}{}\n{}\n{}\n\n{}{}\n{}\n'.format( - comment_string, import_string, class_def_string, - doc_string.get_doc_string(), class_var_string, dfn_string, - init_string_full, parent_init_string, init_vars) - - # open new Packages file - pb_file = open(os.path.join(util_path, '..', 'modflow', - 'mf{}.py'.format(package_name)), 'w') - pb_file.write(package_string) - - if package[2] == 'utl' and package_abbr != 'utltab': - set_param_list.append('filename=filename') - set_param_list.append('pname=pname') - set_param_list.append('parent_file=self._cpparent') - whsp_1 = ' ' - whsp_2 = ' ' - - chld_doc_string = ' """\n Utl{}Packages is a container ' \ - 'class for the ModflowUtl{} class.\n\n ' \ - 'Methods\n ----------' \ - '\n'.format(package_short_name, - package_short_name) - - # write out child packages class - chld_cls = '\n\nclass Utl{}Packages(mfpackage.MFChildPackage' \ - 's):\n'.format(package_short_name) - chld_var = ' package_abbr = "utl{}packages"\n\n'.format( - package_short_name) - chld_init = ' def initialize(self' - chld_init = build_init_string(chld_init, init_param_list[:-1], - whsp_1) - init_pkg = '\n self._init_package(new_package, filename)' - params_init = ' new_package = ModflowUtl{}(' \ - 'self._model'.format(package_short_name) - params_init = build_init_string(params_init, set_param_list, whsp_2) - chld_doc_string = '{} initialize\n Initializes a new ' \ - 'ModflowUtl{} package removing any sibling ' \ - 'child\n packages attached to the same ' \ - 'parent package. See ModflowUtl{} init\n ' \ - ' documentation for definition of ' \ - 'parameters.\n'.format(chld_doc_string, - package_short_name, - package_short_name) - - chld_appn = '' - params_appn = '' - append_pkg = '' - if package_abbr != 'utlobs': # Hard coded obs no multi-pkg support - chld_appn = '\n\n def append_package(self' - chld_appn = build_init_string(chld_appn, init_param_list[:-1], - whsp_1) - append_pkg = '\n self._append_package(new_package, ' \ - 'filename)' - params_appn = ' new_package = ModflowUtl{}(' \ - 'self._model'.format(package_short_name) - params_appn = build_init_string(params_appn, set_param_list, - whsp_2) - chld_doc_string = '{} append_package\n Adds a ' \ - 'new ModflowUtl{} package to the container.' 
\ - ' See ModflowUtl{}\n init ' \ - 'documentation for definition of ' \ - 'parameters.\n'.format(chld_doc_string, - package_short_name, - package_short_name) - chld_doc_string = '{} """\n'.format(chld_doc_string) - packages_str = '{}{}{}{}{}{}{}{}{}\n'.format(chld_cls, - chld_doc_string, - chld_var, chld_init, - params_init[:-2], - init_pkg, chld_appn, - params_appn[:-2], - append_pkg,) - pb_file.write(packages_str) - pb_file.close() - - init_file.write('from .mf{} import ' - 'Modflow{}\n'.format(package_name, - package_name.title())) - - if package[0].dfn_type == mfstructure.DfnType.model_name_file: - # build model file - model_param_list = init_param_list[:-3] - init_vars = build_model_init_vars(model_param_list) - - model_param_list.insert(0, "model_rel_path='.'") - model_param_list.insert(0, "exe_name='mf6.exe'") - model_param_list.insert(0, "version='mf6'") - model_param_list.insert(0, 'model_nam_file=None') - model_param_list.insert(0, "modelname='model'") - model_param_list.append("**kwargs") - init_string_model = build_init_string(init_string_model, - model_param_list) - model_name = clean_class_string(package[2]) - class_def_string = 'class Modflow{}(mfmodel.MFModel):\n'.format( - model_name.capitalize()) - class_def_string = class_def_string.replace('-', '_') - doc_string.add_parameter(' sim : MFSimulation\n ' - 'Simulation that this model is a part ' - 'of. Model is automatically\n ' - 'added to simulation when it is ' - 'initialized.', - beginning_of_list=True, - model_parameter=True) - doc_string.description = 'Modflow{} defines a {} model'.format( - model_name, model_name) - class_var_string = " model_type = '{}'\n".format(model_name) - mparent_init_string = ' super(Modflow{}, self)' \ - '.__init__('.format(model_name.capitalize()) - spaces = ' ' * len(mparent_init_string) - mparent_init_string = "{}simulation, model_type='{}6',\n{}" \ - "modelname=modelname,\n{}" \ - "model_nam_file=model_nam_file,\n{}" \ - "version=version, exe_name=exe_name,\n{}" \ - "model_rel_path=model_rel_path,\n{}" \ - "**kwargs" \ - ")\n".format(mparent_init_string, model_name, - spaces, - spaces, spaces, spaces, spaces) - load_txt, doc_text = build_model_load(model_name) - package_string = '{}\n{}\n\n\n{}{}\n{}\n{}\n{}{}\n{}\n\n{}'.format( - comment_string, nam_import_string, class_def_string, - doc_string.get_doc_string(True), doc_text, class_var_string, - init_string_model, mparent_init_string, init_vars, load_txt) - md_file = open(os.path.join(util_path, '..', 'modflow', - 'mf{}.py'.format(model_name)), - 'w') - md_file.write(package_string) - md_file.close() - init_file.write('from .mf{} import ' - 'Modflow{}\n'.format(model_name, - model_name.capitalize())) - init_file.close() - - -if __name__ == '__main__': - create_packages() +import os +import textwrap +from enum import Enum +from flopy.mf6.data import mfstructure, mfdatautil +from flopy.utils import datautil + +""" +createpackages.py is a utility script that reads in the file definition +metadata in the .dfn files to create the package classes in the modflow folder. +Run this script any time changes are made to the .dfn files. 
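The classes this script emits are ordinary FloPy package classes, importable from flopy.mf6 once regenerated. A short usage sketch of a few generated classes (all argument values are illustrative):

    import flopy

    sim = flopy.mf6.MFSimulation(sim_name='demo')
    tdis = flopy.mf6.ModflowTdis(sim)                      # generated class
    gwf = flopy.mf6.ModflowGwf(sim, modelname='demo')
    dis = flopy.mf6.ModflowGwfdis(gwf, nlay=1, nrow=10, ncol=10)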
+""" + + +class PackageLevel(Enum): + sim_level = 0 + model_level = 1 + + +def build_doc_string(param_name, param_type, param_desc, indent): + return '{}{} : {}\n{}* {}'.format(indent, param_name, param_type, indent*2, + param_desc) + + +def generator_type(data_type): + if data_type == mfstructure.DataType.scalar_keyword or \ + data_type == mfstructure.DataType.scalar: + # regular scalar + return 'ScalarTemplateGenerator' + elif data_type == mfstructure.DataType.scalar_keyword_transient or \ + data_type == mfstructure.DataType.scalar_transient: + # transient scalar + return 'ScalarTemplateGenerator' + elif data_type == mfstructure.DataType.array: + # array + return 'ArrayTemplateGenerator' + elif data_type == mfstructure.DataType.array_transient: + # transient array + return 'ArrayTemplateGenerator' + elif data_type == mfstructure.DataType.list: + # list + return 'ListTemplateGenerator' + elif data_type == mfstructure.DataType.list_transient or \ + data_type == mfstructure.DataType.list_multiple: + # transient or multiple list + return 'ListTemplateGenerator' + + +def clean_class_string(name): + if len(name) > 0: + clean_string = name.replace(' ', '_') + clean_string = clean_string.replace('-', '_') + version = mfstructure.MFStructure().get_version_string() + # FIX: remove all numbers + if clean_string[-1] == version: + clean_string = clean_string[:-1] + return clean_string + return name + + +def build_dfn_string(dfn_list): + dfn_string = ' dfn = [' + line_length = len(dfn_string) + leading_spaces = ' ' * line_length + first_di = True + # process all data items + for data_item in dfn_list: + line_length += 1 + if not first_di: + dfn_string = '{},\n{}'.format(dfn_string, leading_spaces) + line_length = len(leading_spaces) + else: + first_di = False + dfn_string = '{}{}'.format(dfn_string, '[') + first_line = True + # process each line in a data item + for line in data_item: + line = line.strip() + # do not include the description of longname + if not line.lower().startswith('description') and \ + not line.lower().startswith('longname'): + line = line.replace('"', "'") + line_length += len(line) + 4 + if not first_line: + dfn_string = '{},'.format(dfn_string) + if line_length < 77: + # added text fits on the current line + if first_line: + dfn_string = '{}"{}"'.format(dfn_string, line) + else: + dfn_string = '{} "{}"'.format(dfn_string, line) + else: + # added text does not fit on the current line + line_length = len(line) + len(leading_spaces) + 2 + if line_length > 79: + # added text too long to fit on a single line, wrap + # text as needed + line = '"{}"'.format(line) + lines = textwrap.wrap(line, 75 - len(leading_spaces), + drop_whitespace = True) + lines[0] = '{} {}'.format(leading_spaces, lines[0]) + line_join = ' "\n{} "'.format(leading_spaces) + dfn_string = '{}\n{}'.format(dfn_string, + line_join.join(lines)) + else: + dfn_string = '{}\n{} "{}"'.format(dfn_string, + leading_spaces, line) + first_line = False + + dfn_string = '{}{}'.format(dfn_string, ']') + dfn_string = '{}{}'.format(dfn_string, ']') + return dfn_string + + +def create_init_var(clean_ds_name, data_structure_name, init_val=None): + if init_val is None: + init_val = clean_ds_name + + init_var = ' self.{} = self.build_mfdata('.format(clean_ds_name) + leading_spaces = ' ' * len(init_var) + if len(init_var) + len(data_structure_name) + 2 > 79: + second_line = '\n "{}",'.format(data_structure_name) + if len(second_line) + len(clean_ds_name) + 2 > 79: + init_var = '{}{}\n {})'.format(init_var, second_line, + init_val) + 
else: + init_var = '{}{} {})'.format(init_var, second_line, init_val) + else: + init_var = '{}"{}",'.format(init_var, data_structure_name) + if len(init_var) + len(clean_ds_name) + 2 > 79: + init_var = '{}\n{}{})'.format(init_var, leading_spaces, + init_val) + else: + init_var = '{} {})'.format(init_var, init_val) + return init_var + + +def create_basic_init(clean_ds_name): + return ' self.{} = {}\n'.format(clean_ds_name, clean_ds_name) + + +def create_property(clean_ds_name): + return " {} = property(get_{}, set_{}" \ + ")".format(clean_ds_name, + clean_ds_name, + clean_ds_name) + + +def format_var_list(base_string, var_list, is_tuple=False): + if is_tuple: + base_string = '{}('.format(base_string) + extra_chars = 4 + else: + extra_chars = 2 + line_length = len(base_string) + leading_spaces = ' ' * line_length + # determine if any variable name is too long to fit + for item in var_list: + if line_length + len(item) + extra_chars > 80: + leading_spaces = ' ' + base_string = '{}\n{}'.format(base_string, leading_spaces) + line_length = len(leading_spaces) + break + + for index, item in enumerate(var_list): + if is_tuple: + item = "'{}'".format(item) + if index == len(var_list) - 1: + next_var_str = item + else: + next_var_str = '{},'.format(item) + line_length += len(item) + extra_chars + if line_length > 80: + base_string = '{}\n{}{}'.format(base_string, leading_spaces, + next_var_str) + else: + if base_string[-1] == ',': + base_string = '{} '.format(base_string) + base_string = '{}{}'.format(base_string, next_var_str) + if is_tuple: + return '{}))'.format(base_string) + else: + return '{})'.format(base_string) + + +def create_package_init_var(parameter_name, package_abbr, data_name): + one_line = ' self._{}_package = self.build_child_package('\ + .format(package_abbr) + one_line_b = '"{}", {},'.format(package_abbr, parameter_name) + leading_spaces = ' ' * len(one_line) + two_line = '\n{}"{}",'.format(leading_spaces, data_name) + three_line = '\n{}self._{}_filerecord)'.format(leading_spaces, package_abbr) + return '{}{}{}{}'.format(one_line, one_line_b, two_line, three_line) + + +def add_var(init_vars, class_vars, init_param_list, package_properties, + doc_string, data_structure_dict, default_value, name, + python_name, description, path, data_type, + basic_init=False, construct_package=None, construct_data=None, + parameter_name=None, set_param_list=None): + if set_param_list is None: + set_param_list = [] + clean_ds_name = datautil.clean_name(python_name) + if construct_package is None: + # add variable initialization lines + if basic_init: + init_vars.append(create_basic_init(clean_ds_name)) + else: + init_vars.append(create_init_var(clean_ds_name, name)) + # add to parameter list + if default_value is None: + default_value = 'None' + init_param_list.append('{}={}'.format(clean_ds_name, default_value)) + # add to set parameter list + set_param_list.append('{}={}'.format(clean_ds_name, + clean_ds_name)) + else: + clean_parameter_name = datautil.clean_name(parameter_name) + # init hidden variable + init_vars.append(create_init_var('_{}'.format(clean_ds_name), name, + 'None')) + # init child package + init_vars.append(create_package_init_var(clean_parameter_name, + construct_package, + construct_data)) + # add to parameter list + init_param_list.append('{}=None'.format(clean_parameter_name)) + # add to set parameter list + set_param_list.append('{}={}'.format(clean_parameter_name, + clean_parameter_name)) + + package_properties.append(create_property(clean_ds_name)) + 
doc_string.add_parameter(description, model_parameter=True) + data_structure_dict[python_name] = 0 + if class_vars is not None: + gen_type = generator_type(data_type) + if gen_type != 'ScalarTemplateGenerator': + new_class_var = ' {} = {}('.format(clean_ds_name, + gen_type) + class_vars.append(format_var_list(new_class_var, path, True)) + return gen_type + return None + + +def build_init_string(init_string, init_param_list, + whitespace=' '): + line_chars = len(init_string) + for index, param in enumerate(init_param_list): + if index + 1 < len(init_param_list): + line_chars += len(param) + 2 + else: + line_chars += len(param) + 3 + if line_chars > 79: + if len(param) + len(whitespace) + 1 > 79: + # try to break apart at = sign + param_list = param.split('=') + if len(param_list) == 2: + init_string = '{},\n{}{}=\n{}{}'.format( + init_string, whitespace, param_list[0], whitespace, + param_list[1]) + line_chars = len(param_list[1]) + len(whitespace) + 1 + continue + init_string = '{},\n{}{}'.format( + init_string, whitespace, param) + line_chars = len(param) + len(whitespace) + 1 + else: + init_string = '{}, {}'.format(init_string, param) + return '{}):\n'.format(init_string) + + +def build_model_load(model_type): + model_load_c = ' Methods\n -------\n' \ + ' load : (simulation : MFSimulationData, model_name : ' \ + 'string,\n namfile : string, ' \ + 'version : string, exe_name : string,\n model_ws : '\ + 'string, strict : boolean) : MFSimulation\n' \ + ' a class method that loads a model from files' \ + '\n """' + + model_load = " @classmethod\n def load(cls, simulation, structure, "\ + "modelname='NewModel',\n " \ + "model_nam_file='modflowtest.nam', version='mf6',\n" \ + " exe_name='mf6.exe', strict=True, " \ + "model_rel_path='.',\n" \ + " load_only=None):\n " \ + "return mfmodel.MFModel.load_base(simulation, structure, " \ + "modelname,\n " \ + "model_nam_file, '{}', version,\n" \ + " exe_name, strict, "\ + "model_rel_path,\n" \ + " load_only)" \ + "\n".format(model_type) + return model_load, model_load_c + + +def build_model_init_vars(param_list): + init_var_list = [] + for param in param_list: + param_parts = param.split('=') + init_var_list.append(' self.name_file.{}.set_data({}' + ')'.format(param_parts[0], param_parts[0])) + return '\n'.join(init_var_list) + + +def create_packages(): + indent = ' ' + init_string_def = ' def __init__(self' + + # load JSON file + file_structure = mfstructure.MFStructure(load_from_dfn_files=True) + sim_struct = file_structure.sim_struct + + # assemble package list of buildable packages + package_list = [] + package_list.append( + (sim_struct.name_file_struct_obj, PackageLevel.sim_level, '', + sim_struct.name_file_struct_obj.dfn_list, + sim_struct.name_file_struct_obj.file_type)) + for package in sim_struct.package_struct_objs.values(): + # add simulation level package to list + package_list.append((package, PackageLevel.sim_level, '', + package.dfn_list, package.file_type)) + for package in sim_struct.utl_struct_objs.values(): + # add utility packages to list + package_list.append((package, PackageLevel.model_level, 'utl', + package.dfn_list, package.file_type)) + for model_key, model in sim_struct.model_struct_objs.items(): + package_list.append( + (model.name_file_struct_obj, PackageLevel.model_level, model_key, + model.name_file_struct_obj.dfn_list, + model.name_file_struct_obj.file_type)) + for package in model.package_struct_objs.values(): + package_list.append((package, PackageLevel.model_level, + model_key, package.dfn_list, + 
package.file_type)) + + util_path, tail = os.path.split(os.path.realpath(__file__)) + init_file = open(os.path.join(util_path, '..', 'modflow', '__init__.py'), + 'w') + init_file.write('# imports\n') + init_file.write('from .mfsimulation import MFSimulation\n') + + nam_import_string = 'from .. import mfmodel\nfrom ..data.mfdatautil ' \ + 'import ListTemplateGenerator, ArrayTemplateGenerator' + + # loop through packages list + for package in package_list: + data_structure_dict = {} + package_properties = [] + init_vars = [] + init_param_list = [] + set_param_list = [] + class_vars = [] + template_gens = [] + dfn_string = build_dfn_string(package[3]) + package_abbr = clean_class_string( + '{}{}'.format(clean_class_string(package[2]), + package[0].file_type)).lower() + package_name = clean_class_string( + '{}{}{}'.format(clean_class_string(package[2]), + package[0].file_prefix, + package[0].file_type)).lower() + if package[0].description: + doc_string = mfdatautil.MFDocString(package[0].description) + else: + if package[2]: + package_container_text = ' within a {} model'.format( + package[2]) + else: + package_container_text = '' + doc_string = mfdatautil.MFDocString( + 'Modflow{} defines a {} package' + '{}.'.format(package_name.title(), + package[0].file_type, + package_container_text)) + + if package[0].dfn_type == mfstructure.DfnType.exch_file: + add_var(init_vars, None, init_param_list, package_properties, + doc_string, data_structure_dict, None, + 'exgtype', 'exgtype', + build_doc_string('exgtype', '', + 'is the exchange type (GWF-GWF or ' + 'GWF-GWT).', indent), None, None, True) + add_var(init_vars, None, init_param_list, package_properties, + doc_string, data_structure_dict, None, + 'exgmnamea', 'exgmnamea', + build_doc_string('exgmnamea', '', + 'is the name of the first model that is ' + 'part of this exchange.', indent), + None, None, True) + add_var(init_vars, None, init_param_list, package_properties, + doc_string, data_structure_dict, None, + 'exgmnameb', 'exgmnameb', + build_doc_string('exgmnameb', '', + 'is the name of the second model that is ' + 'part of this exchange.', indent), + None, None, True) + init_vars.append( + ' simulation.register_exchange_file(self)\n') + + # loop through all blocks + for block in package[0].blocks.values(): + for data_structure in block.data_structures.values(): + # only create one property for each unique data structure name + if data_structure.name not in data_structure_dict: + tg = add_var( + init_vars, class_vars, init_param_list, + package_properties, doc_string, data_structure_dict, + data_structure.default_value, data_structure.name, + data_structure.python_name, + data_structure.get_doc_string(79, indent, indent), + data_structure.path, data_structure.get_datatype(), + False, data_structure.construct_package, + data_structure.construct_data, + data_structure.parameter_name, set_param_list) + if tg is not None and tg not in template_gens: + template_gens.append(tg) + + import_string = 'from .. 
import mfpackage' + if template_gens: + import_string = '{}\nfrom ..data.mfdatautil import' \ + ' '.format(import_string) + first_string = True + for template in template_gens: + if first_string: + import_string = '{}{}'.format(import_string, template) + first_string = False + else: + import_string = '{}, {}'.format(import_string, template) + # add extra docstrings for additional variables + doc_string.add_parameter(' filename : String\n ' + 'File name for this package.') + doc_string.add_parameter(' pname : String\n ' + 'Package name for this package.') + doc_string.add_parameter(' parent_file : MFPackage\n ' + 'Parent package file that references this ' + 'package. Only needed for\n utility ' + 'packages (mfutl*). For example, mfutllaktab ' + 'package must have \n a mfgwflak ' + 'package parent_file.') + + # build package builder class string + init_vars.append(' self._init_complete = True') + init_vars = '\n'.join(init_vars) + package_short_name = clean_class_string(package[0].file_type).lower() + class_def_string = 'class Modflow{}(mfpackage.MFPackage):\n'.format( + package_name.title()) + class_def_string = class_def_string.replace('-', '_') + class_var_string = '{}\n package_abbr = "{}"\n _package_type = ' \ + '"{}"\n dfn_file_name = "{}"' \ + '\n'.format('\n'.join(class_vars), package_abbr, + package[4], package[0].dfn_file_name) + init_string_full = init_string_def + init_string_model = '{}, simulation'.format(init_string_def) + # add variables to init string + doc_string.add_parameter(' loading_package : bool\n ' + 'Do not set this parameter. It is intended ' + 'for debugging and internal\n ' + 'processing purposes only.', + beginning_of_list=True) + if package[1] == PackageLevel.sim_level: + doc_string.add_parameter(' simulation : MFSimulation\n ' + 'Simulation that this package is a part ' + 'of. Package is automatically\n ' + 'added to simulation when it is ' + 'initialized.', beginning_of_list=True) + init_string_full = '{}, simulation, loading_package=' \ + 'False'.format(init_string_full) + else: + doc_string.add_parameter(' model : MFModel\n ' + 'Model that this package is a part of. ' + 'Package is automatically\n added ' + 'to model when it is initialized.', + beginning_of_list=True) + init_string_full = '{}, model, loading_package=False'.format( + init_string_full) + init_param_list.append('filename=None') + init_param_list.append('pname=None') + init_param_list.append('parent_file=None') + init_string_full = build_init_string(init_string_full, init_param_list) + + # build init code + if package[1] == PackageLevel.sim_level: + init_var = 'simulation' + else: + init_var = 'model' + parent_init_string = ' super(Modflow{}, self)' \ + '.__init__('.format(package_name.title()) + spaces = ' ' * len(parent_init_string) + parent_init_string = '{}{}, "{}", filename, pname,\n{}' \ + 'loading_package, parent_file)\n\n' \ + ' # set up variables'.format( + parent_init_string, init_var, package_short_name, spaces) + comment_string = '# DO NOT MODIFY THIS FILE DIRECTLY. 
THIS FILE ' \ + 'MUST BE CREATED BY\n# mf6/utils/createpackages.py' + # assemble full package string + package_string = '{}\n{}\n\n\n{}{}\n{}\n{}\n\n{}{}\n{}\n'.format( + comment_string, import_string, class_def_string, + doc_string.get_doc_string(), class_var_string, dfn_string, + init_string_full, parent_init_string, init_vars) + + # open new Packages file + pb_file = open(os.path.join(util_path, '..', 'modflow', + 'mf{}.py'.format(package_name)), 'w') + pb_file.write(package_string) + + if package[2] == 'utl' and package_abbr != 'utltab': + set_param_list.append('filename=filename') + set_param_list.append('pname=pname') + set_param_list.append('parent_file=self._cpparent') + whsp_1 = ' ' + whsp_2 = ' ' + + chld_doc_string = ' """\n Utl{}Packages is a container ' \ + 'class for the ModflowUtl{} class.\n\n ' \ + 'Methods\n ----------' \ + '\n'.format(package_short_name, + package_short_name) + + # write out child packages class + chld_cls = '\n\nclass Utl{}Packages(mfpackage.MFChildPackage' \ + 's):\n'.format(package_short_name) + chld_var = ' package_abbr = "utl{}packages"\n\n'.format( + package_short_name) + chld_init = ' def initialize(self' + chld_init = build_init_string(chld_init, init_param_list[:-1], + whsp_1) + init_pkg = '\n self._init_package(new_package, filename)' + params_init = ' new_package = ModflowUtl{}(' \ + 'self._model'.format(package_short_name) + params_init = build_init_string(params_init, set_param_list, whsp_2) + chld_doc_string = '{} initialize\n Initializes a new ' \ + 'ModflowUtl{} package removing any sibling ' \ + 'child\n packages attached to the same ' \ + 'parent package. See ModflowUtl{} init\n ' \ + ' documentation for definition of ' \ + 'parameters.\n'.format(chld_doc_string, + package_short_name, + package_short_name) + + chld_appn = '' + params_appn = '' + append_pkg = '' + if package_abbr != 'utlobs': # Hard coded obs no multi-pkg support + chld_appn = '\n\n def append_package(self' + chld_appn = build_init_string(chld_appn, init_param_list[:-1], + whsp_1) + append_pkg = '\n self._append_package(new_package, ' \ + 'filename)' + params_appn = ' new_package = ModflowUtl{}(' \ + 'self._model'.format(package_short_name) + params_appn = build_init_string(params_appn, set_param_list, + whsp_2) + chld_doc_string = '{} append_package\n Adds a ' \ + 'new ModflowUtl{} package to the container.' 
\ + ' See ModflowUtl{}\n init ' \ + 'documentation for definition of ' \ + 'parameters.\n'.format(chld_doc_string, + package_short_name, + package_short_name) + chld_doc_string = '{} """\n'.format(chld_doc_string) + packages_str = '{}{}{}{}{}{}{}{}{}\n'.format(chld_cls, + chld_doc_string, + chld_var, chld_init, + params_init[:-2], + init_pkg, chld_appn, + params_appn[:-2], + append_pkg,) + pb_file.write(packages_str) + pb_file.close() + + init_file.write('from .mf{} import ' + 'Modflow{}\n'.format(package_name, + package_name.title())) + + if package[0].dfn_type == mfstructure.DfnType.model_name_file: + # build model file + model_param_list = init_param_list[:-3] + init_vars = build_model_init_vars(model_param_list) + + model_param_list.insert(0, "model_rel_path='.'") + model_param_list.insert(0, "exe_name='mf6.exe'") + model_param_list.insert(0, "version='mf6'") + model_param_list.insert(0, 'model_nam_file=None') + model_param_list.insert(0, "modelname='model'") + model_param_list.append("**kwargs") + init_string_model = build_init_string(init_string_model, + model_param_list) + model_name = clean_class_string(package[2]) + class_def_string = 'class Modflow{}(mfmodel.MFModel):\n'.format( + model_name.capitalize()) + class_def_string = class_def_string.replace('-', '_') + doc_string.add_parameter(' sim : MFSimulation\n ' + 'Simulation that this model is a part ' + 'of. Model is automatically\n ' + 'added to simulation when it is ' + 'initialized.', + beginning_of_list=True, + model_parameter=True) + doc_string.description = 'Modflow{} defines a {} model'.format( + model_name, model_name) + class_var_string = " model_type = '{}'\n".format(model_name) + mparent_init_string = ' super(Modflow{}, self)' \ + '.__init__('.format(model_name.capitalize()) + spaces = ' ' * len(mparent_init_string) + mparent_init_string = "{}simulation, model_type='{}6',\n{}" \ + "modelname=modelname,\n{}" \ + "model_nam_file=model_nam_file,\n{}" \ + "version=version, exe_name=exe_name,\n{}" \ + "model_rel_path=model_rel_path,\n{}" \ + "**kwargs" \ + ")\n".format(mparent_init_string, model_name, + spaces, + spaces, spaces, spaces, spaces) + load_txt, doc_text = build_model_load(model_name) + package_string = '{}\n{}\n\n\n{}{}\n{}\n{}\n{}{}\n{}\n\n{}'.format( + comment_string, nam_import_string, class_def_string, + doc_string.get_doc_string(True), doc_text, class_var_string, + init_string_model, mparent_init_string, init_vars, load_txt) + md_file = open(os.path.join(util_path, '..', 'modflow', + 'mf{}.py'.format(model_name)), + 'w') + md_file.write(package_string) + md_file.close() + init_file.write('from .mf{} import ' + 'Modflow{}\n'.format(model_name, + model_name.capitalize())) + init_file.close() + + +if __name__ == '__main__': + create_packages() diff --git a/flopy/mf6/utils/mfenums.py b/flopy/mf6/utils/mfenums.py index 3b0b0a164c..e8543e08e7 100644 --- a/flopy/mf6/utils/mfenums.py +++ b/flopy/mf6/utils/mfenums.py @@ -1,12 +1,12 @@ -from enum import Enum - - -class DiscretizationType(Enum): - """ - Enumeration of discretization types - """ - UNDEFINED = 0 - DIS = 1 - DISV = 2 - DISU = 3 - DISL = 4 +from enum import Enum + + +class DiscretizationType(Enum): + """ + Enumeration of discretization types + """ + UNDEFINED = 0 + DIS = 1 + DISV = 2 + DISU = 3 + DISL = 4 diff --git a/flopy/mf6/utils/mfobservation.py b/flopy/mf6/utils/mfobservation.py index 8c81232cb4..1529087366 100644 --- a/flopy/mf6/utils/mfobservation.py +++ b/flopy/mf6/utils/mfobservation.py @@ -1,480 +1,480 @@ -import numpy as np -import csv - -def 
try_float(data): - try: - data = float(data) - except ValueError: - pass - return data - - -class MFObservation: - ''' - Wrapper class to request the MFObservation object: - Class is called by the MFSimulation.SimulationDict() class and is not - called by the user - - Inputs: - ------- - mfdict: (dict) the sim.simulation_dict.mfdict object for the flopy project - path: (object) the path object detailing model names and paths - key: (tuple, strings) user supplied dictionary key to request observation - utility data - - Returns: - -------- - self.data: (xarray) array of observations - ''' - def __init__(self, mfdict, path, key): - self.mfdict = mfdict - data = MFObservationRequester(mfdict, path, key) - try: - self.data = data.query_observation_data - except AttributeError: - self.data = np.array([[]]) - - def __iter__(self): - yield self.data - - def __getitem__(self, index): - self.data = self.data[index] - return self.data - - -class Observations: - ''' - Simple class to extract and view Observation files for Uzf models - (possibly all obs/hobs)? - - Input: - ------ - fi = (string) name of the observation output file - - Methods: - -------- - get_data(): (np.array) returns array of observation data - parameters: - ----------- - text = (str) specific modflow record name contained in Obs.out file - idx = (int), (slice(start, stop)) integer or slice of data to be - returned. corresponds to kstp*kper - 1 - totim = (float) model time value to return data from - - list_records(): prints a list of all valid record names contained within - the Obs.out file - get_times(): (list) returns list of time values contained in Obs.out - get_nrecords(): (int) returns number of records - get_ntimes(): (int) returns number of times - get_nobs(): (int) returns total number of observations (ntimes * nrecords) - - ''' - def __init__(self, fi): - self.Obsname = fi - - def _reader(self, fi): - # observation file reader is a standard csv reader; try to - # convert each entry to floating point - with open(fi) as f: - reader = csv.reader(f) - data = [[try_float(point) for point in line] for line in reader] - return np.array(data) - - def _array_to_dict(self, data, key=None): - # convert np.array to dictionary of observation names and data - data = data.T - data = {line[0]: [try_float(point) for point in line[1:]] - for line in data} - if key is None: - return data - else: - return data[key] - - def list_records(self): - # requester option to list all records (observation names) within an - # observation file - data_str = self._reader(self.Obsname) - data = self._array_to_dict(data_str) - for key in data: - print(key) - - def get_data(self, key=None, idx=None, totim=None): - ''' - Method to request and return array of data from an Observation - output file - - Parameters - ---------- - key: (str) dictionary key for a specific observation contained within - the observation file (optional) - idx: (int) time index (optional) - totim: (float) simulation time (optional) - - Returns - ------- - data: (list) observation file data in list - ''' - data = self._reader(self.Obsname) - - # check if user supplied observation key, default is to return - # all observations - if key is None: - header = data[0] - if idx is not None: - data = data[idx, :] - elif totim is not None: - try: - times = self.get_times() - idx = times.index(totim) - data = data[idx, :] - except ValueError: - err = 'Invalid totim value provided: obs.get_times() ' \ - 'returns a list of valid times for totim = <>' - raise ValueError(err) - else: -
pass - - else: - data = self._array_to_dict(data, key) - if idx is not None: - data = data[idx] - elif totim is not None: - try: - times = self.get_times() - idx = times.index(totim) - data = data[idx] - except ValueError: - err = 'Invalid totim value provided: obs.get_times() ' \ - 'returns a list of valid times for totim = <>' - raise ValueError(err) - else: - pass - return data - - def get_times(self): - return self.get_data(key='time') - - def get_nrecords(self): - data_str = self._reader(self.Obsname) - return len(self._array_to_dict(data_str)) - - def get_ntimes(self): - return len(self.get_times()) - - def get_nobs(self): - x = self.get_data().shape - prod = 1 - for i in x: - prod *= i - nrecords = self.get_nrecords() - ntimes = self.get_ntimes() - nobs = prod - ntimes - nrecords - return nobs - - def get_dataframe(self, keys=None, idx=None, totim=None, - start_datetime=None, timeunit='D'): - ''' - Creates a pandas dataframe object from the observation data, useful - backend if the user does not want the x-array format. - - Parameters - ---------- - keys: (string) string of dictionary/observation keys separated by commas. - (optional) - idx: (int) time index location (optional) - totim: (float) simulation time (optional) - start_datetime: (string) format is 'dd/mm/yyyy' or - 'dd/mm/yyyy hh:mm:ss' (optional) - timeunit: (string) specifies the time unit associated with totim when - setting a datetime - - Returns - ------- - pd.DataFrame - - ''' - try: - import pandas as pd - except Exception as e: - print("this feature requires pandas") - return None - - data_str = self._reader(self.Obsname) - data = self._array_to_dict(data_str) - time = data['time'] - - if start_datetime is not None: - time = self._get_datetime(time, start_datetime, timeunit) - else: - pass - - # check to see if user supplied keys, if not get all observations, - # adjust for time if necessary. - if keys is None: - if idx is not None or totim is not None: - if totim is not None: - try: - times = self.get_times() - idx = times.index(totim) - except ValueError: - err = 'Invalid totim value provided: obs.get_times() '\ - 'returns a list of valid times for totim = <>' - raise ValueError(err) - - # use dictionary comprehension to create a set of pandas series - # that can be added to a pd.DataFrame - d = {key: pd.Series(data[key][idx], index=[time[idx]]) - for key in data if key != 'time'} - else: - d = {key: pd.Series(data[key], index=time) - for key in data if key != 'time'} - - else: - keys = self._key_list(keys) - for key in keys: - if key not in data: - raise KeyError('Supplied data key: {} is not ' - 'valid'.format(key)) - else: - pass - - if idx is not None or totim is not None: - if totim is not None: - try: - times = self.get_times() - idx = times.index(totim) - except ValueError: - err = 'Invalid totim value provided: obs.get_times() '\ - 'returns a list of valid times for totim\ - = <>' - raise ValueError(err) - - d = {key: pd.Series(data[key][idx], index=[time[idx]]) - for key in data if key != 'time' and key in keys} - else: - d = {key: pd.Series(data[key], index=time) - for key in data if key != 'time' and key in keys} - - # create dataframe from pd.Series dictionary - df = pd.DataFrame(d) - - return df - - def _key_list(self, keys): - # check if user supplied keys is single or multiple, string or list. - # Return a list of keys.
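# Usage sketch (illustration only): driving the Observations class shown in
# this hunk. The file 'gwf_model.obs.csv' and the record name 'H_OBS_1' are
# hypothetical; any MODFLOW 6 observation CSV whose first row holds record
# names and whose first column is 'time' should behave the same way.
from flopy.mf6.utils.mfobservation import Observations

obs = Observations('gwf_model.obs.csv')
obs.list_records()                    # print every record name in the file
times = obs.get_times()               # list of totim values
head = obs.get_data(key='H_OBS_1')    # one record as a list of floats
df = obs.get_dataframe(keys='H_OBS_1', start_datetime='01/01/2020')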
- key_type = type(keys) - if key_type is str: - keys = keys.split(',') - keys = [key.strip(' ') for key in keys] - elif key_type is list: - pass - else: - err = 'Invalid key type: supply a string of keys separated by , ' \ - 'or a list of keys' - raise TypeError(err) - return keys - - def _get_datetime(self, times, start_dt, unit): - # use to create datetime objects for time in pandas dataFrames - import datetime as dt - - # check user supplied format of datetime, is it dd/mm/yyyy or - # dd/mm/yyyy hh:mm:ss? - if ':' in start_dt: - date, time = start_dt.split(' ') - dlist = date.split('/') - tlist = time.split(':') - else: - dlist = start_dt.split('/') - tlist = [0, 0, 0] - - # parse data from the datetime lists - try: - month = int(dlist[0]) - day = int(dlist[1]) - year = int(dlist[2]) - hour = int(tlist[0]) - minute = int(tlist[1]) - second = int(tlist[2]) - except IndexError: - err = 'please supply start_datetime in the format "dd/mm/yyyy ' \ - 'hh:mm:ss" or "dd/mm/yyyy"' - raise AssertionError(err) - - # create list of datetimes - t0 = dt.datetime(year, month, day, hour, minute, second) - if unit == 'Y': - dtlist = [dt.datetime(int(year + time), month, day, hour, minute, - second) for time in times] - elif unit == 'D': - dtlist = [t0+dt.timedelta(days=time) for time in times] - elif unit == 'H': - dtlist = [t0+dt.timedelta(hours=time) for time in times] - elif unit == 'M': - dtlist = [t0+dt.timedelta(minutes=time) for time in times] - elif unit == 'S': - dtlist = [t0+dt.timedelta(seconds=time) for time in times] - else: - raise TypeError('invalid time unit supplied') - - return dtlist - - def get_obs_data(self, key=None, idx=None, totim=None): - ''' - Method to request observation output data as an x-array - Parameters - ---------- - key: (string) dictionary key for a specific observation contained - within the observation file (optional) - idx: (int) time index (optional) - totim: (float) simulation time (optional) - - Returns - ------- - xarray.DataArray: (NxN) dimensions are totim, header == keys* - ''' - data = self.get_data(key=key, idx=idx, totim=totim) - # create x-array coordinates from time and header - totim = data.T[0][1:].astype(np.float) - header = data[0][1:].astype(np.str) - - # strip time and header off of data - data = data[1:, 1:].astype(np.float) - - return data - - -class MFObservationRequester: - ''' - Wrapper class for MFObservation.Observations. Class checks which - observation data is available, and creates a dictionary key to access - the set of observation data from the SimulationDict() - ''' - def __init__(self, mfdict, path, key, **kwargs): - self.mfdict = mfdict - self.path = path - self.obs_dataDict = {} - # check that observation files exist, create a key and path to them and - # set to self.obs_dataDict - self._check_for_observations() - - # check if user supplied dictionary key is valid, or if it is a dummy - # key for a key request. 
- if key in self.obs_dataDict: - modelpath = path.get_model_path(key[0]) - self.query_observation_data = \ - self._query_observation_data(modelpath, key) - return - - elif key == ('model', 'OBS8', 'IamAdummy'): - pass - - else: - err = '{} is not a valid dictionary key\n'.format(str(key)) - raise KeyError(err) - - def _query_observation_data(self, modelpath, key): - # get absolute path for observation data files - fi = modelpath + self.obs_dataDict[key] - # request observation data - Obs = Observations(fi) - data = Obs.get_obs_data() - return data - - def _check_for_observations(self): - ''' - Checks all entries of mfdict for the string - 'observation-input-filenames', finds path to file, creates - dictionary key to access observation output data. - - Returns - ------- - sets key: path to self.Obs_dataDict{} - - ''' - possible_observations = [k for k in self.mfdict - if 'observation-input-filename' in k and - 'FORMAT' not in k] - partial_key = [] - for k in possible_observations: - if self.mfdict[k] is not None: - partial_key.append([k[0], k[1]]) - - # check if there are multiple OBS8 files associated with this project - for line in partial_key: - check = partial_key.count(line) - if check > 1: - multi_observations = [i for i in partial_key if i == line] - for i in range(len(multi_observations)): - obs8_file = 'OBS8_{}'.format(i + 1) - # check for single observations, continuous observations - self._get_obsfile_names(multi_observations[i], obs8_file, - 'SINGLE') - self._get_obsfile_names(multi_observations[i], obs8_file, - 'CONTINUOUS') - - elif check <= 1: - for i in range(len(partial_key)): - self._get_obsfile_names(partial_key[i], 'OBS8', 'SINGLE') - self._get_obsfile_names(partial_key[i], 'OBS8', - 'CONTINUOUS') - - else: - raise KeyError('There are no observation files associated ' - 'with this project') - - def _get_obsfile_names(self, partial_key, OBS8, obstype): - ''' - Creates a data dictionary key for user to request data. 
This key holds - the path to the observation file - - Parameters - ---------- - partial_key: (list) partial dictionary key - OBS8: (string) OBS8 mfdict key name - obstype: (string) SINGLE or CONTINUOUS - - Returns: - -------- - sets key: path to self.obs_dataDict - - ''' - try: - obstypes = self.mfdict[(partial_key[0], partial_key[1], OBS8, - obstype, 'obstype')] - obspackage = self._get_package_type(obstypes) - obs_fname = self.mfdict[(partial_key[0], partial_key[1], OBS8, - obstype, 'obs_output_file_name')] - self.obs_dataDict[(partial_key[0], obspackage, obstype, - 'Observations')] = obs_fname - except KeyError: - pass - - def _get_package_type(self, obstypes): - # check the observation name in the OBS8 dictionary to get the - # package type - valid_packages = ('CHD', 'DRN', 'GHB', 'GWF', 'LAK', 'MAW', 'RIV', - 'SFR', 'UZF', 'WEL') - valid_gwf = ('head', 'drawdown', 'intercell-flow') - package = obstypes[0][:3].upper() - model = obstypes[0] - - if package in valid_packages: - return package - - elif model in valid_gwf: - return 'GWF' - - else: - raise KeyError('{} is not a valid observation ' - 'type'.format(package)) - - @staticmethod - def getkeys(mfdict, path): - # staticmethod to return a valid set of mfdict keys to the user to - # access this data - key = ('model', 'OBS8', 'IamAdummy') - x = MFObservationRequester(mfdict, path, key) - for key in x.obs_dataDict: - print(key) +import numpy as np +import csv + +def try_float(data): + try: + data = float(data) + except ValueError: + pass + return data + + +class MFObservation: + ''' + Wrapper class to request the MFObservation object: + Class is called by the MFSimulation.SimulationDict() class and is not + called by the user + + Inputs: + ------- + mfdict: (dict) the sim.simulation_dict.mfdict object for the flopy project + path: (object) the path object detailing model names and paths + key: (tuple, strings) user supplied dictionary key to request observation + utility data + + Returns: + -------- + self.data: (xarray) array of observations + ''' + def __init__(self, mfdict, path, key): + self.mfdict = mfdict + data = MFObservationRequester(mfdict, path, key) + try: + self.data = data.query_observation_data + except AttributeError: + self.data = np.array([[]]) + + def __iter__(self): + yield self.data + + def __getitem__(self, index): + self.data = self.data[index] + return self.data + + +class Observations: + ''' + Simple class to extract and view Observation files for Uzf models + (possibly all obs/hobs)? + + Input: + ------ + fi = (string) name of the observation output file + + Methods: + -------- + get_data(): (np.array) returns array of observation data + parameters: + ----------- + text = (str) specific modflow record name contained in Obs.out file + idx = (int), (slice(start, stop)) integer or slice of data to be + returned.
corresponds to kstp*kper - 1 + totim = (float) model time value to return data from + + list_records(): prints a list of all valid record names contained within + the Obs.out file + get_times(): (list) returns list of time values contained in Obs.out + get_nrecords(): (int) returns number of records + get_ntimes(): (int) returns number of times + get_nobs(): (int) returns total number of observations (ntimes * nrecords) + + ''' + def __init__(self, fi): + self.Obsname = fi + + def _reader(self, fi): + # observation file reader is a standard csv reader; try to + # convert each entry to floating point + with open(fi) as f: + reader = csv.reader(f) + data = [[try_float(point) for point in line] for line in reader] + return np.array(data) + + def _array_to_dict(self, data, key=None): + # convert np.array to dictionary of observation names and data + data = data.T + data = {line[0]: [try_float(point) for point in line[1:]] + for line in data} + if key is None: + return data + else: + return data[key] + + def list_records(self): + # requester option to list all records (observation names) within an + # observation file + data_str = self._reader(self.Obsname) + data = self._array_to_dict(data_str) + for key in data: + print(key) + + def get_data(self, key=None, idx=None, totim=None): + ''' + Method to request and return array of data from an Observation + output file + + Parameters + ---------- + key: (str) dictionary key for a specific observation contained within + the observation file (optional) + idx: (int) time index (optional) + totim: (float) simulation time (optional) + + Returns + ------- + data: (list) observation file data in list + ''' + data = self._reader(self.Obsname) + + # check if user supplied observation key, default is to return + # all observations + if key is None: + header = data[0] + if idx is not None: + data = data[idx, :] + elif totim is not None: + try: + times = self.get_times() + idx = times.index(totim) + data = data[idx, :] + except ValueError: + err = 'Invalid totim value provided: obs.get_times() ' \ + 'returns a list of valid times for totim = <>' + raise ValueError(err) + else: + pass + + else: + data = self._array_to_dict(data, key) + if idx is not None: + data = data[idx] + elif totim is not None: + try: + times = self.get_times() + idx = times.index(totim) + data = data[idx] + except ValueError: + err = 'Invalid totim value provided: obs.get_times() ' \ + 'returns a list of valid times for totim = <>' + raise ValueError(err) + else: + pass + return data + + def get_times(self): + return self.get_data(key='time') + + def get_nrecords(self): + data_str = self._reader(self.Obsname) + return len(self._array_to_dict(data_str)) + + def get_ntimes(self): + return len(self.get_times()) + + def get_nobs(self): + x = self.get_data().shape + prod = 1 + for i in x: + prod *= i + nrecords = self.get_nrecords() + ntimes = self.get_ntimes() + nobs = prod - ntimes - nrecords + return nobs + + def get_dataframe(self, keys=None, idx=None, totim=None, + start_datetime=None, timeunit='D'): + ''' + Creates a pandas dataframe object from the observation data, useful + backend if the user does not want the x-array format. + + Parameters + ---------- + keys: (string) string of dictionary/observation keys separated by commas.
+ (optional) + idx: (int) time index location (optional) + totim: (float) simulation time (optional) + start_datetime: (string) format is 'dd/mm/yyyy' or + 'dd/mm/yyyy hh:mm:ss' (optional) + timeunit: (string) specifies the time unit associated with totim when + setting a datetime + + Returns + ------- + pd.DataFrame + + ''' + try: + import pandas as pd + except Exception as e: + print("this feature requires pandas") + return None + + data_str = self._reader(self.Obsname) + data = self._array_to_dict(data_str) + time = data['time'] + + if start_datetime is not None: + time = self._get_datetime(time, start_datetime, timeunit) + else: + pass + + # check to see if user supplied keys, if not get all observations, + # adjust for time if necessary. + if keys is None: + if idx is not None or totim is not None: + if totim is not None: + try: + times = self.get_times() + idx = times.index(totim) + except ValueError: + err = 'Invalid totim value provided: obs.get_times() '\ + 'returns a list of valid times for totim = <>' + raise ValueError(err) + + # use dictionary comprehension to create a set of pandas series + # that can be added to a pd.DataFrame + d = {key: pd.Series(data[key][idx], index=[time[idx]]) + for key in data if key != 'time'} + else: + d = {key: pd.Series(data[key], index=time) + for key in data if key != 'time'} + + else: + keys = self._key_list(keys) + for key in keys: + if key not in data: + raise KeyError('Supplied data key: {} is not ' + 'valid'.format(key)) + else: + pass + + if idx is not None or totim is not None: + if totim is not None: + try: + times = self.get_times() + idx = times.index(totim) + except ValueError: + err = 'Invalid totim value provided: obs.get_times() '\ + 'returns a list of valid times for totim\ + = <>' + raise ValueError(err) + + d = {key: pd.Series(data[key][idx], index=[time[idx]]) + for key in data if key != 'time' and key in keys} + else: + d = {key: pd.Series(data[key], index=time) + for key in data if key != 'time' and key in keys} + + # create dataframe from pd.Series dictionary + df = pd.DataFrame(d) + + return df + + def _key_list(self, keys): + # check if user supplied keys is single or multiple, string or list. + # Return a list of keys. + key_type = type(keys) + if key_type is str: + keys = keys.split(',') + keys = [key.strip(' ') for key in keys] + elif key_type is list: + pass + else: + err = 'Invalid key type: supply a string of keys separated by , ' \ + 'or a list of keys' + raise TypeError(err) + return keys + + def _get_datetime(self, times, start_dt, unit): + # use to create datetime objects for time in pandas dataFrames + import datetime as dt + + # check user supplied format of datetime, is it dd/mm/yyyy or + # dd/mm/yyyy hh:mm:ss? 
+ if ':' in start_dt: + date, time = start_dt.split(' ') + dlist = date.split('/') + tlist = time.split(':') + else: + dlist = start_dt.split('/') + tlist = [0, 0, 0] + + # parse data from the datetime lists + try: + month = int(dlist[0]) + day = int(dlist[1]) + year = int(dlist[2]) + hour = int(tlist[0]) + minute = int(tlist[1]) + second = int(tlist[2]) + except IndexError: + err = 'please supply start_datetime in the format "dd/mm/yyyy ' \ + 'hh:mm:ss" or "dd/mm/yyyy"' + raise AssertionError(err) + + # create list of datetimes + t0 = dt.datetime(year, month, day, hour, minute, second) + if unit == 'Y': + dtlist = [dt.datetime(int(year + time), month, day, hour, minute, + second) for time in times] + elif unit == 'D': + dtlist = [t0+dt.timedelta(days=time) for time in times] + elif unit == 'H': + dtlist = [t0+dt.timedelta(hours=time) for time in times] + elif unit == 'M': + dtlist = [t0+dt.timedelta(minutes=time) for time in times] + elif unit == 'S': + dtlist = [t0+dt.timedelta(seconds=time) for time in times] + else: + raise TypeError('invalid time unit supplied') + + return dtlist + + def get_obs_data(self, key=None, idx=None, totim=None): + ''' + Method to request observation output data as an x-array + Parameters + ---------- + key: (string) dictionary key for a specific observation contained + within the observation file (optional) + idx: (int) time index (optional) + totim: (float) simulation time (optional) + + Returns + ------- + xarray.DataArray: (NxN) dimensions are totim, header == keys* + ''' + data = self.get_data(key=key, idx=idx, totim=totim) + # create x-array coordinates from time and header + totim = data.T[0][1:].astype(np.float) + header = data[0][1:].astype(np.str) + + # strip time and header off of data + data = data[1:, 1:].astype(np.float) + + return data + + +class MFObservationRequester: + ''' + Wrapper class for MFObservation.Observations. Class checks which + observation data is available, and creates a dictionary key to access + the set of observation data from the SimulationDict() + ''' + def __init__(self, mfdict, path, key, **kwargs): + self.mfdict = mfdict + self.path = path + self.obs_dataDict = {} + # check that observation files exist, create a key and path to them and + # set to self.obs_dataDict + self._check_for_observations() + + # check if user supplied dictionary key is valid, or if it is a dummy + # key for a key request. + if key in self.obs_dataDict: + modelpath = path.get_model_path(key[0]) + self.query_observation_data = \ + self._query_observation_data(modelpath, key) + return + + elif key == ('model', 'OBS8', 'IamAdummy'): + pass + + else: + err = '{} is not a valid dictionary key\n'.format(str(key)) + raise KeyError(err) + + def _query_observation_data(self, modelpath, key): + # get absolute path for observation data files + fi = modelpath + self.obs_dataDict[key] + # request observation data + Obs = Observations(fi) + data = Obs.get_obs_data() + return data + + def _check_for_observations(self): + ''' + Checks all entries of mfdict for the string + 'observation-input-filenames', finds path to file, creates + dictionary key to access observation output data. 
+ + Returns + ------- + sets key: path to self.Obs_dataDict{} + + ''' + possible_observations = [k for k in self.mfdict + if 'observation-input-filename' in k and + 'FORMAT' not in k] + partial_key = [] + for k in possible_observations: + if self.mfdict[k] is not None: + partial_key.append([k[0], k[1]]) + + # check if there are multiple OBS8 files associated with this project + for line in partial_key: + check = partial_key.count(line) + if check > 1: + multi_observations = [i for i in partial_key if i == line] + for i in range(len(multi_observations)): + obs8_file = 'OBS8_{}'.format(i + 1) + # check for single observations, continuous observations + self._get_obsfile_names(multi_observations[i], obs8_file, + 'SINGLE') + self._get_obsfile_names(multi_observations[i], obs8_file, + 'CONTINUOUS') + + elif check <= 1: + for i in range(len(partial_key)): + self._get_obsfile_names(partial_key[i], 'OBS8', 'SINGLE') + self._get_obsfile_names(partial_key[i], 'OBS8', + 'CONTINUOUS') + + else: + raise KeyError('There are no observation files associated ' + 'with this project') + + def _get_obsfile_names(self, partial_key, OBS8, obstype): + ''' + Creates a data dictionary key for user to request data. This key holds + the path to the observation file + + Parameters + ---------- + partial_key: (list) partial dictionary key + OBS8: (string) OBS8 mfdict key name + obstype: (string) SINGLE or CONTINUOUS + + Returns: + -------- + sets key: path to self.obs_dataDict + + ''' + try: + obstypes = self.mfdict[(partial_key[0], partial_key[1], OBS8, + obstype, 'obstype')] + obspackage = self._get_package_type(obstypes) + obs_fname = self.mfdict[(partial_key[0], partial_key[1], OBS8, + obstype, 'obs_output_file_name')] + self.obs_dataDict[(partial_key[0], obspackage, obstype, + 'Observations')] = obs_fname + except KeyError: + pass + + def _get_package_type(self, obstypes): + # check the observation name in the OBS8 dictionary to get the + # package type + valid_packages = ('CHD', 'DRN', 'GHB', 'GWF', 'LAK', 'MAW', 'RIV', + 'SFR', 'UZF', 'WEL') + valid_gwf = ('head', 'drawdown', 'intercell-flow') + package = obstypes[0][:3].upper() + model = obstypes[0] + + if package in valid_packages: + return package + + elif model in valid_gwf: + return 'GWF' + + else: + raise KeyError('{} is not a valid observation ' + 'type'.format(package)) + + @staticmethod + def getkeys(mfdict, path): + # staticmethod to return a valid set of mfdict keys to the user to + # access this data + key = ('model', 'OBS8', 'IamAdummy') + x = MFObservationRequester(mfdict, path, key) + for key in x.obs_dataDict: + print(key) diff --git a/flopy/mf6/utils/reference.py b/flopy/mf6/utils/reference.py index e4fe318dee..9e1335b8a5 100644 --- a/flopy/mf6/utils/reference.py +++ b/flopy/mf6/utils/reference.py @@ -1,868 +1,868 @@ -""" -Module spatial referencing for flopy model objects - -""" -import numpy as np - - -class StructuredSpatialReference(object): - """ - a simple class to locate the model grid in x-y space - - Parameters - ---------- - - delr : numpy ndarray - the model discretization delr vector - - delc : numpy ndarray - the model discretization delc vector - - lenuni : int - the length units flag from the discretization package - - xul : float - the x coordinate of the upper left corner of the grid - - yul : float - the y coordinate of the upper left corner of the grid - - rotation : float - the counter-clockwise rotation (in degrees) of the grid - - proj4_str: str - a PROJ4 string that identifies the grid in space. warning: case - sensitive! 
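# Usage sketch (illustration only): constructing the class documented above
# with made-up grid values; delr/delc follow the MODFLOW convention of
# column widths and row heights.
import numpy as np
from flopy.mf6.utils.reference import StructuredSpatialReference

sr = StructuredSpatialReference(delr=np.full(10, 100.0),  # 10 columns
                                delc=np.full(5, 100.0),   # 5 rows
                                xul=0.0, yul=500.0, rotation=0.0)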
- - Attributes - ---------- - xedge : ndarray - array of column edges - - yedge : ndarray - array of row edges - - xgrid : ndarray - numpy meshgrid of xedges - - ygrid : ndarray - numpy meshgrid of yedges - - xcenter : ndarray - array of column centers - - ycenter : ndarray - array of row centers - - xcentergrid : ndarray - numpy meshgrid of column centers - - ycentergrid : ndarray - numpy meshgrid of row centers - - Notes - ----- - - xul and yul can be explicitly (re)set after SpatialReference - instantiation, but only before any of the other attributes and methods are - accessed - - """ - - def __init__(self, delr=1.0, delc=1.0, lenuni=1, nlay=1, xul=None, - yul=None, rotation=0.0, proj4_str=None, **kwargs): - self.delc = np.atleast_1d(np.array(delc)) - self.delr = np.atleast_1d(np.array(delr)) - self.nlay = nlay - self.lenuni = lenuni - self.proj4_str = proj4_str - self._reset() - self.set_spatialreference(xul, yul, rotation) - - @classmethod - def from_namfile_header(cls, namefile): - # check for reference info in the nam file header - header = [] - with open(namefile, 'r') as f: - for line in f: - if not line.startswith('#'): - break - header.extend(line.strip().replace('#', '').split(',')) - - xul, yul = None, None - rotation = 0.0 - proj4_str = None - start_datetime = "1/1/1970" - - for item in header: - if "xul" in item.lower(): - try: - xul = float(item.split(':')[1]) - except: - pass - elif "yul" in item.lower(): - try: - yul = float(item.split(':')[1]) - except: - pass - elif "rotation" in item.lower(): - try: - rotation = float(item.split(':')[1]) - except: - pass - elif "proj4_str" in item.lower(): - try: - proj4_str = ':'.join(item.split(':')[1:]).strip() - except: - pass - elif "start" in item.lower(): - try: - start_datetime = item.split(':')[1].strip() - except: - pass - - return cls(xul=xul, yul=yul, rotation=rotation, proj4_str=proj4_str),\ - start_datetime - - def __setattr__(self, key, value): - reset = True - if key == "delr": - super(StructuredSpatialReference, self).\ - __setattr__("delr", np.atleast_1d(np.array(value))) - elif key == "delc": - super(StructuredSpatialReference, self).\ - __setattr__("delc", np.atleast_1d(np.array(value))) - elif key == "xul": - super(StructuredSpatialReference, self).\ - __setattr__("xul", float(value)) - elif key == "yul": - super(StructuredSpatialReference, self).\ - __setattr__("yul", float(value)) - elif key == "rotation": - super(StructuredSpatialReference, self).\ - __setattr__("rotation", float(value)) - elif key == "lenuni": - super(StructuredSpatialReference, self).\ - __setattr__("lenuni", int(value)) - elif key == "nlay": - super(StructuredSpatialReference, self).\ - __setattr__("nlay", int(value)) - else: - super(StructuredSpatialReference, self).__setattr__(key, value) - reset = False - if reset: - self._reset() - - def reset(self, **kwargs): - for key, value in kwargs.items(): - setattr(self, key, value) - - def _reset(self): - self._xgrid = None - self._ygrid = None - self._ycentergrid = None - self._xcentergrid = None - - @property - def nrow(self): - return self.delc.shape[0] - - @property - def ncol(self): - return self.delr.shape[0] - - def __eq__(self, other): - if not isinstance(other, StructuredSpatialReference): - return False - if other.xul != self.xul: - return False - if other.yul != self.yul: - return False - if other.rotation != self.rotation: - return False - if other.proj4_str != self.proj4_str: - return False - return True - - @classmethod - def from_gridspec(cls, gridspec_file, lenuni=0): - f = 
open(gridspec_file, 'r') - lines = f.readlines() - raw = f.readline().strip().split() - nrow = int(raw[0]) - ncol = int(raw[1]) - raw = f.readline().strip().split() - xul, yul, rot = float(raw[0]), float(raw[1]), float(raw[2]) - delr = [] - j = 0 - while j < ncol: - raw = f.readline().strip().split() - for r in raw: - if '*' in r: - rraw = r.split('*') - for n in range(int(rraw[0])): - delr.append(int(rraw[1])) - j += 1 - else: - delr.append(int(r)) - j += 1 - delc = [] - i = 0 - while i < nrow: - raw = f.readline().strip().split() - for r in raw: - if '*' in r: - rraw = r.split('*') - for n in range(int(rraw[0])): - delc.append(int(rraw[1])) - i += 1 - else: - delc.append(int(r)) - i += 1 - f.close() - return cls(np.array(delr), np.array(delc), - lenuni, xul=xul, yul=yul, rotation=rot) - - @property - def attribute_dict(self): - return {"xul": self.xul, "yul": self.yul, "rotation": self.rotation, - "proj4_str": self.proj4_str} - - def set_spatialreference(self, xul=None, yul=None, rotation=0.0): - """ - set spatial reference - can be called from model instance - """ - - # Set origin and rotation - if xul is None: - self.xul = 0. - else: - self.xul = xul - if yul is None: - self.yul = np.add.reduce(self.delc) - else: - self.yul = yul - self.rotation = rotation - self._reset() - - def __repr__(self): - s = "xul:{0:= 2 + cellid_size*2: - obsrecarray.append( - (fd_spl[0], fd_spl[1], make_int_tuple(fd_spl[2:2+cellid_size]), - make_int_tuple(fd_spl[2 + cellid_size:2 + 2 * cellid_size]))) - else: - obsrecarray.append((fd_spl[0], fd_spl[1], - make_int_tuple(fd_spl[2:2 + cellid_size]))) - - fd.close() - return obsrecarray - - -def read_std_array(array_file, data_type): - data_list = [] - fd = open(array_file, 'r') - for current_line in fd: - split_line = datautil.PyListUtil.split_data_line(current_line) - for data in split_line: - if data_type == 'float': - data_list.append(float(data)) - elif data_type == 'int': - data_list.append(int(data)) - else: - data_list.append(data) - fd.close() - return data_list - - -def read_sfr_rec(sfr_file, cellid_size=3): - fd = open(sfr_file, 'r') - sfrrecarray = [] - for line in fd: - fd_spl = line.strip().split() - try: - cellid = make_int_tuple(fd_spl[1:1+cellid_size]) - temp_size = cellid_size - except ValueError: - cellid = fd_spl[1] - temp_size = 1 - sfrrecarray.append((int(fd_spl[0]) - 1, - cellid, - float(fd_spl[temp_size+1]), - int(fd_spl[temp_size+2]), - float(fd_spl[temp_size+3]), - float(fd_spl[temp_size+4]), - float(fd_spl[temp_size+5]), - float(fd_spl[temp_size+6]), - float(fd_spl[temp_size+7]), - int(fd_spl[temp_size+8]), - float(fd_spl[temp_size+9]), - int(fd_spl[temp_size+10]))) - fd.close() - return sfrrecarray - - -def read_reach_con_rec(sfr_file): - fd = open(sfr_file, 'r') - sfrrecarray = [] - for line in fd: - fd_spl = line.strip().split() - con_arr = [] - for index, item in enumerate(fd_spl): - item_val = int(item) - if index == 0: - item_val -= 1 - else: - if item_val == -1: - item_val = -0.0 - elif item_val < 0: - item_val += 1 - item_val = float(item_val) - else: - item_val -= 1 - item_val = float(item_val) - con_arr.append(item_val) - sfrrecarray.append(tuple(con_arr)) - fd.close() - return sfrrecarray - - -def read_reach_div_rec(sfr_file): - fd = open(sfr_file, 'r') - sfrrecarray = [] - for line in fd: - fd_spl = line.strip().split() - sfrrecarray.append((int(fd_spl[0]) - 1, int(fd_spl[1]) - 1, - int(fd_spl[2]) - 1, fd_spl[3])) - fd.close() - return sfrrecarray - - -def read_reach_per_rec(sfr_file): - fd = open(sfr_file, 'r') - 
sfrrecarray = [] - for line in fd: - fd_spl = line.strip().split() - per_arr = [int(fd_spl[0]) - 1, fd_spl[1]] - first = True - for item in fd_spl[2:]: - if fd_spl[1].lower() == 'diversion' and first: - per_arr.append(str(int(item) - 1)) - first = False - else: - per_arr.append(item) - sfrrecarray.append(tuple(per_arr)) - fd.close() - return sfrrecarray - - -def read_wells(wel_file, cellid_size=3): - fd = open(wel_file, 'r') - welrecarray = [] - for line in fd: - fd_spl = line.strip().split() - new_wel = [] - new_wel.append(make_int_tuple(fd_spl[0:cellid_size])) - new_wel.append(float(fd_spl[cellid_size])) - for item in fd_spl[cellid_size+1:]: - new_wel.append(item) - welrecarray.append(tuple(new_wel)) - fd.close() - return welrecarray +from ...utils import datautil + + +def make_int_tuple(str_list): + int_list = [] + for item in str_list: + int_list.append(int(item)-1) + return tuple(int_list) + + +def read_vertices(vert_file): + fd = open(vert_file, 'r') + vertrecarray = [] + for line in fd: + fd_spl = line.strip().split() + vertrecarray.append((int(fd_spl[0]) - 1, float(fd_spl[1]), + float(fd_spl[2]))) + fd.close() + return vertrecarray + + +def read_cell2d(cell2d_file): + fd = open(cell2d_file, 'r') + c2drecarray = [] + for line in fd: + fd_spl = line.strip().split() + rec_array = [int(fd_spl[0]) - 1, float(fd_spl[1]), float(fd_spl[2])] + rec_array.append(int(fd_spl[3])) + for item in fd_spl[4:]: + rec_array.append(int(item) - 1) + c2drecarray.append(tuple(rec_array)) + fd.close() + return c2drecarray + + +def read_exchangedata(gwf_file, cellid_size=3): + exgrecarray = [] + fd = open(gwf_file, 'r') + for line in fd: + linesp = line.strip().split() + exgrecarray.append((make_int_tuple(linesp[0:cellid_size]), + make_int_tuple(linesp[cellid_size:cellid_size*2]), + int(linesp[cellid_size*2]), + float(linesp[cellid_size*2+1]), + float(linesp[cellid_size*2+2]), + float(linesp[cellid_size*2+3]), + float(linesp[cellid_size*2+4]))) + return exgrecarray + + +def read_gncrecarray(gnc_file, cellid_size=3): + gncrecarray = [] + fd = open(gnc_file, 'r') + for line in fd: + linesp = line.strip().split() + gncrecarray.append( + (make_int_tuple(linesp[0:cellid_size]), + make_int_tuple(linesp[cellid_size:cellid_size*2]), + make_int_tuple(linesp[cellid_size*2:cellid_size*3]), + float(linesp[cellid_size*3]))) + return gncrecarray + + +def read_chdrecarray(chd_file, cellid_size=3): + fd = open(chd_file, 'r') + chdrecarray = [] + for line in fd: + fd_spl = line.strip().split() + chdrecarray.append((make_int_tuple(fd_spl[0:cellid_size]), + float(fd_spl[cellid_size]))) + fd.close() + return chdrecarray + + +def read_ghbrecarray(chd_file, cellid_size=3): + fd = open(chd_file, 'r') + ghbrecarray = [] + for line in fd: + fd_spl = line.strip().split() + ghbrecarray.append((make_int_tuple(fd_spl[0:cellid_size]), + float(fd_spl[cellid_size]), + float(fd_spl[cellid_size+1]))) + fd.close() + return ghbrecarray + + +def read_obs(obs_file, cellid_size=3): + fd = open(obs_file, 'r') + obsrecarray = [] + for line in fd: + fd_spl = line.strip().split() + if len(fd_spl) >= 2 + cellid_size*2: + obsrecarray.append( + (fd_spl[0], fd_spl[1], make_int_tuple(fd_spl[2:2+cellid_size]), + make_int_tuple(fd_spl[2 + cellid_size:2 + 2 * cellid_size]))) + else: + obsrecarray.append((fd_spl[0], fd_spl[1], + make_int_tuple(fd_spl[2:2 + cellid_size]))) + + fd.close() + return obsrecarray + + +def read_std_array(array_file, data_type): + data_list = [] + fd = open(array_file, 'r') + for current_line in fd: + split_line = 
datautil.PyListUtil.split_data_line(current_line) + for data in split_line: + if data_type == 'float': + data_list.append(float(data)) + elif data_type == 'int': + data_list.append(int(data)) + else: + data_list.append(data) + fd.close() + return data_list + + +def read_sfr_rec(sfr_file, cellid_size=3): + fd = open(sfr_file, 'r') + sfrrecarray = [] + for line in fd: + fd_spl = line.strip().split() + try: + cellid = make_int_tuple(fd_spl[1:1+cellid_size]) + temp_size = cellid_size + except ValueError: + cellid = fd_spl[1] + temp_size = 1 + sfrrecarray.append((int(fd_spl[0]) - 1, + cellid, + float(fd_spl[temp_size+1]), + int(fd_spl[temp_size+2]), + float(fd_spl[temp_size+3]), + float(fd_spl[temp_size+4]), + float(fd_spl[temp_size+5]), + float(fd_spl[temp_size+6]), + float(fd_spl[temp_size+7]), + int(fd_spl[temp_size+8]), + float(fd_spl[temp_size+9]), + int(fd_spl[temp_size+10]))) + fd.close() + return sfrrecarray + + +def read_reach_con_rec(sfr_file): + fd = open(sfr_file, 'r') + sfrrecarray = [] + for line in fd: + fd_spl = line.strip().split() + con_arr = [] + for index, item in enumerate(fd_spl): + item_val = int(item) + if index == 0: + item_val -= 1 + else: + if item_val == -1: + item_val = -0.0 + elif item_val < 0: + item_val += 1 + item_val = float(item_val) + else: + item_val -= 1 + item_val = float(item_val) + con_arr.append(item_val) + sfrrecarray.append(tuple(con_arr)) + fd.close() + return sfrrecarray + + +def read_reach_div_rec(sfr_file): + fd = open(sfr_file, 'r') + sfrrecarray = [] + for line in fd: + fd_spl = line.strip().split() + sfrrecarray.append((int(fd_spl[0]) - 1, int(fd_spl[1]) - 1, + int(fd_spl[2]) - 1, fd_spl[3])) + fd.close() + return sfrrecarray + + +def read_reach_per_rec(sfr_file): + fd = open(sfr_file, 'r') + sfrrecarray = [] + for line in fd: + fd_spl = line.strip().split() + per_arr = [int(fd_spl[0]) - 1, fd_spl[1]] + first = True + for item in fd_spl[2:]: + if fd_spl[1].lower() == 'diversion' and first: + per_arr.append(str(int(item) - 1)) + first = False + else: + per_arr.append(item) + sfrrecarray.append(tuple(per_arr)) + fd.close() + return sfrrecarray + + +def read_wells(wel_file, cellid_size=3): + fd = open(wel_file, 'r') + welrecarray = [] + for line in fd: + fd_spl = line.strip().split() + new_wel = [] + new_wel.append(make_int_tuple(fd_spl[0:cellid_size])) + new_wel.append(float(fd_spl[cellid_size])) + for item in fd_spl[cellid_size+1:]: + new_wel.append(item) + welrecarray.append(tuple(new_wel)) + fd.close() + return welrecarray diff --git a/flopy/modflow/__init__.py b/flopy/modflow/__init__.py index 6e3e9ea7a0..c9dae5c6da 100644 --- a/flopy/modflow/__init__.py +++ b/flopy/modflow/__init__.py @@ -1,52 +1,52 @@ -from .mf import Modflow -from .mfbas import ModflowBas -from .mfbcf import ModflowBcf -from .mflpf import ModflowLpf -from .mfchd import ModflowChd -from .mffhb import ModflowFhb -from .mfdis import ModflowDis -from .mfdrn import ModflowDrn -from .mfdrt import ModflowDrt -from .mfevt import ModflowEvt -from .mfghb import ModflowGhb -from .mfpbc import ModflowPbc -from .mfrch import ModflowRch -from .mfriv import ModflowRiv -from .mfstr import ModflowStr -from .mfwel import ModflowWel -from .mfpval import ModflowPval -from .mfoc import ModflowOc -from .mfsip import ModflowSip -from .mfsor import ModflowSor -from .mfswr1 import ModflowSwr1 -from .mfswi2 import ModflowSwi2 -from .mfpcg import ModflowPcg -from .mfpcgn import ModflowPcgn -from .mfde4 import ModflowDe4 -from .mfpks import ModflowPks -from .mflmt import ModflowLmt -from .mfuzf1 
import ModflowUzf1 -from .mfupw import ModflowUpw -from .mfnwt import ModflowNwt -from .mfmnw1 import ModflowMnw1 -from .mfmnw2 import ModflowMnw2, Mnw -from .mfmnwi import ModflowMnwi -from .mfzon import ModflowZon -from .mfmlt import ModflowMlt -from .mfparbc import ModflowParBc -from .mfgmg import ModflowGmg -from .mfsms import ModflowSms -from .mfbct import ModflowBct -from .mfhfb import ModflowHfb -from .mfsfr2 import ModflowSfr2 -from .mflak import ModflowLak -from .mfgage import ModflowGage -from .mfsub import ModflowSub -from .mfswt import ModflowSwt -from .mfhyd import ModflowHyd -from .mfhob import ModflowHob, HeadObservation -from .mfflwob import ModflowFlwob -from .mfaddoutsidefile import mfaddoutsidefile - -# unstructured -from .mfdisu import ModflowDisU +from .mf import Modflow +from .mfbas import ModflowBas +from .mfbcf import ModflowBcf +from .mflpf import ModflowLpf +from .mfchd import ModflowChd +from .mffhb import ModflowFhb +from .mfdis import ModflowDis +from .mfdrn import ModflowDrn +from .mfdrt import ModflowDrt +from .mfevt import ModflowEvt +from .mfghb import ModflowGhb +from .mfpbc import ModflowPbc +from .mfrch import ModflowRch +from .mfriv import ModflowRiv +from .mfstr import ModflowStr +from .mfwel import ModflowWel +from .mfpval import ModflowPval +from .mfoc import ModflowOc +from .mfsip import ModflowSip +from .mfsor import ModflowSor +from .mfswr1 import ModflowSwr1 +from .mfswi2 import ModflowSwi2 +from .mfpcg import ModflowPcg +from .mfpcgn import ModflowPcgn +from .mfde4 import ModflowDe4 +from .mfpks import ModflowPks +from .mflmt import ModflowLmt +from .mfuzf1 import ModflowUzf1 +from .mfupw import ModflowUpw +from .mfnwt import ModflowNwt +from .mfmnw1 import ModflowMnw1 +from .mfmnw2 import ModflowMnw2, Mnw +from .mfmnwi import ModflowMnwi +from .mfzon import ModflowZon +from .mfmlt import ModflowMlt +from .mfparbc import ModflowParBc +from .mfgmg import ModflowGmg +from .mfsms import ModflowSms +from .mfbct import ModflowBct +from .mfhfb import ModflowHfb +from .mfsfr2 import ModflowSfr2 +from .mflak import ModflowLak +from .mfgage import ModflowGage +from .mfsub import ModflowSub +from .mfswt import ModflowSwt +from .mfhyd import ModflowHyd +from .mfhob import ModflowHob, HeadObservation +from .mfflwob import ModflowFlwob +from .mfaddoutsidefile import mfaddoutsidefile + +# unstructured +from .mfdisu import ModflowDisU diff --git a/flopy/modflow/mf.py b/flopy/modflow/mf.py index a41f861945..da5214ecf6 100644 --- a/flopy/modflow/mf.py +++ b/flopy/modflow/mf.py @@ -1,877 +1,877 @@ -""" -mf module. Contains the ModflowGlobal, ModflowList, and Modflow classes. 
- - -""" - -import os -import flopy -from inspect import getfullargspec -from ..mbase import BaseModel -from ..pakbase import Package -from ..utils import mfreadnam -from ..discretization.structuredgrid import StructuredGrid -from ..discretization.grid import Grid -from flopy.discretization.modeltime import ModelTime -from .mfpar import ModflowPar - - -class ModflowGlobal(Package): - """ - ModflowGlobal Package class - - """ - - def __init__(self, model, extension='glo'): - Package.__init__(self, model, extension, 'GLOBAL', 1) - return - - def __repr__(self): - return 'Global Package class' - - def write_file(self): - # Not implemented for global class - return - - -class ModflowList(Package): - """ - ModflowList Package class - - """ - - def __init__(self, model, extension='list', unitnumber=2): - Package.__init__(self, model, extension, 'LIST', unitnumber) - return - - def __repr__(self): - return 'List Package class' - - def write_file(self): - # Not implemented for list class - return - - -class Modflow(BaseModel): - """ - MODFLOW Model Class. - - Parameters - ---------- - modelname : string, optional - Name of model. This string will be used to name the MODFLOW input - files that are created with write_model. (the default is 'modflowtest') - namefile_ext : string, optional - Extension for the namefile (the default is 'nam') - version : string, optional - Version of MODFLOW to use (the default is 'mf2005'). - exe_name : string, optional - The name of the executable to use (the default is - 'mf2005'). - listunit : integer, optional - Unit number for the list file (the default is 2). - model_ws : string, optional - Model workspace. Directory name to create model data sets. - (default is the present working directory). - external_path : string - Location for external files (default is None). - verbose : boolean, optional - Print additional information to the screen (default is False). - load : boolean, optional - (default is True).
- silent : integer - (default is 0) - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - - """ - - def __init__(self, modelname='modflowtest', namefile_ext='nam', - version='mf2005', exe_name='mf2005.exe', - structured=True, listunit=2, model_ws='.', external_path=None, - verbose=False, **kwargs): - BaseModel.__init__(self, modelname, namefile_ext, exe_name, model_ws, - structured=structured, verbose=verbose, **kwargs) - self.version_types = {'mf2k': 'MODFLOW-2000', 'mf2005': 'MODFLOW-2005', - 'mfnwt': 'MODFLOW-NWT', 'mfusg': 'MODFLOW-USG'} - - self.set_version(version) - - if self.version == 'mf2k': - self.glo = ModflowGlobal(self) - - self.lst = ModflowList(self, unitnumber=listunit) - # -- check if unstructured is specified for something - # other than mfusg is specified - if not self.structured: - assert 'mfusg' in self.version, \ - 'structured=False can only be specified for mfusg models' - - # external option stuff - self.array_free_format = True - self.array_format = 'modflow' - # self.external_fnames = [] - # self.external_units = [] - # self.external_binflag = [] - - self.load_fail = False - # the starting external data unit number - self._next_ext_unit = 1000 - - if external_path is not None: - if os.path.exists(os.path.join(model_ws, external_path)): - print("Note: external_path " + str(external_path) + - " already exists") - else: - os.makedirs(os.path.join(model_ws, external_path)) - self.external_path = external_path - self.verbose = verbose - self.mfpar = ModflowPar() - - # output file info - self.hext = 'hds' - self.dext = 'ddn' - self.cext = 'cbc' - self.hpth = None - self.dpath = None - self.cpath = None - - # Create a dictionary to map package with package object. - # This is used for loading models. 
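# Usage sketch (illustration only): during Modflow.load() each file type
# read from the name file (e.g. 'WEL', 'LPF') is matched, lowercased,
# against the mfnam_packages dictionary built below to find the class that
# can parse that file:
#
#     m = flopy.modflow.Modflow()
#     pak_class = m.mfnam_packages['wel']   # -> flopy.modflow.ModflowWel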
- self.mfnam_packages = { - "zone": flopy.modflow.ModflowZon, - "mult": flopy.modflow.ModflowMlt, - "pval": flopy.modflow.ModflowPval, - "bas6": flopy.modflow.ModflowBas, - "dis": flopy.modflow.ModflowDis, - "disu": flopy.modflow.ModflowDisU, - "bcf6": flopy.modflow.ModflowBcf, - "lpf": flopy.modflow.ModflowLpf, - "hfb6": flopy.modflow.ModflowHfb, - "chd": flopy.modflow.ModflowChd, - "fhb": flopy.modflow.ModflowFhb, - "wel": flopy.modflow.ModflowWel, - "mnw1": flopy.modflow.ModflowMnw1, - "mnw2": flopy.modflow.ModflowMnw2, - "mnwi": flopy.modflow.ModflowMnwi, - "drn": flopy.modflow.ModflowDrn, - "drt": flopy.modflow.ModflowDrt, - "rch": flopy.modflow.ModflowRch, - "evt": flopy.modflow.ModflowEvt, - "ghb": flopy.modflow.ModflowGhb, - "gmg": flopy.modflow.ModflowGmg, - "lmt6": flopy.modflow.ModflowLmt, - "lmt7": flopy.modflow.ModflowLmt, - "riv": flopy.modflow.ModflowRiv, - "str": flopy.modflow.ModflowStr, - "swi2": flopy.modflow.ModflowSwi2, - "pcg": flopy.modflow.ModflowPcg, - "pcgn": flopy.modflow.ModflowPcgn, - "nwt": flopy.modflow.ModflowNwt, - "pks": flopy.modflow.ModflowPks, - "sms": flopy.modflow.ModflowSms, - "sfr": flopy.modflow.ModflowSfr2, - "lak": flopy.modflow.ModflowLak, - "gage": flopy.modflow.ModflowGage, - "sip": flopy.modflow.ModflowSip, - "sor": flopy.modflow.ModflowSor, - "de4": flopy.modflow.ModflowDe4, - "oc": flopy.modflow.ModflowOc, - "uzf": flopy.modflow.ModflowUzf1, - "upw": flopy.modflow.ModflowUpw, - "sub": flopy.modflow.ModflowSub, - "swt": flopy.modflow.ModflowSwt, - "hyd": flopy.modflow.ModflowHyd, - "hob": flopy.modflow.ModflowHob, - "chob": flopy.modflow.ModflowFlwob, - "gbob": flopy.modflow.ModflowFlwob, - "drob": flopy.modflow.ModflowFlwob, - "rvob": flopy.modflow.ModflowFlwob, - "vdf": flopy.seawat.SeawatVdf, - "vsc": flopy.seawat.SeawatVsc - } - return - - def __repr__(self): - nrow, ncol, nlay, nper = self.get_nrow_ncol_nlay_nper() - if nrow is not None: - # structured case - s = ('MODFLOW {} layer(s) {} row(s) {} column(s) ' - '{} stress period(s)'.format(nlay, nrow, ncol, nper)) - else: - # unstructured case - nodes = ncol.sum() - nodelay = ' '.join(str(i) for i in ncol) - print(nodelay, nlay, nper) - s = ('MODFLOW unstructured\n' - ' nodes = {}\n' - ' layers = {}\n' - ' periods = {}\n' - ' nodelay = {}\n'.format(nodes, nlay, nper, ncol)) - return s - - # - # def next_ext_unit(self): - # """ - # Function to encapsulate next_ext_unit attribute - # - # """ - # next_unit = self.__next_ext_unit + 1 - # self.__next_ext_unit += 1 - # return next_unit - - @property - def modeltime(self): - # build model time - data_frame = {'perlen': self.dis.perlen.array, - 'nstp': self.dis.nstp.array, - 'tsmult': self.dis.tsmult.array} - self._model_time = ModelTime(data_frame, - self.dis.itmuni_dict[self.dis.itmuni], - self.dis.start_datetime, - self.dis.steady.array) - return self._model_time - - @property - def modelgrid(self): - if not self._mg_resync: - return self._modelgrid - - if self.has_package('bas6'): - ibound = self.bas6.ibound.array - else: - ibound = None - - if self.get_package('disu') is not None: - self._modelgrid = Grid(grid_type='USG-Unstructured', - top=self.disu.top, botm=self.disu.bot, - idomain=ibound, proj4=self._modelgrid.proj4, - epsg=self._modelgrid.epsg, - xoff=self._modelgrid.xoffset, - yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot) - print('WARNING: Model grid functionality limited for unstructured ' - 'grid.') - else: - # build structured grid - self._modelgrid = StructuredGrid(self.dis.delc.array, - self.dis.delr.array, - 
self.dis.top.array, - self.dis.botm.array, ibound, - self.dis.lenuni, - proj4=self._modelgrid.proj4, - epsg=self._modelgrid.epsg, - xoff=self._modelgrid.xoffset, - yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot, - nlay=self.dis.nlay, - laycbd=self.dis.laycbd) - - # resolve offsets - xoff = self._modelgrid.xoffset - if xoff is None: - if self._xul is not None: - xoff = self._modelgrid._xul_to_xll(self._xul) - else: - xoff = 0.0 - yoff = self._modelgrid.yoffset - if yoff is None: - if self._yul is not None: - yoff = self._modelgrid._yul_to_yll(self._yul) - else: - yoff = 0.0 - self._modelgrid.set_coord_info(xoff, yoff, self._modelgrid.angrot, - self._modelgrid.epsg, - self._modelgrid.proj4) - self._mg_resync = not self._modelgrid.is_complete - return self._modelgrid - - @modelgrid.setter - def modelgrid(self, value): - self._mg_resync = False - self._modelgrid = value - - @property - def solver_tols(self): - if self.pcg is not None: - return self.pcg.hclose, self.pcg.rclose - elif self.nwt is not None: - return self.nwt.headtol, self.nwt.fluxtol - elif self.sip is not None: - return self.sip.hclose, -999 - elif self.gmg is not None: - return self.gmg.hclose, self.gmg.rclose - return None - - @property - def nlay(self): - if (self.dis): - return self.dis.nlay - elif (self.disu): - return self.disu.nlay - else: - return 0 - - @property - def nrow(self): - if (self.dis): - return self.dis.nrow - else: - return 0 - - @property - def ncol(self): - if (self.dis): - return self.dis.ncol - else: - return 0 - - @property - def nper(self): - if (self.dis): - return self.dis.nper - elif (self.disu): - return self.disu.nper - else: - return 0 - - @property - def ncpl(self): - if (self.dis): - return self.dis.nrow * self.dis.ncol - elif (self.disu): - return self.disu.ncpl - else: - return 0 - - @property - def nrow_ncol_nlay_nper(self): - # structured dis - dis = self.get_package('DIS') - if dis: - return dis.nrow, dis.ncol, dis.nlay, dis.nper - # unstructured dis - dis = self.get_package('DISU') - if dis: - return None, dis.nodelay.array[:], dis.nlay, dis.nper - # no dis - return 0, 0, 0, 0 - - def get_nrow_ncol_nlay_nper(self): - return self.nrow_ncol_nlay_nper - - def get_ifrefm(self): - bas = self.get_package('BAS6') - if (bas): - return bas.ifrefm - else: - return False - - def set_ifrefm(self, value=True): - if not isinstance(value, bool): - print('Error: set_ifrefm passed value must be a boolean') - return False - self.array_free_format = value - bas = self.get_package('BAS6') - if (bas): - bas.ifrefm = value - else: - return False - - def _set_name(self, value): - # Overrides BaseModel's setter for name property - BaseModel._set_name(self, value) - - if self.version == 'mf2k': - for i in range(len(self.glo.extension)): - self.glo.file_name[i] = self.name + '.' + self.glo.extension[i] - - for i in range(len(self.lst.extension)): - self.lst.file_name[i] = self.name + '.' + self.lst.extension[i] - - def write_name_file(self): - """ - Write the model name file. 
- - """ - fn_path = os.path.join(self.model_ws, self.namefile) - f_nam = open(fn_path, 'w') - f_nam.write('{}\n'.format(self.heading)) - if self.structured: - f_nam.write('#' + str(self.modelgrid)) - f_nam.write("; start_datetime:{0}\n".format(self.start_datetime)) - if self.version == 'mf2k': - if self.glo.unit_number[0] > 0: - f_nam.write('{:14s} {:5d} {}\n'.format(self.glo.name[0], - self.glo.unit_number[ - 0], - self.glo.file_name[0])) - f_nam.write('{:14s} {:5d} {}\n'.format(self.lst.name[0], - self.lst.unit_number[0], - self.lst.file_name[0])) - f_nam.write('{}'.format(self.get_name_file_entries())) - - # write the external files - for u, f, b, o in zip(self.external_units, self.external_fnames, - self.external_binflag, self.external_output): - if u == 0: - continue - replace_text = '' - if o: - replace_text = 'REPLACE' - if b: - line = 'DATA(BINARY) {0:5d} '.format(u) + f + \ - replace_text + '\n' - f_nam.write(line) - else: - f_nam.write('DATA {0:5d} '.format(u) + f + '\n') - - # write the output files - for u, f, b in zip(self.output_units, self.output_fnames, - self.output_binflag): - if u == 0: - continue - if b: - f_nam.write( - 'DATA(BINARY) {0:5d} '.format(u) + f + ' REPLACE\n') - else: - f_nam.write('DATA {0:5d} '.format(u) + f + '\n') - - # close the name file - f_nam.close() - return - - def set_model_units(self, iunit0=None): - """ - Write the model name file. - - """ - if iunit0 is None: - iunit0 = 1001 - - # initialize starting unit number - self.next_unit(iunit0) - - if self.version == 'mf2k': - # update global file unit number - if self.glo.unit_number[0] > 0: - self.glo.unit_number[0] = self.next_unit() - - # update lst file unit number - self.lst.unit_number[0] = self.next_unit() - - # update package unit numbers - for p in self.packagelist: - p.unit_number[0] = self.next_unit() - - # update external unit numbers - for i, iu in enumerate(self.external_units): - if iu == 0: - continue - self.external_units[i] = self.next_unit() - - # update output files unit numbers - oc = self.get_package('OC') - output_units0 = list(self.output_units) - for i, iu in enumerate(self.output_units): - if iu == 0: - continue - iu1 = self.next_unit() - self.output_units[i] = iu1 - # update oc files - if oc is not None: - if oc.iuhead == iu: - oc.iuhead = iu1 - elif oc.iuddn == iu: - oc.iuddn = iu1 - - # replace value in ipakcb - for p in self.packagelist: - try: - iu0 = p.ipakcb - if iu0 in output_units0: - j = output_units0.index(iu0) - p.ipakcb = self.output_units[j] - except: - if self.verbose: - print(' could not replace value in ipakcb') - - return - - def load_results(self, **kwargs): - - # remove model if passed as a kwarg - if 'model' in kwargs: - kwargs.pop('model') - - as_dict = False - if "as_dict" in kwargs: - as_dict = bool(kwargs.pop("as_dict")) - - savehead = False - saveddn = False - savebud = False - - # check for oc - try: - oc = self.get_package('OC') - self.hext = oc.extension[1] - self.dext = oc.extension[2] - self.cext = oc.extension[3] - if oc.chedfm is None: - head_const = flopy.utils.HeadFile - else: - head_const = flopy.utils.FormattedHeadFile - if oc.cddnfm is None: - ddn_const = flopy.utils.HeadFile - else: - ddn_const = flopy.utils.FormattedHeadFile - - for k, lst in oc.stress_period_data.items(): - for v in lst: - if v.lower() == 'save head': - savehead = True - if v.lower() == 'save drawdown': - saveddn = True - if v.lower() == 'save budget': - savebud = True - except Exception as e: - print('error reading output filenames ' + - 'from OC package: 
{}'.format(str(e))) - - self.hpth = os.path.join(self.model_ws, - '{}.{}'.format(self.name, self.hext)) - self.dpth = os.path.join(self.model_ws, - '{}.{}'.format(self.name, self.dext)) - self.cpth = os.path.join(self.model_ws, - '{}.{}'.format(self.name, self.cext)) - - hdObj = None - ddObj = None - bdObj = None - - if savehead and os.path.exists(self.hpth): - hdObj = head_const(self.hpth, model=self, **kwargs) - - if saveddn and os.path.exists(self.dpth): - ddObj = ddn_const(self.dpth, model=self, **kwargs) - if savebud and os.path.exists(self.cpth): - bdObj = flopy.utils.CellBudgetFile(self.cpth, model=self, **kwargs) - - # get subsidence, if written - subObj = None - try: - - if self.sub is not None and "subsidence.hds" in self.sub.extension: - idx = self.sub.extension.index("subsidence.hds") - subObj = head_const( - os.path.join(self.model_ws, self.sub.file_name[idx]), - text="subsidence") - except Exception as e: - print("error loading subsidence.hds:{0}".format(str(e))) - - if as_dict: - oudic = {} - if subObj is not None: - oudic["subsidence.hds"] = subObj - if savehead and hdObj: - oudic[self.hpth] = hdObj - if saveddn and ddObj: - oudic[self.dpth] = ddObj - if savebud and bdObj: - oudic[self.cpth] = bdObj - return oudic - else: - return hdObj, ddObj, bdObj - - @staticmethod - def load(f, version='mf2005', exe_name='mf2005.exe', verbose=False, - model_ws='.', load_only=None, forgive=False, check=True): - """ - Load an existing MODFLOW model. - - Parameters - ---------- - f : str - Path to MODFLOW name file to load. - version : str, optional - MODFLOW version. Default 'mf2005', although can be modified on - loading packages unique to different MODFLOW versions. - exe_name : str, optional - MODFLOW executable name. Default 'mf2005.exe'. - verbose : bool, optional - Show messages that can be useful for debugging. Default False. - model_ws : str - Model workspace path. Default '.' or current directory. - load_only : list, str or None - List of case insensitive filetypes to load, e.g. ["bas6", "lpf"]. - One package can also be specified, e.g. "rch". Default is None, - which attempts to load all files. An empty list [] will not load - any additional packages than is necessary. At a minimum, "dis" or - "disu" is always loaded. - forgive : bool, optional - Option to raise exceptions on package load failure, which can be - useful for debugging. Default False. - check : boolean, optional - Check model input for common errors. Default True. 
- - Returns - ------- - ml : Modflow object - - Examples - -------- - - >>> import flopy - >>> ml = flopy.modflow.Modflow.load('model.nam') - - """ - - # similar to modflow command: if file does not exist , try file.nam - namefile_path = os.path.join(model_ws, f) - if (not os.path.isfile(namefile_path) and - os.path.isfile(namefile_path + '.nam')): - namefile_path += '.nam' - if not os.path.isfile(namefile_path): - raise IOError('cannot find name file: ' + str(namefile_path)) - - # Determine model name from 'f', without any extension or path - modelname = os.path.splitext(os.path.basename(f))[0] - - # if model_ws is None: - # model_ws = os.path.dirname(f) - if verbose: - print('\nCreating new model with name: {}\n{}\n' - .format(modelname, 50 * '-')) - - attribs = mfreadnam.attribs_from_namfile_header( - os.path.join(model_ws, f)) - - ml = Modflow(modelname, version=version, exe_name=exe_name, - verbose=verbose, model_ws=model_ws, **attribs) - - files_successfully_loaded = [] - files_not_loaded = [] - - # read name file - ext_unit_dict = mfreadnam.parsenamefile( - namefile_path, ml.mfnam_packages, verbose=verbose) - if ml.verbose: - print('\n{}\nExternal unit dictionary:\n{}\n{}\n' - .format(50 * '-', ext_unit_dict, 50 * '-')) - - # create a dict where key is the package name, value is unitnumber - ext_pkg_d = {v.filetype: k for (k, v) in ext_unit_dict.items()} - - # reset version based on packages in the name file - if 'NWT' in ext_pkg_d or 'UPW' in ext_pkg_d: - version = 'mfnwt' - if 'GLOBAL' in ext_pkg_d: - if version != "mf2k": - ml.glo = ModflowGlobal(ml) - version = 'mf2k' - if 'SMS' in ext_pkg_d: - version = 'mfusg' - if 'DISU' in ext_pkg_d: - version = 'mfusg' - ml.structured = False - # update the modflow version - ml.set_version(version) - - # reset unit number for glo file - if version == 'mf2k': - if 'GLOBAL' in ext_pkg_d: - unitnumber = ext_pkg_d['GLOBAL'] - filepth = os.path.basename(ext_unit_dict[unitnumber].filename) - ml.glo.unit_number = [unitnumber] - ml.glo.file_name = [filepth] - else: - # TODO: is this necessary? it's not done for LIST. 
- ml.glo.unit_number = [0] - ml.glo.file_name = [''] - - # reset unit number for list file - if 'LIST' in ext_pkg_d: - unitnumber = ext_pkg_d['LIST'] - filepth = os.path.basename(ext_unit_dict[unitnumber].filename) - ml.lst.unit_number = [unitnumber] - ml.lst.file_name = [filepth] - - # look for the free format flag in bas6 - bas_key = ext_pkg_d.get('BAS6') - if bas_key is not None: - bas = ext_unit_dict[bas_key] - start = bas.filehandle.tell() - line = bas.filehandle.readline() - while line.startswith("#"): - line = bas.filehandle.readline() - if "FREE" in line.upper(): - ml.free_format_input = True - bas.filehandle.seek(start) - if verbose: - print("ModflowBas6 free format:{0}\n".format(ml.free_format_input)) - - # load dis - dis_key = ext_pkg_d.get('DIS') or ext_pkg_d.get('DISU') - if dis_key is None: - raise KeyError('discretization entry not found in nam file') - disnamdata = ext_unit_dict[dis_key] - dis = disnamdata.package.load(disnamdata.filehandle, ml, - ext_unit_dict=ext_unit_dict, - check=False) - files_successfully_loaded.append(disnamdata.filename) - if ml.verbose: - print(' {:4s} package load...success'.format(dis.name[0])) - assert ml.pop_key_list.pop() == dis_key - ext_unit_dict.pop(dis_key).filehandle.close() - - dis.start_datetime = ml._start_datetime - - if load_only is None: - # load all packages/files - load_only = ext_pkg_d.keys() - else: # check items in list - if not isinstance(load_only, list): - load_only = [load_only] - not_found = [] - for i, filetype in enumerate(load_only): - load_only[i] = filetype = filetype.upper() - if filetype not in ext_pkg_d: - not_found.append(filetype) - if not_found: - raise KeyError( - "the following load_only entries were not found " - "in the ext_unit_dict: " + str(not_found)) - - # zone, mult, pval - if 'PVAL' in ext_pkg_d: - ml.mfpar.set_pval(ml, ext_unit_dict) - assert ml.pop_key_list.pop() == ext_pkg_d.get('PVAL') - if 'ZONE' in ext_pkg_d: - ml.mfpar.set_zone(ml, ext_unit_dict) - assert ml.pop_key_list.pop() == ext_pkg_d.get('ZONE') - if 'MULT' in ext_pkg_d: - ml.mfpar.set_mult(ml, ext_unit_dict) - assert ml.pop_key_list.pop() == ext_pkg_d.get('MULT') - - # try loading packages in ext_unit_dict - for key, item in ext_unit_dict.items(): - if item.package is not None: - if item.filetype in load_only: - package_load_args = getfullargspec(item.package.load)[0] - if forgive: - try: - if "check" in package_load_args: - item.package.load(item.filehandle, ml, - ext_unit_dict=ext_unit_dict, - check=False) - else: - item.package.load(item.filehandle, ml, - ext_unit_dict=ext_unit_dict) - files_successfully_loaded.append(item.filename) - if ml.verbose: - print(' {:4s} package load...success' - .format(item.filetype)) - except Exception as e: - ml.load_fail = True - if ml.verbose: - msg = 3 * ' ' + \ - '{:4s} '.format(item.filetype) + \ - 'package load...failed\n' + \ - 3 * ' ' + '{!s}'.format(e) - print(msg) - files_not_loaded.append(item.filename) - else: - if "check" in package_load_args: - item.package.load(item.filehandle, ml, - ext_unit_dict=ext_unit_dict, - check=False) - else: - item.package.load(item.filehandle, ml, - ext_unit_dict=ext_unit_dict) - files_successfully_loaded.append(item.filename) - if ml.verbose: - msg = 3 * ' ' + '{:4s} '.format(item.filetype) + \ - 'package load...success' - print(msg) - else: - if ml.verbose: - msg = 3 * ' ' + '{:4s} '.format(item.filetype) + \ - 'package load...skipped' - print(msg) - files_not_loaded.append(item.filename) - elif "data" not in item.filetype.lower(): - 
files_not_loaded.append(item.filename) - if ml.verbose: - msg = 3 * ' ' + '{:4s} '.format(item.filetype) + \ - 'package load...skipped' - print(msg) - elif "data" in item.filetype.lower(): - if ml.verbose: - msg = 3 * ' ' + '{:s} '.format(item.filetype) + \ - 'file load...skipped\n' + 6 * ' ' + \ - '{}'.format(os.path.basename(item.filename)) - print(msg) - if key not in ml.pop_key_list: - # do not add unit number (key) if it already exists - if key not in ml.external_units: - ml.external_fnames.append(item.filename) - ml.external_units.append(key) - ml.external_binflag.append("binary" - in item.filetype.lower()) - ml.external_output.append(False) - else: - raise KeyError('unhandled case: {}, {}'.format(key, item)) - - # pop binary output keys and any external file units that are now - # internal - for key in ml.pop_key_list: - try: - ml.remove_external(unit=key) - item = ext_unit_dict.pop(key) - if hasattr(item.filehandle, 'close'): - item.filehandle.close() - except KeyError: - if ml.verbose: - msg = '\nWARNING:\n External file ' + \ - 'unit {} '.format(key) + \ - 'does not exist in ext_unit_dict.' - print(msg) - - # write message indicating packages that were successfully loaded - if ml.verbose: - msg = 3 * ' ' + 'The following ' + \ - '{} '.format(len(files_successfully_loaded)) + \ - 'packages were successfully loaded.' - print('') - print(msg) - for fname in files_successfully_loaded: - print(' ' + os.path.basename(fname)) - if len(files_not_loaded) > 0: - msg = 3 * ' ' + 'The following ' + \ - '{} '.format(len(files_not_loaded)) + \ - 'packages were not loaded.' - print(msg) - for fname in files_not_loaded: - print(' ' + os.path.basename(fname)) - if check: - ml.check(f='{}.chk'.format(ml.name), verbose=ml.verbose, level=0) - - # return model object - return ml +""" +mf module. Contains the ModflowGlobal, ModflowList, and Modflow classes. + + +""" + +import os +import flopy +from inspect import getfullargspec +from ..mbase import BaseModel +from ..pakbase import Package +from ..utils import mfreadnam +from ..discretization.structuredgrid import StructuredGrid +from ..discretization.grid import Grid +from flopy.discretization.modeltime import ModelTime +from .mfpar import ModflowPar + + +class ModflowGlobal(Package): + """ + ModflowGlobal Package class + + """ + + def __init__(self, model, extension='glo'): + Package.__init__(self, model, extension, 'GLOBAL', 1) + return + + def __repr__(self): + return 'Global Package class' + + def write_file(self): + # Not implemented for global class + return + + +class ModflowList(Package): + """ + ModflowList Package class + + """ + + def __init__(self, model, extension='list', unitnumber=2): + Package.__init__(self, model, extension, 'LIST', unitnumber) + return + + def __repr__(self): + return 'List Package class' + + def write_file(self): + # Not implemented for list class + return + + +class Modflow(BaseModel): + """ + MODFLOW Model Class. + + Parameters + ---------- + modelname : string, optional + Name of model. This string will be used to name the MODFLOW input + that are created with write_model. (the default is 'modflowtest') + namefile_ext : string, optional + Extension for the namefile (the default is 'nam') + version : string, optional + Version of MODFLOW to use (the default is 'mf2005'). + exe_name : string, optional + The name of the executable to use (the default is + 'mf2005'). + listunit : integer, optional + Unit number for the list file (the default is 2). + model_ws : string, optional + model workspace. 
Directory name to create model data sets.
+        (default is the present working directory).
+    external_path : string
+        Location for external files (default is None).
+    verbose : boolean, optional
+        Print additional information to the screen (default is False).
+    load : boolean, optional
+        (default is True).
+    silent : integer
+        (default is 0)
+
+    Attributes
+    ----------
+
+    Methods
+    -------
+
+    See Also
+    --------
+
+    Notes
+    -----
+
+    Examples
+    --------
+
+    >>> import flopy
+    >>> m = flopy.modflow.Modflow()
+
+    """
+
+    def __init__(self, modelname='modflowtest', namefile_ext='nam',
+                 version='mf2005', exe_name='mf2005.exe',
+                 structured=True, listunit=2, model_ws='.', external_path=None,
+                 verbose=False, **kwargs):
+        BaseModel.__init__(self, modelname, namefile_ext, exe_name, model_ws,
+                           structured=structured, verbose=verbose, **kwargs)
+        self.version_types = {'mf2k': 'MODFLOW-2000', 'mf2005': 'MODFLOW-2005',
+                              'mfnwt': 'MODFLOW-NWT', 'mfusg': 'MODFLOW-USG'}
+
+        self.set_version(version)
+
+        if self.version == 'mf2k':
+            self.glo = ModflowGlobal(self)
+
+        self.lst = ModflowList(self, unitnumber=listunit)
+        # check that structured=False is only specified for mfusg models
+        if not self.structured:
+            assert 'mfusg' in self.version, \
+                'structured=False can only be specified for mfusg models'
+
+        # external option stuff
+        self.array_free_format = True
+        self.array_format = 'modflow'
+        # self.external_fnames = []
+        # self.external_units = []
+        # self.external_binflag = []
+
+        self.load_fail = False
+        # the starting external data unit number
+        self._next_ext_unit = 1000
+
+        if external_path is not None:
+            if os.path.exists(os.path.join(model_ws, external_path)):
+                print("Note: external_path " + str(external_path) +
+                      " already exists")
+            else:
+                os.makedirs(os.path.join(model_ws, external_path))
+        self.external_path = external_path
+        self.verbose = verbose
+        self.mfpar = ModflowPar()
+
+        # output file info
+        self.hext = 'hds'
+        self.dext = 'ddn'
+        self.cext = 'cbc'
+        self.hpth = None
+        self.dpth = None
+        self.cpth = None
+
+        # Create a dictionary that maps each name-file file type to its
+        # package class; it is used when loading existing models.
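+        # Orientation note (example values hypothetical): when a model is
+        # loaded, each file type read from the name file is matched,
+        # case-insensitively, against the keys below by
+        # mfreadnam.parsenamefile, e.g. "LPF" resolves to
+        # flopy.modflow.ModflowLpf, whose load() method then parses that
+        # input file.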
+ self.mfnam_packages = { + "zone": flopy.modflow.ModflowZon, + "mult": flopy.modflow.ModflowMlt, + "pval": flopy.modflow.ModflowPval, + "bas6": flopy.modflow.ModflowBas, + "dis": flopy.modflow.ModflowDis, + "disu": flopy.modflow.ModflowDisU, + "bcf6": flopy.modflow.ModflowBcf, + "lpf": flopy.modflow.ModflowLpf, + "hfb6": flopy.modflow.ModflowHfb, + "chd": flopy.modflow.ModflowChd, + "fhb": flopy.modflow.ModflowFhb, + "wel": flopy.modflow.ModflowWel, + "mnw1": flopy.modflow.ModflowMnw1, + "mnw2": flopy.modflow.ModflowMnw2, + "mnwi": flopy.modflow.ModflowMnwi, + "drn": flopy.modflow.ModflowDrn, + "drt": flopy.modflow.ModflowDrt, + "rch": flopy.modflow.ModflowRch, + "evt": flopy.modflow.ModflowEvt, + "ghb": flopy.modflow.ModflowGhb, + "gmg": flopy.modflow.ModflowGmg, + "lmt6": flopy.modflow.ModflowLmt, + "lmt7": flopy.modflow.ModflowLmt, + "riv": flopy.modflow.ModflowRiv, + "str": flopy.modflow.ModflowStr, + "swi2": flopy.modflow.ModflowSwi2, + "pcg": flopy.modflow.ModflowPcg, + "pcgn": flopy.modflow.ModflowPcgn, + "nwt": flopy.modflow.ModflowNwt, + "pks": flopy.modflow.ModflowPks, + "sms": flopy.modflow.ModflowSms, + "sfr": flopy.modflow.ModflowSfr2, + "lak": flopy.modflow.ModflowLak, + "gage": flopy.modflow.ModflowGage, + "sip": flopy.modflow.ModflowSip, + "sor": flopy.modflow.ModflowSor, + "de4": flopy.modflow.ModflowDe4, + "oc": flopy.modflow.ModflowOc, + "uzf": flopy.modflow.ModflowUzf1, + "upw": flopy.modflow.ModflowUpw, + "sub": flopy.modflow.ModflowSub, + "swt": flopy.modflow.ModflowSwt, + "hyd": flopy.modflow.ModflowHyd, + "hob": flopy.modflow.ModflowHob, + "chob": flopy.modflow.ModflowFlwob, + "gbob": flopy.modflow.ModflowFlwob, + "drob": flopy.modflow.ModflowFlwob, + "rvob": flopy.modflow.ModflowFlwob, + "vdf": flopy.seawat.SeawatVdf, + "vsc": flopy.seawat.SeawatVsc + } + return + + def __repr__(self): + nrow, ncol, nlay, nper = self.get_nrow_ncol_nlay_nper() + if nrow is not None: + # structured case + s = ('MODFLOW {} layer(s) {} row(s) {} column(s) ' + '{} stress period(s)'.format(nlay, nrow, ncol, nper)) + else: + # unstructured case + nodes = ncol.sum() + nodelay = ' '.join(str(i) for i in ncol) + print(nodelay, nlay, nper) + s = ('MODFLOW unstructured\n' + ' nodes = {}\n' + ' layers = {}\n' + ' periods = {}\n' + ' nodelay = {}\n'.format(nodes, nlay, nper, ncol)) + return s + + # + # def next_ext_unit(self): + # """ + # Function to encapsulate next_ext_unit attribute + # + # """ + # next_unit = self.__next_ext_unit + 1 + # self.__next_ext_unit += 1 + # return next_unit + + @property + def modeltime(self): + # build model time + data_frame = {'perlen': self.dis.perlen.array, + 'nstp': self.dis.nstp.array, + 'tsmult': self.dis.tsmult.array} + self._model_time = ModelTime(data_frame, + self.dis.itmuni_dict[self.dis.itmuni], + self.dis.start_datetime, + self.dis.steady.array) + return self._model_time + + @property + def modelgrid(self): + if not self._mg_resync: + return self._modelgrid + + if self.has_package('bas6'): + ibound = self.bas6.ibound.array + else: + ibound = None + + if self.get_package('disu') is not None: + self._modelgrid = Grid(grid_type='USG-Unstructured', + top=self.disu.top, botm=self.disu.bot, + idomain=ibound, proj4=self._modelgrid.proj4, + epsg=self._modelgrid.epsg, + xoff=self._modelgrid.xoffset, + yoff=self._modelgrid.yoffset, + angrot=self._modelgrid.angrot) + print('WARNING: Model grid functionality limited for unstructured ' + 'grid.') + else: + # build structured grid + self._modelgrid = StructuredGrid(self.dis.delc.array, + self.dis.delr.array, + 
self.dis.top.array, + self.dis.botm.array, ibound, + self.dis.lenuni, + proj4=self._modelgrid.proj4, + epsg=self._modelgrid.epsg, + xoff=self._modelgrid.xoffset, + yoff=self._modelgrid.yoffset, + angrot=self._modelgrid.angrot, + nlay=self.dis.nlay, + laycbd=self.dis.laycbd) + + # resolve offsets + xoff = self._modelgrid.xoffset + if xoff is None: + if self._xul is not None: + xoff = self._modelgrid._xul_to_xll(self._xul) + else: + xoff = 0.0 + yoff = self._modelgrid.yoffset + if yoff is None: + if self._yul is not None: + yoff = self._modelgrid._yul_to_yll(self._yul) + else: + yoff = 0.0 + self._modelgrid.set_coord_info(xoff, yoff, self._modelgrid.angrot, + self._modelgrid.epsg, + self._modelgrid.proj4) + self._mg_resync = not self._modelgrid.is_complete + return self._modelgrid + + @modelgrid.setter + def modelgrid(self, value): + self._mg_resync = False + self._modelgrid = value + + @property + def solver_tols(self): + if self.pcg is not None: + return self.pcg.hclose, self.pcg.rclose + elif self.nwt is not None: + return self.nwt.headtol, self.nwt.fluxtol + elif self.sip is not None: + return self.sip.hclose, -999 + elif self.gmg is not None: + return self.gmg.hclose, self.gmg.rclose + return None + + @property + def nlay(self): + if (self.dis): + return self.dis.nlay + elif (self.disu): + return self.disu.nlay + else: + return 0 + + @property + def nrow(self): + if (self.dis): + return self.dis.nrow + else: + return 0 + + @property + def ncol(self): + if (self.dis): + return self.dis.ncol + else: + return 0 + + @property + def nper(self): + if (self.dis): + return self.dis.nper + elif (self.disu): + return self.disu.nper + else: + return 0 + + @property + def ncpl(self): + if (self.dis): + return self.dis.nrow * self.dis.ncol + elif (self.disu): + return self.disu.ncpl + else: + return 0 + + @property + def nrow_ncol_nlay_nper(self): + # structured dis + dis = self.get_package('DIS') + if dis: + return dis.nrow, dis.ncol, dis.nlay, dis.nper + # unstructured dis + dis = self.get_package('DISU') + if dis: + return None, dis.nodelay.array[:], dis.nlay, dis.nper + # no dis + return 0, 0, 0, 0 + + def get_nrow_ncol_nlay_nper(self): + return self.nrow_ncol_nlay_nper + + def get_ifrefm(self): + bas = self.get_package('BAS6') + if (bas): + return bas.ifrefm + else: + return False + + def set_ifrefm(self, value=True): + if not isinstance(value, bool): + print('Error: set_ifrefm passed value must be a boolean') + return False + self.array_free_format = value + bas = self.get_package('BAS6') + if (bas): + bas.ifrefm = value + else: + return False + + def _set_name(self, value): + # Overrides BaseModel's setter for name property + BaseModel._set_name(self, value) + + if self.version == 'mf2k': + for i in range(len(self.glo.extension)): + self.glo.file_name[i] = self.name + '.' + self.glo.extension[i] + + for i in range(len(self.lst.extension)): + self.lst.file_name[i] = self.name + '.' + self.lst.extension[i] + + def write_name_file(self): + """ + Write the model name file. 
+ + """ + fn_path = os.path.join(self.model_ws, self.namefile) + f_nam = open(fn_path, 'w') + f_nam.write('{}\n'.format(self.heading)) + if self.structured: + f_nam.write('#' + str(self.modelgrid)) + f_nam.write("; start_datetime:{0}\n".format(self.start_datetime)) + if self.version == 'mf2k': + if self.glo.unit_number[0] > 0: + f_nam.write('{:14s} {:5d} {}\n'.format(self.glo.name[0], + self.glo.unit_number[ + 0], + self.glo.file_name[0])) + f_nam.write('{:14s} {:5d} {}\n'.format(self.lst.name[0], + self.lst.unit_number[0], + self.lst.file_name[0])) + f_nam.write('{}'.format(self.get_name_file_entries())) + + # write the external files + for u, f, b, o in zip(self.external_units, self.external_fnames, + self.external_binflag, self.external_output): + if u == 0: + continue + replace_text = '' + if o: + replace_text = 'REPLACE' + if b: + line = 'DATA(BINARY) {0:5d} '.format(u) + f + \ + replace_text + '\n' + f_nam.write(line) + else: + f_nam.write('DATA {0:5d} '.format(u) + f + '\n') + + # write the output files + for u, f, b in zip(self.output_units, self.output_fnames, + self.output_binflag): + if u == 0: + continue + if b: + f_nam.write( + 'DATA(BINARY) {0:5d} '.format(u) + f + ' REPLACE\n') + else: + f_nam.write('DATA {0:5d} '.format(u) + f + '\n') + + # close the name file + f_nam.close() + return + + def set_model_units(self, iunit0=None): + """ + Write the model name file. + + """ + if iunit0 is None: + iunit0 = 1001 + + # initialize starting unit number + self.next_unit(iunit0) + + if self.version == 'mf2k': + # update global file unit number + if self.glo.unit_number[0] > 0: + self.glo.unit_number[0] = self.next_unit() + + # update lst file unit number + self.lst.unit_number[0] = self.next_unit() + + # update package unit numbers + for p in self.packagelist: + p.unit_number[0] = self.next_unit() + + # update external unit numbers + for i, iu in enumerate(self.external_units): + if iu == 0: + continue + self.external_units[i] = self.next_unit() + + # update output files unit numbers + oc = self.get_package('OC') + output_units0 = list(self.output_units) + for i, iu in enumerate(self.output_units): + if iu == 0: + continue + iu1 = self.next_unit() + self.output_units[i] = iu1 + # update oc files + if oc is not None: + if oc.iuhead == iu: + oc.iuhead = iu1 + elif oc.iuddn == iu: + oc.iuddn = iu1 + + # replace value in ipakcb + for p in self.packagelist: + try: + iu0 = p.ipakcb + if iu0 in output_units0: + j = output_units0.index(iu0) + p.ipakcb = self.output_units[j] + except: + if self.verbose: + print(' could not replace value in ipakcb') + + return + + def load_results(self, **kwargs): + + # remove model if passed as a kwarg + if 'model' in kwargs: + kwargs.pop('model') + + as_dict = False + if "as_dict" in kwargs: + as_dict = bool(kwargs.pop("as_dict")) + + savehead = False + saveddn = False + savebud = False + + # check for oc + try: + oc = self.get_package('OC') + self.hext = oc.extension[1] + self.dext = oc.extension[2] + self.cext = oc.extension[3] + if oc.chedfm is None: + head_const = flopy.utils.HeadFile + else: + head_const = flopy.utils.FormattedHeadFile + if oc.cddnfm is None: + ddn_const = flopy.utils.HeadFile + else: + ddn_const = flopy.utils.FormattedHeadFile + + for k, lst in oc.stress_period_data.items(): + for v in lst: + if v.lower() == 'save head': + savehead = True + if v.lower() == 'save drawdown': + saveddn = True + if v.lower() == 'save budget': + savebud = True + except Exception as e: + print('error reading output filenames ' + + 'from OC package: 
{}'.format(str(e))) + + self.hpth = os.path.join(self.model_ws, + '{}.{}'.format(self.name, self.hext)) + self.dpth = os.path.join(self.model_ws, + '{}.{}'.format(self.name, self.dext)) + self.cpth = os.path.join(self.model_ws, + '{}.{}'.format(self.name, self.cext)) + + hdObj = None + ddObj = None + bdObj = None + + if savehead and os.path.exists(self.hpth): + hdObj = head_const(self.hpth, model=self, **kwargs) + + if saveddn and os.path.exists(self.dpth): + ddObj = ddn_const(self.dpth, model=self, **kwargs) + if savebud and os.path.exists(self.cpth): + bdObj = flopy.utils.CellBudgetFile(self.cpth, model=self, **kwargs) + + # get subsidence, if written + subObj = None + try: + + if self.sub is not None and "subsidence.hds" in self.sub.extension: + idx = self.sub.extension.index("subsidence.hds") + subObj = head_const( + os.path.join(self.model_ws, self.sub.file_name[idx]), + text="subsidence") + except Exception as e: + print("error loading subsidence.hds:{0}".format(str(e))) + + if as_dict: + oudic = {} + if subObj is not None: + oudic["subsidence.hds"] = subObj + if savehead and hdObj: + oudic[self.hpth] = hdObj + if saveddn and ddObj: + oudic[self.dpth] = ddObj + if savebud and bdObj: + oudic[self.cpth] = bdObj + return oudic + else: + return hdObj, ddObj, bdObj + + @staticmethod + def load(f, version='mf2005', exe_name='mf2005.exe', verbose=False, + model_ws='.', load_only=None, forgive=False, check=True): + """ + Load an existing MODFLOW model. + + Parameters + ---------- + f : str + Path to MODFLOW name file to load. + version : str, optional + MODFLOW version. Default 'mf2005', although can be modified on + loading packages unique to different MODFLOW versions. + exe_name : str, optional + MODFLOW executable name. Default 'mf2005.exe'. + verbose : bool, optional + Show messages that can be useful for debugging. Default False. + model_ws : str + Model workspace path. Default '.' or current directory. + load_only : list, str or None + List of case insensitive filetypes to load, e.g. ["bas6", "lpf"]. + One package can also be specified, e.g. "rch". Default is None, + which attempts to load all files. An empty list [] will not load + any additional packages than is necessary. At a minimum, "dis" or + "disu" is always loaded. + forgive : bool, optional + Option to raise exceptions on package load failure, which can be + useful for debugging. Default False. + check : boolean, optional + Check model input for common errors. Default True. 
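A brief usage sketch of the loader options documented above; the name-file path and the package selection here are hypothetical, not part of the source:

    import flopy

    # load only the BAS6 and LPF packages; forgive=True reports package
    # load failures instead of raising, and check=False skips input checks
    ml = flopy.modflow.Modflow.load('model.nam', model_ws='.',
                                    load_only=['bas6', 'lpf'],
                                    forgive=True, check=False)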
+ + Returns + ------- + ml : Modflow object + + Examples + -------- + + >>> import flopy + >>> ml = flopy.modflow.Modflow.load('model.nam') + + """ + + # similar to modflow command: if file does not exist , try file.nam + namefile_path = os.path.join(model_ws, f) + if (not os.path.isfile(namefile_path) and + os.path.isfile(namefile_path + '.nam')): + namefile_path += '.nam' + if not os.path.isfile(namefile_path): + raise IOError('cannot find name file: ' + str(namefile_path)) + + # Determine model name from 'f', without any extension or path + modelname = os.path.splitext(os.path.basename(f))[0] + + # if model_ws is None: + # model_ws = os.path.dirname(f) + if verbose: + print('\nCreating new model with name: {}\n{}\n' + .format(modelname, 50 * '-')) + + attribs = mfreadnam.attribs_from_namfile_header( + os.path.join(model_ws, f)) + + ml = Modflow(modelname, version=version, exe_name=exe_name, + verbose=verbose, model_ws=model_ws, **attribs) + + files_successfully_loaded = [] + files_not_loaded = [] + + # read name file + ext_unit_dict = mfreadnam.parsenamefile( + namefile_path, ml.mfnam_packages, verbose=verbose) + if ml.verbose: + print('\n{}\nExternal unit dictionary:\n{}\n{}\n' + .format(50 * '-', ext_unit_dict, 50 * '-')) + + # create a dict where key is the package name, value is unitnumber + ext_pkg_d = {v.filetype: k for (k, v) in ext_unit_dict.items()} + + # reset version based on packages in the name file + if 'NWT' in ext_pkg_d or 'UPW' in ext_pkg_d: + version = 'mfnwt' + if 'GLOBAL' in ext_pkg_d: + if version != "mf2k": + ml.glo = ModflowGlobal(ml) + version = 'mf2k' + if 'SMS' in ext_pkg_d: + version = 'mfusg' + if 'DISU' in ext_pkg_d: + version = 'mfusg' + ml.structured = False + # update the modflow version + ml.set_version(version) + + # reset unit number for glo file + if version == 'mf2k': + if 'GLOBAL' in ext_pkg_d: + unitnumber = ext_pkg_d['GLOBAL'] + filepth = os.path.basename(ext_unit_dict[unitnumber].filename) + ml.glo.unit_number = [unitnumber] + ml.glo.file_name = [filepth] + else: + # TODO: is this necessary? it's not done for LIST. 
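+                # unit 0 keeps write_name_file from emitting a GLOBAL entry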
+ ml.glo.unit_number = [0] + ml.glo.file_name = [''] + + # reset unit number for list file + if 'LIST' in ext_pkg_d: + unitnumber = ext_pkg_d['LIST'] + filepth = os.path.basename(ext_unit_dict[unitnumber].filename) + ml.lst.unit_number = [unitnumber] + ml.lst.file_name = [filepth] + + # look for the free format flag in bas6 + bas_key = ext_pkg_d.get('BAS6') + if bas_key is not None: + bas = ext_unit_dict[bas_key] + start = bas.filehandle.tell() + line = bas.filehandle.readline() + while line.startswith("#"): + line = bas.filehandle.readline() + if "FREE" in line.upper(): + ml.free_format_input = True + bas.filehandle.seek(start) + if verbose: + print("ModflowBas6 free format:{0}\n".format(ml.free_format_input)) + + # load dis + dis_key = ext_pkg_d.get('DIS') or ext_pkg_d.get('DISU') + if dis_key is None: + raise KeyError('discretization entry not found in nam file') + disnamdata = ext_unit_dict[dis_key] + dis = disnamdata.package.load(disnamdata.filehandle, ml, + ext_unit_dict=ext_unit_dict, + check=False) + files_successfully_loaded.append(disnamdata.filename) + if ml.verbose: + print(' {:4s} package load...success'.format(dis.name[0])) + assert ml.pop_key_list.pop() == dis_key + ext_unit_dict.pop(dis_key).filehandle.close() + + dis.start_datetime = ml._start_datetime + + if load_only is None: + # load all packages/files + load_only = ext_pkg_d.keys() + else: # check items in list + if not isinstance(load_only, list): + load_only = [load_only] + not_found = [] + for i, filetype in enumerate(load_only): + load_only[i] = filetype = filetype.upper() + if filetype not in ext_pkg_d: + not_found.append(filetype) + if not_found: + raise KeyError( + "the following load_only entries were not found " + "in the ext_unit_dict: " + str(not_found)) + + # zone, mult, pval + if 'PVAL' in ext_pkg_d: + ml.mfpar.set_pval(ml, ext_unit_dict) + assert ml.pop_key_list.pop() == ext_pkg_d.get('PVAL') + if 'ZONE' in ext_pkg_d: + ml.mfpar.set_zone(ml, ext_unit_dict) + assert ml.pop_key_list.pop() == ext_pkg_d.get('ZONE') + if 'MULT' in ext_pkg_d: + ml.mfpar.set_mult(ml, ext_unit_dict) + assert ml.pop_key_list.pop() == ext_pkg_d.get('MULT') + + # try loading packages in ext_unit_dict + for key, item in ext_unit_dict.items(): + if item.package is not None: + if item.filetype in load_only: + package_load_args = getfullargspec(item.package.load)[0] + if forgive: + try: + if "check" in package_load_args: + item.package.load(item.filehandle, ml, + ext_unit_dict=ext_unit_dict, + check=False) + else: + item.package.load(item.filehandle, ml, + ext_unit_dict=ext_unit_dict) + files_successfully_loaded.append(item.filename) + if ml.verbose: + print(' {:4s} package load...success' + .format(item.filetype)) + except Exception as e: + ml.load_fail = True + if ml.verbose: + msg = 3 * ' ' + \ + '{:4s} '.format(item.filetype) + \ + 'package load...failed\n' + \ + 3 * ' ' + '{!s}'.format(e) + print(msg) + files_not_loaded.append(item.filename) + else: + if "check" in package_load_args: + item.package.load(item.filehandle, ml, + ext_unit_dict=ext_unit_dict, + check=False) + else: + item.package.load(item.filehandle, ml, + ext_unit_dict=ext_unit_dict) + files_successfully_loaded.append(item.filename) + if ml.verbose: + msg = 3 * ' ' + '{:4s} '.format(item.filetype) + \ + 'package load...success' + print(msg) + else: + if ml.verbose: + msg = 3 * ' ' + '{:4s} '.format(item.filetype) + \ + 'package load...skipped' + print(msg) + files_not_loaded.append(item.filename) + elif "data" not in item.filetype.lower(): + 
files_not_loaded.append(item.filename) + if ml.verbose: + msg = 3 * ' ' + '{:4s} '.format(item.filetype) + \ + 'package load...skipped' + print(msg) + elif "data" in item.filetype.lower(): + if ml.verbose: + msg = 3 * ' ' + '{:s} '.format(item.filetype) + \ + 'file load...skipped\n' + 6 * ' ' + \ + '{}'.format(os.path.basename(item.filename)) + print(msg) + if key not in ml.pop_key_list: + # do not add unit number (key) if it already exists + if key not in ml.external_units: + ml.external_fnames.append(item.filename) + ml.external_units.append(key) + ml.external_binflag.append("binary" + in item.filetype.lower()) + ml.external_output.append(False) + else: + raise KeyError('unhandled case: {}, {}'.format(key, item)) + + # pop binary output keys and any external file units that are now + # internal + for key in ml.pop_key_list: + try: + ml.remove_external(unit=key) + item = ext_unit_dict.pop(key) + if hasattr(item.filehandle, 'close'): + item.filehandle.close() + except KeyError: + if ml.verbose: + msg = '\nWARNING:\n External file ' + \ + 'unit {} '.format(key) + \ + 'does not exist in ext_unit_dict.' + print(msg) + + # write message indicating packages that were successfully loaded + if ml.verbose: + msg = 3 * ' ' + 'The following ' + \ + '{} '.format(len(files_successfully_loaded)) + \ + 'packages were successfully loaded.' + print('') + print(msg) + for fname in files_successfully_loaded: + print(' ' + os.path.basename(fname)) + if len(files_not_loaded) > 0: + msg = 3 * ' ' + 'The following ' + \ + '{} '.format(len(files_not_loaded)) + \ + 'packages were not loaded.' + print(msg) + for fname in files_not_loaded: + print(' ' + os.path.basename(fname)) + if check: + ml.check(f='{}.chk'.format(ml.name), verbose=ml.verbose, level=0) + + # return model object + return ml diff --git a/flopy/modflow/mfaddoutsidefile.py b/flopy/modflow/mfaddoutsidefile.py index 95731c7e5a..7d81eefa70 100644 --- a/flopy/modflow/mfaddoutsidefile.py +++ b/flopy/modflow/mfaddoutsidefile.py @@ -1,19 +1,19 @@ -from numpy import atleast_2d -from ..pakbase import Package - - -class mfaddoutsidefile(Package): - """ - Add a file for which you have a MODFLOW input file - """ - - def __init__(self, model, name, extension, unitnumber): - Package.__init__(self, model, extension, name, unitnumber, - allowDuplicates=True) # Call ancestor's init to set self.parent, extension, name and unit number - self.parent.add_package(self) - - def __repr__(self): - return 'Outside Package class' - - def write_file(self): - pass +from numpy import atleast_2d +from ..pakbase import Package + + +class mfaddoutsidefile(Package): + """ + Add a file for which you have a MODFLOW input file + """ + + def __init__(self, model, name, extension, unitnumber): + Package.__init__(self, model, extension, name, unitnumber, + allowDuplicates=True) # Call ancestor's init to set self.parent, extension, name and unit number + self.parent.add_package(self) + + def __repr__(self): + return 'Outside Package class' + + def write_file(self): + pass diff --git a/flopy/modflow/mfbas.py b/flopy/modflow/mfbas.py index 8c0ecc766b..3d0d99094f 100644 --- a/flopy/modflow/mfbas.py +++ b/flopy/modflow/mfbas.py @@ -1,363 +1,363 @@ -""" -mfbas module. Contains the ModflowBas class. Note that the user can access -the ModflowBas class as `flopy.modflow.ModflowBas`. - -Additional information for this MODFLOW package can be found at the `Online -MODFLOW Guide -`_. 
- -""" - -import re -import sys -import numpy as np -from ..pakbase import Package -from ..utils import Util3d, get_neighbors - - -class ModflowBas(Package): - """ - MODFLOW Basic Package Class. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to which - this package will be added. - ibound : array of ints, optional - The ibound array (the default is 1). - strt : array of floats, optional - An array of starting heads (the default is 1.0). - ifrefm : bool, optional - Indication if data should be read using free format (the default is - True). - ixsec : bool, optional - Indication of whether model is cross sectional or not (the default is - False). - ichflg : bool, optional - Flag indicating that flows between constant head cells should be - calculated (the default is False). - stoper : float - percent discrepancy that is compared to the budget percent discrepancy - continue when the solver convergence criteria are not met. Execution - will unless the budget percent discrepancy is greater than stoper - (default is None). MODFLOW-2005 only - hnoflo : float - Head value assigned to inactive cells (default is -999.99). - extension : str, optional - File extension (default is 'bas'). - unitnumber : int, optional - FORTRAN unit number for this package (default is None). - filenames : str or list of str - Filenames to use for the package. If filenames=None the package name - will be created using the model name and package extension. If a single - string is passed the package name will be set to the string. - Default is None. - - Attributes - ---------- - heading : str - Text string written to top of package input file. - options : list of str - Can be either or a combination of XSECTION, CHTOCH or FREE. - ifrefm : bool - Indicates whether or not packages will be written as free format. - - Methods - ------- - - See Also - -------- - - Notes - ----- - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> bas = flopy.modflow.ModflowBas(m) - - """ - - @staticmethod - def ftype(): - return 'BAS6' - - @staticmethod - def defaultunit(): - return 13 - - def __init__(self, model, ibound=1, strt=1.0, ifrefm=True, ixsec=False, - ichflg=False, stoper=None, hnoflo=-999.99, extension='bas', - unitnumber=None, filenames=None): - """ - Package constructor. - - """ - - if unitnumber is None: - unitnumber = ModflowBas.defaultunit() - - # set filenames - if filenames is None: - filenames = [None] - elif isinstance(filenames, str): - filenames = [filenames] - - # Fill namefile items - name = [ModflowBas.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - self.url = 'bas6.htm' - - nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper - self.ibound = Util3d(model, (nlay, nrow, ncol), np.int32, ibound, - name='ibound', locat=self.unit_number[0]) - self.strt = Util3d(model, (nlay, nrow, ncol), np.float32, strt, - name='strt', locat=self.unit_number[0]) - self.heading = '# {} package for '.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' 
- self.options = '' - self.ixsec = ixsec - self.ichflg = ichflg - self.stoper = stoper - - # self.ifrefm = ifrefm - # model.array_free_format = ifrefm - model.free_format_input = ifrefm - - self.hnoflo = hnoflo - self.parent.add_package(self) - return - - @property - def ifrefm(self): - return self.parent.free_format_input - - def __setattr__(self, key, value): - if key == "ifrefm": - self.parent.free_format_input = value - else: - super(ModflowBas, self).__setattr__(key, value) - - def check(self, f=None, verbose=True, level=1, checktype=None): - """ - Check package data for common errors. - - Parameters - ---------- - f : str or file handle - String defining file name or file handle for summary file - of check method output. If a sting is passed a file handle - is created. If f is None, check method does not write - results to a summary file. (default is None) - verbose : bool - Boolean flag used to determine if check method results are - written to the screen - level : int - Check method analysis level. If level=0, summary checks are - performed. If level=1, full checks are performed. - - Returns - ------- - None - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow.load('model.nam') - >>> m.bas6.check() - - """ - chk = self._get_check(f, verbose, level, checktype) - - neighbors = get_neighbors(self.ibound.array) - neighbors[ - np.isnan(neighbors)] = 0 # set neighbors at edges to 0 (inactive) - chk.values(self.ibound.array, - (self.ibound.array > 0) & np.all(neighbors < 1, axis=0), - 'isolated cells in ibound array', 'Warning') - chk.values(self.ibound.array, np.isnan(self.ibound.array), - error_name='Not a number', error_type='Error') - chk.summarize() - return chk - - def write_file(self, check=True): - """ - Write the package file. - - Parameters - ---------- - check : boolean - Check package data for common errors. (default True) - - Returns - ------- - None - - """ - # allows turning off package checks when writing files at model level - if check: - self.check(f='{}.chk'.format(self.name[0]), - verbose=self.parent.verbose, level=1) - # Open file for writing - f_bas = open(self.fn_path, 'w') - # First line: heading - # f_bas.write('%s\n' % self.heading) - f_bas.write('{0:s}\n'.format(self.heading)) - # Second line: format specifier - opts = [] - if self.ixsec: - opts.append('XSECTION') - if self.ichflg: - opts.append('CHTOCH') - if self.ifrefm: - opts.append('FREE') - if self.stoper is not None: - opts.append('STOPERROR {0}'.format(self.stoper)) - self.options = ' '.join(opts) - f_bas.write(self.options + '\n') - # IBOUND array - f_bas.write(self.ibound.get_file_entry()) - # Head in inactive cells - str_hnoflo = str(self.hnoflo).rjust(10) - if not self.ifrefm and len(str_hnoflo) > 10: - # write fixed-width no more than 10 characters - str_hnoflo = '{0:10.4G}'.format(self.hnoflo) - assert len(str_hnoflo) <= 10, str_hnoflo - f_bas.write(str_hnoflo + '\n') - # Starting heads array - f_bas.write(self.strt.get_file_entry()) - # Close file - f_bas.close() - - @staticmethod - def load(f, model, ext_unit_dict=None, check=True, **kwargs): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. 
In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - check : boolean - Check package data for common errors. (default True) - kwargs : dictionary - Keyword arguments that are passed to load. - Possible keyword arguments are nlay, nrow, and ncol. - If not provided, then the model must contain a discretization - package with correct values for these parameters. - - Returns - ------- - bas : ModflowBas object - ModflowBas object (of type :class:`flopy.modflow.ModflowBas`) - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> bas = flopy.modflow.ModflowBas.load('test.bas', m, nlay=1, nrow=10, - >>> ncol=10) - - """ - - if model.verbose: - sys.stdout.write('loading bas6 package file...\n') - - # parse keywords - if 'nlay' in kwargs: - nlay = kwargs.pop('nlay') - else: - nlay = None - if 'nrow' in kwargs: - nrow = kwargs.pop('nrow') - else: - nrow = None - if 'ncol' in kwargs: - ncol = kwargs.pop('ncol') - else: - ncol = None - - # open the file if not already open - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - - # dataset 0 -- header - while True: - line = f.readline() - if line[0] != '#': - break - # dataset 1 -- options - # only accept alphanumeric characters, as well as '+', '-' and '.' - line = re.sub(r'[^A-Z0-9\.\-\+]', ' ', line.upper()) - opts = line.strip().split() - ixsec = 'XSECTION' in opts - ichflg = 'CHTOCH' in opts - ifrefm = 'FREE' in opts - iprinttime = 'PRINTTIME' in opts - ishowp = 'SHOWPROGRESS' in opts - if 'STOPERROR' in opts: - i = opts.index('STOPERROR') - stoper = np.float32(opts[i + 1]) - else: - stoper = None - # get nlay,nrow,ncol if not passed - if nlay is None and nrow is None and ncol is None: - nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper() - - # dataset 2 -- ibound - ibound = Util3d.load(f, model, (nlay, nrow, ncol), np.int32, 'ibound', - ext_unit_dict) - - # dataset 3 -- hnoflo - line = f.readline() - hnoflo = np.float32(line.strip().split()[0]) - - # dataset 4 -- strt - strt = Util3d.load(f, model, (nlay, nrow, ncol), np.float32, 'strt', - ext_unit_dict) - if openfile: - f.close() - - # set package unit number - unitnumber = None - filenames = [None] - if ext_unit_dict is not None: - unitnumber, filenames[0] = \ - model.get_ext_dict_attr(ext_unit_dict, - filetype=ModflowBas.ftype()) - - # create bas object and return - bas = ModflowBas(model, ibound=ibound, strt=strt, - ixsec=ixsec, ifrefm=ifrefm, ichflg=ichflg, - stoper=stoper, hnoflo=hnoflo, - unitnumber=unitnumber, filenames=filenames) - if check: - bas.check(f='{}.chk'.format(bas.name[0]), - verbose=bas.parent.verbose, level=0) - return bas +""" +mfbas module. Contains the ModflowBas class. Note that the user can access +the ModflowBas class as `flopy.modflow.ModflowBas`. + +Additional information for this MODFLOW package can be found at the `Online +MODFLOW Guide +`_. + +""" + +import re +import sys +import numpy as np +from ..pakbase import Package +from ..utils import Util3d, get_neighbors + + +class ModflowBas(Package): + """ + MODFLOW Basic Package Class. + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to which + this package will be added. + ibound : array of ints, optional + The ibound array (the default is 1). + strt : array of floats, optional + An array of starting heads (the default is 1.0). 
+    ifrefm : bool, optional
+        Indication if data should be read using free format (the default is
+        True).
+    ixsec : bool, optional
+        Indication of whether model is cross sectional or not (the default is
+        False).
+    ichflg : bool, optional
+        Flag indicating that flows between constant head cells should be
+        calculated (the default is False).
+    stoper : float
+        Percent discrepancy that is compared to the budget percent
+        discrepancy when the solver convergence criteria are not met.
+        Execution will continue unless the budget percent discrepancy is
+        greater than stoper (default is None). MODFLOW-2005 only
+    hnoflo : float
+        Head value assigned to inactive cells (default is -999.99).
+    extension : str, optional
+        File extension (default is 'bas').
+    unitnumber : int, optional
+        FORTRAN unit number for this package (default is None).
+    filenames : str or list of str
+        Filenames to use for the package. If filenames=None the package name
+        will be created using the model name and package extension. If a
+        single string is passed the package name will be set to the string.
+        Default is None.
+
+    Attributes
+    ----------
+    heading : str
+        Text string written to top of package input file.
+    options : list of str
+        Can be one of, or a combination of, XSECTION, CHTOCH or FREE.
+    ifrefm : bool
+        Indicates whether or not packages will be written as free format.
+
+    Methods
+    -------
+
+    See Also
+    --------
+
+    Notes
+    -----
+
+    Examples
+    --------
+
+    >>> import flopy
+    >>> m = flopy.modflow.Modflow()
+    >>> bas = flopy.modflow.ModflowBas(m)
+
+    """
+
+    @staticmethod
+    def ftype():
+        return 'BAS6'
+
+    @staticmethod
+    def defaultunit():
+        return 13
+
+    def __init__(self, model, ibound=1, strt=1.0, ifrefm=True, ixsec=False,
+                 ichflg=False, stoper=None, hnoflo=-999.99, extension='bas',
+                 unitnumber=None, filenames=None):
+        """
+        Package constructor.
+
+        """
+
+        if unitnumber is None:
+            unitnumber = ModflowBas.defaultunit()
+
+        # set filenames
+        if filenames is None:
+            filenames = [None]
+        elif isinstance(filenames, str):
+            filenames = [filenames]
+
+        # Fill namefile items
+        name = [ModflowBas.ftype()]
+        units = [unitnumber]
+        extra = ['']
+
+        # set package name
+        fname = [filenames[0]]
+
+        # Call ancestor's init to set self.parent, extension, name and unit number
+        Package.__init__(self, model, extension=extension, name=name,
+                         unit_number=units, extra=extra, filenames=fname)
+
+        self.url = 'bas6.htm'
+
+        nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper
+        self.ibound = Util3d(model, (nlay, nrow, ncol), np.int32, ibound,
+                             name='ibound', locat=self.unit_number[0])
+        self.strt = Util3d(model, (nlay, nrow, ncol), np.float32, strt,
+                           name='strt', locat=self.unit_number[0])
+        self.heading = '# {} package for '.format(self.name[0]) + \
+                       ' {}, '.format(model.version_types[model.version]) + \
+                       'generated by Flopy.'
+        self.options = ''
+        self.ixsec = ixsec
+        self.ichflg = ichflg
+        self.stoper = stoper
+
+        # self.ifrefm = ifrefm
+        # model.array_free_format = ifrefm
+        model.free_format_input = ifrefm
+
+        self.hnoflo = hnoflo
+        self.parent.add_package(self)
+        return
+
+    @property
+    def ifrefm(self):
+        return self.parent.free_format_input
+
+    def __setattr__(self, key, value):
+        if key == "ifrefm":
+            self.parent.free_format_input = value
+        else:
+            super(ModflowBas, self).__setattr__(key, value)
+
+    def check(self, f=None, verbose=True, level=1, checktype=None):
+        """
+        Check package data for common errors.
+
+        Parameters
+        ----------
+        f : str or file handle
+            String defining file name or file handle for summary file
+            of check method output. If a string is passed a file handle
+            is created. If f is None, check method does not write
+            results to a summary file. (default is None)
+        verbose : bool
+            Boolean flag used to determine if check method results are
+            written to the screen
+        level : int
+            Check method analysis level. If level=0, summary checks are
+            performed. If level=1, full checks are performed.
+
+        Returns
+        -------
+        None
+
+        Examples
+        --------
+
+        >>> import flopy
+        >>> m = flopy.modflow.Modflow.load('model.nam')
+        >>> m.bas6.check()
+
+        """
+        chk = self._get_check(f, verbose, level, checktype)
+
+        neighbors = get_neighbors(self.ibound.array)
+        neighbors[np.isnan(neighbors)] = 0  # set neighbors at edges to 0 (inactive)
+        chk.values(self.ibound.array,
+                   (self.ibound.array > 0) & np.all(neighbors < 1, axis=0),
+                   'isolated cells in ibound array', 'Warning')
+        chk.values(self.ibound.array, np.isnan(self.ibound.array),
+                   error_name='Not a number', error_type='Error')
+        chk.summarize()
+        return chk
+
+    def write_file(self, check=True):
+        """
+        Write the package file.
+
+        Parameters
+        ----------
+        check : boolean
+            Check package data for common errors. (default True)
+
+        Returns
+        -------
+        None
+
+        """
+        # allows turning off package checks when writing files at model level
+        if check:
+            self.check(f='{}.chk'.format(self.name[0]),
+                       verbose=self.parent.verbose, level=1)
+        # Open file for writing
+        f_bas = open(self.fn_path, 'w')
+        # First line: heading
+        # f_bas.write('%s\n' % self.heading)
+        f_bas.write('{0:s}\n'.format(self.heading))
+        # Second line: format specifier
+        opts = []
+        if self.ixsec:
+            opts.append('XSECTION')
+        if self.ichflg:
+            opts.append('CHTOCH')
+        if self.ifrefm:
+            opts.append('FREE')
+        if self.stoper is not None:
+            opts.append('STOPERROR {0}'.format(self.stoper))
+        self.options = ' '.join(opts)
+        f_bas.write(self.options + '\n')
+        # IBOUND array
+        f_bas.write(self.ibound.get_file_entry())
+        # Head in inactive cells
+        str_hnoflo = str(self.hnoflo).rjust(10)
+        if not self.ifrefm and len(str_hnoflo) > 10:
+            # write fixed-width no more than 10 characters
+            str_hnoflo = '{0:10.4G}'.format(self.hnoflo)
+            assert len(str_hnoflo) <= 10, str_hnoflo
+        f_bas.write(str_hnoflo + '\n')
+        # Starting heads array
+        f_bas.write(self.strt.get_file_entry())
+        # Close file
+        f_bas.close()
+
+    @staticmethod
+    def load(f, model, ext_unit_dict=None, check=True, **kwargs):
+        """
+        Load an existing package.
+
+        Parameters
+        ----------
+        f : filename or file handle
+            File to load.
+        model : model object
+            The model object (of type :class:`flopy.modflow.mf.Modflow`) to
+            which this package will be added.
+        ext_unit_dict : dictionary, optional
+            If the arrays in the file are specified using EXTERNAL,
+            or older style array control records, then `f` should be a file
+            handle. In this case ext_unit_dict is required, which can be
+            constructed using the function
+            :class:`flopy.utils.mfreadnam.parsenamefile`.
+        check : boolean
+            Check package data for common errors. (default True)
+        kwargs : dictionary
+            Keyword arguments that are passed to load.
+            Possible keyword arguments are nlay, nrow, and ncol.
+            If not provided, then the model must contain a discretization
+            package with correct values for these parameters.
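To make the ext_unit_dict mechanics described above concrete, a sketch (file names hypothetical) in which the name file is parsed first so EXTERNAL array records can be resolved:

    import flopy
    from flopy.utils import mfreadnam

    m = flopy.modflow.Modflow()
    # parse the name file to build the unit-number -> file dictionary
    ext_unit_dict = mfreadnam.parsenamefile('model.nam', m.mfnam_packages)
    bas = flopy.modflow.ModflowBas.load('model.bas', m, nlay=1, nrow=10,
                                        ncol=10, ext_unit_dict=ext_unit_dict)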
+ + Returns + ------- + bas : ModflowBas object + ModflowBas object (of type :class:`flopy.modflow.ModflowBas`) + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> bas = flopy.modflow.ModflowBas.load('test.bas', m, nlay=1, nrow=10, + >>> ncol=10) + + """ + + if model.verbose: + sys.stdout.write('loading bas6 package file...\n') + + # parse keywords + if 'nlay' in kwargs: + nlay = kwargs.pop('nlay') + else: + nlay = None + if 'nrow' in kwargs: + nrow = kwargs.pop('nrow') + else: + nrow = None + if 'ncol' in kwargs: + ncol = kwargs.pop('ncol') + else: + ncol = None + + # open the file if not already open + openfile = not hasattr(f, 'read') + if openfile: + filename = f + f = open(filename, 'r') + + # dataset 0 -- header + while True: + line = f.readline() + if line[0] != '#': + break + # dataset 1 -- options + # only accept alphanumeric characters, as well as '+', '-' and '.' + line = re.sub(r'[^A-Z0-9\.\-\+]', ' ', line.upper()) + opts = line.strip().split() + ixsec = 'XSECTION' in opts + ichflg = 'CHTOCH' in opts + ifrefm = 'FREE' in opts + iprinttime = 'PRINTTIME' in opts + ishowp = 'SHOWPROGRESS' in opts + if 'STOPERROR' in opts: + i = opts.index('STOPERROR') + stoper = np.float32(opts[i + 1]) + else: + stoper = None + # get nlay,nrow,ncol if not passed + if nlay is None and nrow is None and ncol is None: + nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper() + + # dataset 2 -- ibound + ibound = Util3d.load(f, model, (nlay, nrow, ncol), np.int32, 'ibound', + ext_unit_dict) + + # dataset 3 -- hnoflo + line = f.readline() + hnoflo = np.float32(line.strip().split()[0]) + + # dataset 4 -- strt + strt = Util3d.load(f, model, (nlay, nrow, ncol), np.float32, 'strt', + ext_unit_dict) + if openfile: + f.close() + + # set package unit number + unitnumber = None + filenames = [None] + if ext_unit_dict is not None: + unitnumber, filenames[0] = \ + model.get_ext_dict_attr(ext_unit_dict, + filetype=ModflowBas.ftype()) + + # create bas object and return + bas = ModflowBas(model, ibound=ibound, strt=strt, + ixsec=ixsec, ifrefm=ifrefm, ichflg=ichflg, + stoper=stoper, hnoflo=hnoflo, + unitnumber=unitnumber, filenames=filenames) + if check: + bas.check(f='{}.chk'.format(bas.name[0]), + verbose=bas.parent.verbose, level=0) + return bas diff --git a/flopy/modflow/mfbcf.py b/flopy/modflow/mfbcf.py index b0660f6a62..5c6cee1675 100644 --- a/flopy/modflow/mfbcf.py +++ b/flopy/modflow/mfbcf.py @@ -1,449 +1,449 @@ -import sys - -import numpy as np - -from ..pakbase import Package -from ..utils import Util2d, Util3d -from ..utils.flopy_io import line_parse - - -class ModflowBcf(Package): - """ - MODFLOW Block Centered Flow Package Class. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modflow.Modflow`) to which - this package will be added. - ipakcb : int - A flag that is used to determine if cell-by-cell budget data should be - saved. If ipakcb is non-zero cell-by-cell budget data will be saved. - (default is 53) - intercellt : int - Intercell transmissivities, harmonic mean (0), arithmetic mean (1), - logarithmic mean (2), combination (3). 
(default is 0)
-    laycon : int
-        Layer type: confined (0), unconfined (1), constant T with variable S
-        (2), or variable T with variable S (3). (default is 3)
-    trpy : float or array of floats (nlay)
-        horizontal anisotropy ratio (default is 1.0)
-    hdry : float
-        head assigned when cell is dry - used as indicator (default is -1E+30)
-    iwdflg : int
-        flag to indicate if wetting is inactive (0) or not (nonzero)
-        (default is 0)
-    wetfct : float
-        factor used when cell is converted from dry to wet (default is 0.1)
-    iwetit : int
-        iteration interval in wetting/drying algorithm (default is 1)
-    ihdwet : int
-        flag to indicate how initial head is computed for cells that become
-        wet (default is 0)
-    tran : float or array of floats (nlay, nrow, ncol), optional
-        transmissivity (only read if laycon is 0 or 2) (default is 1.0)
-    hy : float or array of floats (nlay, nrow, ncol)
-        hydraulic conductivity (only read if laycon is 1 or 3)
-        (default is 1.0)
-    vcont : float or array of floats (nlay-1, nrow, ncol)
-        vertical leakance between layers (default is 1.0)
-    sf1 : float or array of floats (nlay, nrow, ncol)
-        specific storage (confined) or storage coefficient (unconfined),
-        read when there is at least one transient stress period.
-        (default is 1e-5)
-    sf2 : float or array of floats (nrow, ncol)
-        specific yield, only read when laycon is 2 or 3 and there is at least
-        one transient stress period (default is 0.15)
-    wetdry : float
-        a combination of the wetting threshold and a flag to indicate which
-        neighboring cells can cause a cell to become wet (default is -0.01)
-    extension : string
-        Filename extension (default is 'bcf')
-    unitnumber : int
-        File unit number (default is None).
-    filenames : str or list of str
-        Filenames to use for the package and the output files. If
-        filenames=None the package name will be created using the model name
-        and package extension and the cbc output name will be created using
-        the model name and .cbc extension (for example, modflowtest.cbc),
-        if ipakcbc is a number greater than zero. If a single string is passed
-        the package will be set to the string and cbc output name will be
-        created using the model name and .cbc extension, if ipakcbc is a
-        number greater than zero. To define the names for all package files
-        (input and output) the length of the list of strings should be 2.
-        Default is None.
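A hedged construction sketch for the parameters above, assuming a two-layer model (all values are illustrative):

    import flopy

    m = flopy.modflow.Modflow()
    dis = flopy.modflow.ModflowDis(m, nlay=2, nrow=10, ncol=10)
    # layer 1 is confined (laycon=0, so tran is read); layer 2 is
    # unconfined (laycon=1, so hy is read); vcont couples the two layers
    bcf = flopy.modflow.ModflowBcf(m, laycon=[0, 1], tran=250.0,
                                   hy=25.0, vcont=0.01)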
- - Methods - ------- - - See Also - -------- - - Notes - ----- - - Examples - -------- - - >>> import flopy - >>> ml = flopy.modflow.Modflow() - >>> bcf = flopy.modflow.ModflowBcf(ml) - - """ - - def __init__(self, model, ipakcb=None, intercellt=0, laycon=3, trpy=1.0, - hdry=-1E+30, iwdflg=0, wetfct=0.1, iwetit=1, ihdwet=0, - tran=1.0, hy=1.0, vcont=1.0, sf1=1e-5, sf2=0.15, wetdry=-0.01, - extension='bcf', unitnumber=None, filenames=None): - - if unitnumber is None: - unitnumber = ModflowBcf.defaultunit() - - # set filenames - if filenames is None: - filenames = [None, None] - elif isinstance(filenames, str): - filenames = [filenames, None] - elif isinstance(filenames, list): - if len(filenames) < 2: - filenames.append(None) - - # update external file information with cbc output, if necessary - if ipakcb is not None: - fname = filenames[1] - model.add_output_file(ipakcb, fname=fname, - package=ModflowBcf.ftype()) - else: - ipakcb = 0 - - # Fill namefile items - name = [ModflowBcf.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - self.url = 'bcf.htm' - - nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper - # Set values of all parameters - self.intercellt = Util2d(model, (nlay,), np.int32, intercellt, - name='laycon', locat=self.unit_number[0]) - self.laycon = Util2d(model, (nlay,), np.int32, laycon, name='laycon', - locat=self.unit_number[0]) - self.trpy = Util2d(model, (nlay,), np.float32, trpy, - name='Anisotropy factor', locat=self.unit_number[0]) - - # item 1 - self.ipakcb = ipakcb - self.hdry = hdry - self.iwdflg = iwdflg - self.wetfct = wetfct - self.iwetit = iwetit - self.ihdwet = ihdwet - self.tran = Util3d(model, (nlay, nrow, ncol), np.float32, tran, - 'Transmissivity', locat=self.unit_number[0]) - self.hy = Util3d(model, (nlay, nrow, ncol), np.float32, hy, - 'Horizontal Hydraulic Conductivity', - locat=self.unit_number[0]) - if model.nlay > 1: - self.vcont = Util3d(model, (nlay - 1, nrow, ncol), np.float32, - vcont, - 'Vertical Conductance', - locat=self.unit_number[0]) - else: - self.vcont = None - self.sf1 = Util3d(model, (nlay, nrow, ncol), np.float32, sf1, - 'Primary Storage Coefficient', - locat=self.unit_number[0]) - self.sf2 = Util3d(model, (nlay, nrow, ncol), np.float32, sf2, - 'Secondary Storage Coefficient', - locat=self.unit_number[0]) - self.wetdry = Util3d(model, (nlay, nrow, ncol), np.float32, wetdry, - 'WETDRY', locat=self.unit_number[0]) - self.parent.add_package(self) - return - - def write_file(self, f=None): - """ - Write the package file. 
- - Returns - ------- - None - - """ - # get model information - nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper - dis = self.parent.get_package('DIS') - if dis is None: - dis = self.parent.get_package('DISU') - - ifrefm = self.parent.get_ifrefm() - - # Open file for writing - if f is not None: - f_bcf = f - else: - f_bcf = open(self.fn_path, 'w') - # Item 1: ipakcb, HDRY, IWDFLG, WETFCT, IWETIT, IHDWET - f_bcf.write('{:10d}{:10.6G}{:10d}{:10.3f}{:10d}{:10d}\n'.format( - self.ipakcb, self.hdry, self.iwdflg, self.wetfct, self.iwetit, - self.ihdwet)) - - # LAYCON array - for k in range(nlay): - if ifrefm: - if self.intercellt[k] > 0: - f_bcf.write('{0:1d}{1:1d} '.format(self.intercellt[k], - self.laycon[k])) - else: - f_bcf.write('0{0:1d} '.format(self.laycon[k])) - else: - if self.intercellt[k] > 0: - f_bcf.write('{0:1d}{1:1d}'.format(self.intercellt[k], - self.laycon[k])) - else: - f_bcf.write('0{0:1d}'.format(self.laycon[k])) - f_bcf.write('\n') - f_bcf.write(self.trpy.get_file_entry()) - transient = not dis.steady.all() - for k in range(nlay): - if (transient == True): - f_bcf.write(self.sf1[k].get_file_entry()) - if ((self.laycon[k] == 0) or (self.laycon[k] == 2)): - f_bcf.write(self.tran[k].get_file_entry()) - else: - f_bcf.write(self.hy[k].get_file_entry()) - if k < nlay - 1: - f_bcf.write(self.vcont[k].get_file_entry()) - if ((transient == True) and ( - (self.laycon[k] == 2) or (self.laycon[k] == 3))): - f_bcf.write(self.sf2[k].get_file_entry()) - if ((self.iwdflg != 0) and ( - (self.laycon[k] == 1) or (self.laycon[k] == 3))): - f_bcf.write(self.wetdry[k].get_file_entry()) - f_bcf.close() - - @staticmethod - def load(f, model, ext_unit_dict=None): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. - nper : int - The number of stress periods. If nper is None, then nper will be - obtained from the model object. (default is None). - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - - Returns - ------- - wel : ModflowBcf object - ModflowBcf object. 
- - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> wel = flopy.modflow.ModflowBcf.load('test.bcf', m) - - """ - - if model.verbose: - sys.stdout.write('loading bcf package file...\n') - - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - - # dataset 0 -- header - while True: - line = f.readline() - if line[0] != '#': - break - - # determine problem dimensions - nr, nc, nlay, nper = model.get_nrow_ncol_nlay_nper() - dis = model.get_package('DIS') - if dis is None: - dis = model.get_package('DISU') - - # Item 1: ipakcb, HDRY, IWDFLG, WETFCT, IWETIT, IHDWET - line already read above - if model.verbose: - print(' loading ipakcb, HDRY, IWDFLG, WETFCT, IWETIT, IHDWET...') - t = line_parse(line) - ipakcb, hdry, iwdflg, wetfct, iwetit, ihdwet = int(t[0]), \ - float(t[1]), \ - int(t[2]), \ - float(t[3]), \ - int(t[4]), \ - int(t[5]) - - # LAYCON array - ifrefm = model.get_ifrefm() - if model.verbose: - print(' loading LAYCON...') - line = f.readline() - if ifrefm: - t = [] - tt = line.strip().split() - for iv in tt: - t.append(iv) - # read the rest of the laycon values - if len(t) < nlay: - while True: - line = f.readline() - tt = line.strip().split() - for iv in tt: - t.append(iv) - if len(t) == nlay: - break - else: - t = [] - istart = 0 - for k in range(nlay): - lcode = line[istart:istart + 2] - if lcode.strip() == '': - # hit end of line before expected end of data - # read next line - line = f.readline() - istart = 0 - lcode = line[istart:istart + 2] - lcode = lcode.replace(' ', '0') - t.append(lcode) - istart += 2 - intercellt = np.zeros(nlay, dtype=np.int32) - laycon = np.zeros(nlay, dtype=np.int32) - for k in range(nlay): - if len(t[k]) > 1: - intercellt[k] = int(t[k][0]) - laycon[k] = int(t[k][1]) - else: - laycon[k] = int(t[k]) - - # TRPY array - if model.verbose: - print(' loading TRPY...') - trpy = Util2d.load(f, model, (nlay,), np.float32, 'trpy', - ext_unit_dict) - - # property data for each layer based on options - transient = not dis.steady.all() - sf1 = [0] * nlay - tran = [0] * nlay - hy = [0] * nlay - if nlay > 1: - vcont = [0] * (nlay - 1) - else: - vcont = [0] * nlay - sf2 = [0] * nlay - wetdry = [0] * nlay - - for k in range(nlay): - - # allow for unstructured changing nodes per layer - if nr is None: - nrow = 1 - ncol = nc[k] - else: - nrow = nr - ncol = nc - - # sf1 - if transient: - if model.verbose: - print(' loading sf1 layer {0:3d}...'.format(k + 1)) - t = Util2d.load(f, model, (nrow, ncol), np.float32, 'sf1', - ext_unit_dict) - sf1[k] = t - - # tran or hy - if ((laycon[k] == 0) or (laycon[k] == 2)): - if model.verbose: - print(' loading tran layer {0:3d}...'.format(k + 1)) - t = Util2d.load(f, model, (nrow, ncol), np.float32, 'tran', - ext_unit_dict) - tran[k] = t - else: - if model.verbose: - print(' loading hy layer {0:3d}...'.format(k + 1)) - t = Util2d.load(f, model, (nrow, ncol), np.float32, 'hy', - ext_unit_dict) - hy[k] = t - - # vcont - if k < (nlay - 1): - if model.verbose: - print(' loading vcont layer {0:3d}...'.format(k + 1)) - t = Util2d.load(f, model, (nrow, ncol), np.float32, 'vcont', - ext_unit_dict) - vcont[k] = t - - # sf2 - if (transient and ((laycon[k] == 2) or (laycon[k] == 3))): - if model.verbose: - print(' loading sf2 layer {0:3d}...'.format(k + 1)) - t = Util2d.load(f, model, (nrow, ncol), np.float32, 'sf2', - ext_unit_dict) - sf2[k] = t - - # wetdry - if ((iwdflg != 0) and ((laycon[k] == 1) or (laycon[k] == 3))): - if model.verbose: - print(' loading sf2 layer 
{0:3d}...'.format(k + 1))
-                t = Util2d.load(f, model, (nrow, ncol), np.float32, 'wetdry',
-                                ext_unit_dict)
-                wetdry[k] = t
-
-        if openfile:
-            f.close()
-
-        # set package unit number
-        unitnumber = None
-        filenames = [None, None]
-        if ext_unit_dict is not None:
-            unitnumber, filenames[0] = \
-                model.get_ext_dict_attr(ext_unit_dict,
-                                        filetype=ModflowBcf.ftype())
-            if ipakcb > 0:
-                iu, filenames[1] = \
-                    model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb)
-                model.add_pop_key_list(ipakcb)
-
-        # create instance of bcf object
-        bcf = ModflowBcf(model, ipakcb=ipakcb, intercellt=intercellt,
-                         laycon=laycon, trpy=trpy, hdry=hdry,
-                         iwdflg=iwdflg, wetfct=wetfct, iwetit=iwetit,
-                         ihdwet=ihdwet,
-                         tran=tran, hy=hy, vcont=vcont, sf1=sf1, sf2=sf2,
-                         wetdry=wetdry,
-                         unitnumber=unitnumber, filenames=filenames)
-
-        # return bcf object
-        return bcf
-
-    @staticmethod
-    def ftype():
-        return 'BCF6'
-
-    @staticmethod
-    def defaultunit():
-        return 15
+import sys
+
+import numpy as np
+
+from ..pakbase import Package
+from ..utils import Util2d, Util3d
+from ..utils.flopy_io import line_parse
+
+
+class ModflowBcf(Package):
+    """
+    MODFLOW Block Centered Flow Package Class.
+
+    Parameters
+    ----------
+    model : model object
+        The model object (of type :class:`flopy.modflow.Modflow`) to which
+        this package will be added.
+    ipakcb : int
+        A flag that is used to determine if cell-by-cell budget data should be
+        saved. If ipakcb is non-zero cell-by-cell budget data will be saved.
+        (default is 53)
+    intercellt : int
+        Intercell transmissivities, harmonic mean (0), arithmetic mean (1),
+        logarithmic mean (2), combination (3). (default is 0)
+    laycon : int
+        Layer type: confined (0), unconfined (1), constant T with variable S
+        (2), or variable T with variable S (3). (default is 3)
+    trpy : float or array of floats (nlay)
+        horizontal anisotropy ratio (default is 1.0)
+    hdry : float
+        head assigned when cell is dry - used as indicator (default is -1E+30)
+    iwdflg : int
+        flag to indicate if wetting is inactive (0) or not (nonzero)
+        (default is 0)
+    wetfct : float
+        factor used when cell is converted from dry to wet (default is 0.1)
+    iwetit : int
+        iteration interval in wetting/drying algorithm (default is 1)
+    ihdwet : int
+        flag to indicate how initial head is computed for cells that become
+        wet (default is 0)
+    tran : float or array of floats (nlay, nrow, ncol), optional
+        transmissivity (only read if laycon is 0 or 2) (default is 1.0)
+    hy : float or array of floats (nlay, nrow, ncol)
+        hydraulic conductivity (only read if laycon is 1 or 3)
+        (default is 1.0)
+    vcont : float or array of floats (nlay-1, nrow, ncol)
+        vertical leakance between layers (default is 1.0)
+    sf1 : float or array of floats (nlay, nrow, ncol)
+        specific storage (confined) or storage coefficient (unconfined),
+        read when there is at least one transient stress period.
+        (default is 1e-5)
+    sf2 : float or array of floats (nrow, ncol)
+        specific yield, only read when laycon is 2 or 3 and there is at least
+        one transient stress period (default is 0.15)
+    wetdry : float
+        a combination of the wetting threshold and a flag to indicate which
+        neighboring cells can cause a cell to become wet (default is -0.01)
+    extension : string
+        Filename extension (default is 'bcf')
+    unitnumber : int
+        File unit number (default is None).
+    filenames : str or list of str
+        Filenames to use for the package and the output files.
If + filenames=None the package name will be created using the model name + and package extension and the cbc output name will be created using + the model name and .cbc extension (for example, modflowtest.cbc), + if ipakcbc is a number greater than zero. If a single string is passed + the package will be set to the string and cbc output name will be + created using the model name and .cbc extension, if ipakcbc is a + number greater than zero. To define the names for all package files + (input and output) the length of the list of strings should be 2. + Default is None. + + Methods + ------- + + See Also + -------- + + Notes + ----- + + Examples + -------- + + >>> import flopy + >>> ml = flopy.modflow.Modflow() + >>> bcf = flopy.modflow.ModflowBcf(ml) + + """ + + def __init__(self, model, ipakcb=None, intercellt=0, laycon=3, trpy=1.0, + hdry=-1E+30, iwdflg=0, wetfct=0.1, iwetit=1, ihdwet=0, + tran=1.0, hy=1.0, vcont=1.0, sf1=1e-5, sf2=0.15, wetdry=-0.01, + extension='bcf', unitnumber=None, filenames=None): + + if unitnumber is None: + unitnumber = ModflowBcf.defaultunit() + + # set filenames + if filenames is None: + filenames = [None, None] + elif isinstance(filenames, str): + filenames = [filenames, None] + elif isinstance(filenames, list): + if len(filenames) < 2: + filenames.append(None) + + # update external file information with cbc output, if necessary + if ipakcb is not None: + fname = filenames[1] + model.add_output_file(ipakcb, fname=fname, + package=ModflowBcf.ftype()) + else: + ipakcb = 0 + + # Fill namefile items + name = [ModflowBcf.ftype()] + units = [unitnumber] + extra = [''] + + # set package name + fname = [filenames[0]] + + # Call ancestor's init to set self.parent, extension, name and unit number + Package.__init__(self, model, extension=extension, name=name, + unit_number=units, extra=extra, filenames=fname) + + self.url = 'bcf.htm' + + nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper + # Set values of all parameters + self.intercellt = Util2d(model, (nlay,), np.int32, intercellt, + name='laycon', locat=self.unit_number[0]) + self.laycon = Util2d(model, (nlay,), np.int32, laycon, name='laycon', + locat=self.unit_number[0]) + self.trpy = Util2d(model, (nlay,), np.float32, trpy, + name='Anisotropy factor', locat=self.unit_number[0]) + + # item 1 + self.ipakcb = ipakcb + self.hdry = hdry + self.iwdflg = iwdflg + self.wetfct = wetfct + self.iwetit = iwetit + self.ihdwet = ihdwet + self.tran = Util3d(model, (nlay, nrow, ncol), np.float32, tran, + 'Transmissivity', locat=self.unit_number[0]) + self.hy = Util3d(model, (nlay, nrow, ncol), np.float32, hy, + 'Horizontal Hydraulic Conductivity', + locat=self.unit_number[0]) + if model.nlay > 1: + self.vcont = Util3d(model, (nlay - 1, nrow, ncol), np.float32, + vcont, + 'Vertical Conductance', + locat=self.unit_number[0]) + else: + self.vcont = None + self.sf1 = Util3d(model, (nlay, nrow, ncol), np.float32, sf1, + 'Primary Storage Coefficient', + locat=self.unit_number[0]) + self.sf2 = Util3d(model, (nlay, nrow, ncol), np.float32, sf2, + 'Secondary Storage Coefficient', + locat=self.unit_number[0]) + self.wetdry = Util3d(model, (nlay, nrow, ncol), np.float32, wetdry, + 'WETDRY', locat=self.unit_number[0]) + self.parent.add_package(self) + return + + def write_file(self, f=None): + """ + Write the package file. 
+ + Returns + ------- + None + + """ + # get model information + nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper + dis = self.parent.get_package('DIS') + if dis is None: + dis = self.parent.get_package('DISU') + + ifrefm = self.parent.get_ifrefm() + + # Open file for writing + if f is not None: + f_bcf = f + else: + f_bcf = open(self.fn_path, 'w') + # Item 1: ipakcb, HDRY, IWDFLG, WETFCT, IWETIT, IHDWET + f_bcf.write('{:10d}{:10.6G}{:10d}{:10.3f}{:10d}{:10d}\n'.format( + self.ipakcb, self.hdry, self.iwdflg, self.wetfct, self.iwetit, + self.ihdwet)) + + # LAYCON array + for k in range(nlay): + if ifrefm: + if self.intercellt[k] > 0: + f_bcf.write('{0:1d}{1:1d} '.format(self.intercellt[k], + self.laycon[k])) + else: + f_bcf.write('0{0:1d} '.format(self.laycon[k])) + else: + if self.intercellt[k] > 0: + f_bcf.write('{0:1d}{1:1d}'.format(self.intercellt[k], + self.laycon[k])) + else: + f_bcf.write('0{0:1d}'.format(self.laycon[k])) + f_bcf.write('\n') + f_bcf.write(self.trpy.get_file_entry()) + transient = not dis.steady.all() + for k in range(nlay): + if (transient == True): + f_bcf.write(self.sf1[k].get_file_entry()) + if ((self.laycon[k] == 0) or (self.laycon[k] == 2)): + f_bcf.write(self.tran[k].get_file_entry()) + else: + f_bcf.write(self.hy[k].get_file_entry()) + if k < nlay - 1: + f_bcf.write(self.vcont[k].get_file_entry()) + if ((transient == True) and ( + (self.laycon[k] == 2) or (self.laycon[k] == 3))): + f_bcf.write(self.sf2[k].get_file_entry()) + if ((self.iwdflg != 0) and ( + (self.laycon[k] == 1) or (self.laycon[k] == 3))): + f_bcf.write(self.wetdry[k].get_file_entry()) + f_bcf.close() + + @staticmethod + def load(f, model, ext_unit_dict=None): + """ + Load an existing package. + + Parameters + ---------- + f : filename or file handle + File to load. + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to + which this package will be added. + nper : int + The number of stress periods. If nper is None, then nper will be + obtained from the model object. (default is None). + ext_unit_dict : dictionary, optional + If the arrays in the file are specified using EXTERNAL, + or older style array control records, then `f` should be a file + handle. In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. + + Returns + ------- + wel : ModflowBcf object + ModflowBcf object. 
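The fixed-format branch of the load method below packs two flags into each two-character LAYCON code; split_laycon is a hypothetical helper (not part of FloPy) that mirrors how the code is split:

    def split_laycon(code):
        # tens digit: inter-cell averaging method (intercellt),
        # ones digit: layer type (laycon); blanks read as zero
        code = code.replace(' ', '0')
        if len(code) > 1:
            return int(code[0]), int(code[1])
        return 0, int(code)

    assert split_laycon('13') == (1, 3)  # arithmetic mean, variable T and S
    assert split_laycon('3') == (0, 3)   # harmonic mean (default), laycon 3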
+ + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> wel = flopy.modflow.ModflowBcf.load('test.bcf', m) + + """ + + if model.verbose: + sys.stdout.write('loading bcf package file...\n') + + openfile = not hasattr(f, 'read') + if openfile: + filename = f + f = open(filename, 'r') + + # dataset 0 -- header + while True: + line = f.readline() + if line[0] != '#': + break + + # determine problem dimensions + nr, nc, nlay, nper = model.get_nrow_ncol_nlay_nper() + dis = model.get_package('DIS') + if dis is None: + dis = model.get_package('DISU') + + # Item 1: ipakcb, HDRY, IWDFLG, WETFCT, IWETIT, IHDWET - line already read above + if model.verbose: + print(' loading ipakcb, HDRY, IWDFLG, WETFCT, IWETIT, IHDWET...') + t = line_parse(line) + ipakcb, hdry, iwdflg, wetfct, iwetit, ihdwet = int(t[0]), \ + float(t[1]), \ + int(t[2]), \ + float(t[3]), \ + int(t[4]), \ + int(t[5]) + + # LAYCON array + ifrefm = model.get_ifrefm() + if model.verbose: + print(' loading LAYCON...') + line = f.readline() + if ifrefm: + t = [] + tt = line.strip().split() + for iv in tt: + t.append(iv) + # read the rest of the laycon values + if len(t) < nlay: + while True: + line = f.readline() + tt = line.strip().split() + for iv in tt: + t.append(iv) + if len(t) == nlay: + break + else: + t = [] + istart = 0 + for k in range(nlay): + lcode = line[istart:istart + 2] + if lcode.strip() == '': + # hit end of line before expected end of data + # read next line + line = f.readline() + istart = 0 + lcode = line[istart:istart + 2] + lcode = lcode.replace(' ', '0') + t.append(lcode) + istart += 2 + intercellt = np.zeros(nlay, dtype=np.int32) + laycon = np.zeros(nlay, dtype=np.int32) + for k in range(nlay): + if len(t[k]) > 1: + intercellt[k] = int(t[k][0]) + laycon[k] = int(t[k][1]) + else: + laycon[k] = int(t[k]) + + # TRPY array + if model.verbose: + print(' loading TRPY...') + trpy = Util2d.load(f, model, (nlay,), np.float32, 'trpy', + ext_unit_dict) + + # property data for each layer based on options + transient = not dis.steady.all() + sf1 = [0] * nlay + tran = [0] * nlay + hy = [0] * nlay + if nlay > 1: + vcont = [0] * (nlay - 1) + else: + vcont = [0] * nlay + sf2 = [0] * nlay + wetdry = [0] * nlay + + for k in range(nlay): + + # allow for unstructured changing nodes per layer + if nr is None: + nrow = 1 + ncol = nc[k] + else: + nrow = nr + ncol = nc + + # sf1 + if transient: + if model.verbose: + print(' loading sf1 layer {0:3d}...'.format(k + 1)) + t = Util2d.load(f, model, (nrow, ncol), np.float32, 'sf1', + ext_unit_dict) + sf1[k] = t + + # tran or hy + if ((laycon[k] == 0) or (laycon[k] == 2)): + if model.verbose: + print(' loading tran layer {0:3d}...'.format(k + 1)) + t = Util2d.load(f, model, (nrow, ncol), np.float32, 'tran', + ext_unit_dict) + tran[k] = t + else: + if model.verbose: + print(' loading hy layer {0:3d}...'.format(k + 1)) + t = Util2d.load(f, model, (nrow, ncol), np.float32, 'hy', + ext_unit_dict) + hy[k] = t + + # vcont + if k < (nlay - 1): + if model.verbose: + print(' loading vcont layer {0:3d}...'.format(k + 1)) + t = Util2d.load(f, model, (nrow, ncol), np.float32, 'vcont', + ext_unit_dict) + vcont[k] = t + + # sf2 + if (transient and ((laycon[k] == 2) or (laycon[k] == 3))): + if model.verbose: + print(' loading sf2 layer {0:3d}...'.format(k + 1)) + t = Util2d.load(f, model, (nrow, ncol), np.float32, 'sf2', + ext_unit_dict) + sf2[k] = t + + # wetdry + if ((iwdflg != 0) and ((laycon[k] == 1) or (laycon[k] == 3))): + if model.verbose: + print(' loading sf2 layer 
{0:3d}...'.format(k + 1)) + t = Util2d.load(f, model, (nrow, ncol), np.float32, 'wetdry', + ext_unit_dict) + wetdry[k] = t + + if openfile: + f.close() + + # set package unit number + unitnumber = None + filenames = [None, None] + if ext_unit_dict is not None: + unitnumber, filenames[0] = \ + model.get_ext_dict_attr(ext_unit_dict, + filetype=ModflowBcf.ftype()) + if ipakcb > 0: + iu, filenames[1] = \ + model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb) + model.add_pop_key_list(ipakcb) + + # create instance of bcf object + bcf = ModflowBcf(model, ipakcb=ipakcb, intercellt=intercellt, + laycon=laycon, trpy=trpy, hdry=hdry, + iwdflg=iwdflg, wetfct=wetfct, iwetit=iwetit, + ihdwet=ihdwet, + tran=tran, hy=hy, vcont=vcont, sf1=sf1, sf2=sf2, + wetdry=wetdry, + unitnumber=unitnumber, filenames=filenames) + + # return bcf object + return bcf + + @staticmethod + def ftype(): + return 'BCF6' + + @staticmethod + def defaultunit(): + return 15 diff --git a/flopy/modflow/mfchd.py b/flopy/modflow/mfchd.py index 62cf85cfaf..54b06ceecb 100644 --- a/flopy/modflow/mfchd.py +++ b/flopy/modflow/mfchd.py @@ -1,247 +1,247 @@ -""" -mfchd module. Contains the ModflowChd class. Note that the user can access -the ModflowChd class as `flopy.modflow.ModflowChd`. - -Additional information for this MODFLOW package can be found at the `Online -MODFLOW Guide -`_. - -""" - -import sys -import numpy as np -from ..pakbase import Package -from ..utils import MfList -from ..utils.recarray_utils import create_empty_recarray - - -class ModflowChd(Package): - """ - MODFLOW Constant Head Package Class. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to which - this package will be added. - stress_period_data : list of boundaries, recarrays, or dictionary of - boundaries. - - Each chd cell is defined through definition of - layer (int), row (int), column (int), shead (float), ehead (float) - shead is the head at the start of the stress period, and ehead is the - head at the end of the stress period. - The simplest form is a dictionary with a lists of boundaries for each - stress period, where each list of boundaries itself is a list of - boundaries. Indices of the dictionary are the numbers of the stress - period. This gives the form of:: - - stress_period_data = - {0: [ - [lay, row, col, shead, ehead], - [lay, row, col, shead, ehead], - [lay, row, col, shead, ehead] - ], - 1: [ - [lay, row, col, shead, ehead], - [lay, row, col, shead, ehead], - [lay, row, col, shead, ehead] - ], ... - kper: - [ - [lay, row, col, shead, ehead], - [lay, row, col, shead, ehead], - [lay, row, col, shead, ehead] - ] - } - - Note that if the number of lists is smaller than the number of stress - periods, then the last list of chds will apply until the end of the - simulation. Full details of all options to specify stress_period_data - can be found in the flopy3 boundaries Notebook in the basic - subdirectory of the examples directory. - - extension : string - Filename extension (default is 'chd') - unitnumber : int - File unit number (default is None). - filenames : str or list of str - Filenames to use for the package. If filenames=None the package name - will be created using the model name and package extension. If a - single string is passed the package will be set to the string. - Default is None. - - Attributes - ---------- - mxactc : int - Maximum number of chds for all stress periods. This is calculated - automatically by FloPy based on the information in - stress_period_data. 
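As a concrete illustration of the stress_period_data layout described above (a sketch; heads and cell indices are made up):

    import flopy

    m = flopy.modflow.Modflow()
    dis = flopy.modflow.ModflowDis(m, nlay=1, nrow=10, ncol=10, nper=2)
    # zero-based [layer, row, column, shead, ehead] records, keyed by
    # stress period number
    spd = {0: [[0, 0, 0, 10.0, 10.0],
               [0, 9, 9, 5.0, 5.0]],
           1: [[0, 0, 0, 10.0, 9.5]]}
    chd = flopy.modflow.ModflowChd(m, stress_period_data=spd)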
- - Methods - ------- - - See Also - -------- - - Notes - ----- - Parameters are supported in Flopy only when reading in existing models. - Parameter values are converted to native values in Flopy and the - connection to "parameters" is thus nonexistent. - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> lrcd = {0:[[2, 3, 4, 10., 10.1]]} #this chd will be applied to all - >>> #stress periods - >>> chd = flopy.modflow.ModflowChd(m, stress_period_data=lrcd) - - """ - - def __init__(self, model, stress_period_data=None, dtype=None, - options=None, extension='chd', unitnumber=None, - filenames=None, **kwargs): - - # set default unit number if one is not specified - if unitnumber is None: - unitnumber = ModflowChd.defaultunit() - - # set filenames - if filenames is None: - filenames = [None] - elif isinstance(filenames, str): - filenames = [filenames] - - # Fill namefile items - name = [ModflowChd.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - self.url = 'chd.htm' - self.heading = '# {} package for '.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' - - if dtype is not None: - self.dtype = dtype - else: - self.dtype = self.get_default_dtype( - structured=self.parent.structured) - self.stress_period_data = MfList(self, stress_period_data) - - self.np = 0 - if options is None: - options = [] - self.options = options - self.parent.add_package(self) - - def ncells(self): - # Returns the maximum number of cells that have recharge (developed for MT3DMS SSM package) - return self.stress_period_data.mxact - - def write_file(self): - """ - Write the package file. - - Returns - ------- - None - - """ - f_chd = open(self.fn_path, 'w') - f_chd.write('{0:s}\n'.format(self.heading)) - f_chd.write(' {0:9d}'.format(self.stress_period_data.mxact)) - for option in self.options: - f_chd.write(' {}'.format(option)) - f_chd.write('\n') - self.stress_period_data.write_transient(f_chd) - f_chd.close() - - def add_record(self, kper, index, values): - try: - self.stress_period_data.add_record(kper, index, values) - except Exception as e: - raise Exception("mfchd error adding record to list: " + str(e)) - - @staticmethod - def get_empty(ncells=0, aux_names=None, structured=True): - # get an empty recarray that corresponds to dtype - dtype = ModflowChd.get_default_dtype(structured=structured) - if aux_names is not None: - dtype = Package.add_to_dtype(dtype, aux_names, np.float32) - return create_empty_recarray(ncells, dtype, default_value=-1.0E+10) - - @staticmethod - def get_default_dtype(structured=True): - if structured: - dtype = np.dtype([("k", np.int), ("i", np.int), - ("j", np.int), ("shead", np.float32), - ("ehead", np.float32)]) - else: - dtype = np.dtype([("node", np.int), ("shead", np.float32), - ("ehead", np.float32)]) - return dtype - - @staticmethod - def get_sfac_columns(): - return ['shead', 'ehead'] - - @staticmethod - def load(f, model, nper=None, ext_unit_dict=None, check=True): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. - nper : int - The number of stress periods. 
If nper is None, then nper will be - obtained from the model object. (default is None). - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - - Returns - ------- - chd : ModflowChd object - ModflowChd object. - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> wel = flopy.modflow.ModflowChd.load('test.chd', m) - - """ - - if model.verbose: - sys.stdout.write('loading chd package file...\n') - - return Package.load(f, model, ModflowChd, nper=nper, check=check, - ext_unit_dict=ext_unit_dict) - - @staticmethod - def ftype(): - return 'CHD' - - @staticmethod - def defaultunit(): - return 24 +""" +mfchd module. Contains the ModflowChd class. Note that the user can access +the ModflowChd class as `flopy.modflow.ModflowChd`. + +Additional information for this MODFLOW package can be found at the `Online +MODFLOW Guide +`_. + +""" + +import sys +import numpy as np +from ..pakbase import Package +from ..utils import MfList +from ..utils.recarray_utils import create_empty_recarray + + +class ModflowChd(Package): + """ + MODFLOW Constant Head Package Class. + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to which + this package will be added. + stress_period_data : list of boundaries, recarrays, or dictionary of + boundaries. + + Each chd cell is defined through definition of + layer (int), row (int), column (int), shead (float), ehead (float) + shead is the head at the start of the stress period, and ehead is the + head at the end of the stress period. + The simplest form is a dictionary with a lists of boundaries for each + stress period, where each list of boundaries itself is a list of + boundaries. Indices of the dictionary are the numbers of the stress + period. This gives the form of:: + + stress_period_data = + {0: [ + [lay, row, col, shead, ehead], + [lay, row, col, shead, ehead], + [lay, row, col, shead, ehead] + ], + 1: [ + [lay, row, col, shead, ehead], + [lay, row, col, shead, ehead], + [lay, row, col, shead, ehead] + ], ... + kper: + [ + [lay, row, col, shead, ehead], + [lay, row, col, shead, ehead], + [lay, row, col, shead, ehead] + ] + } + + Note that if the number of lists is smaller than the number of stress + periods, then the last list of chds will apply until the end of the + simulation. Full details of all options to specify stress_period_data + can be found in the flopy3 boundaries Notebook in the basic + subdirectory of the examples directory. + + extension : string + Filename extension (default is 'chd') + unitnumber : int + File unit number (default is None). + filenames : str or list of str + Filenames to use for the package. If filenames=None the package name + will be created using the model name and package extension. If a + single string is passed the package will be set to the string. + Default is None. + + Attributes + ---------- + mxactc : int + Maximum number of chds for all stress periods. This is calculated + automatically by FloPy based on the information in + stress_period_data. + + Methods + ------- + + See Also + -------- + + Notes + ----- + Parameters are supported in Flopy only when reading in existing models. 
+ Parameter values are converted to native values in Flopy and the + connection to "parameters" is thus nonexistent. + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> lrcd = {0:[[2, 3, 4, 10., 10.1]]} #this chd will be applied to all + >>> #stress periods + >>> chd = flopy.modflow.ModflowChd(m, stress_period_data=lrcd) + + """ + + def __init__(self, model, stress_period_data=None, dtype=None, + options=None, extension='chd', unitnumber=None, + filenames=None, **kwargs): + + # set default unit number if one is not specified + if unitnumber is None: + unitnumber = ModflowChd.defaultunit() + + # set filenames + if filenames is None: + filenames = [None] + elif isinstance(filenames, str): + filenames = [filenames] + + # Fill namefile items + name = [ModflowChd.ftype()] + units = [unitnumber] + extra = [''] + + # set package name + fname = [filenames[0]] + + # Call ancestor's init to set self.parent, extension, name and unit number + Package.__init__(self, model, extension=extension, name=name, + unit_number=units, extra=extra, filenames=fname) + + self.url = 'chd.htm' + self.heading = '# {} package for '.format(self.name[0]) + \ + ' {}, '.format(model.version_types[model.version]) + \ + 'generated by Flopy.' + + if dtype is not None: + self.dtype = dtype + else: + self.dtype = self.get_default_dtype( + structured=self.parent.structured) + self.stress_period_data = MfList(self, stress_period_data) + + self.np = 0 + if options is None: + options = [] + self.options = options + self.parent.add_package(self) + + def ncells(self): + # Returns the maximum number of cells that have recharge (developed for MT3DMS SSM package) + return self.stress_period_data.mxact + + def write_file(self): + """ + Write the package file. + + Returns + ------- + None + + """ + f_chd = open(self.fn_path, 'w') + f_chd.write('{0:s}\n'.format(self.heading)) + f_chd.write(' {0:9d}'.format(self.stress_period_data.mxact)) + for option in self.options: + f_chd.write(' {}'.format(option)) + f_chd.write('\n') + self.stress_period_data.write_transient(f_chd) + f_chd.close() + + def add_record(self, kper, index, values): + try: + self.stress_period_data.add_record(kper, index, values) + except Exception as e: + raise Exception("mfchd error adding record to list: " + str(e)) + + @staticmethod + def get_empty(ncells=0, aux_names=None, structured=True): + # get an empty recarray that corresponds to dtype + dtype = ModflowChd.get_default_dtype(structured=structured) + if aux_names is not None: + dtype = Package.add_to_dtype(dtype, aux_names, np.float32) + return create_empty_recarray(ncells, dtype, default_value=-1.0E+10) + + @staticmethod + def get_default_dtype(structured=True): + if structured: + dtype = np.dtype([("k", np.int), ("i", np.int), + ("j", np.int), ("shead", np.float32), + ("ehead", np.float32)]) + else: + dtype = np.dtype([("node", np.int), ("shead", np.float32), + ("ehead", np.float32)]) + return dtype + + @staticmethod + def get_sfac_columns(): + return ['shead', 'ehead'] + + @staticmethod + def load(f, model, nper=None, ext_unit_dict=None, check=True): + """ + Load an existing package. + + Parameters + ---------- + f : filename or file handle + File to load. + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to + which this package will be added. + nper : int + The number of stress periods. If nper is None, then nper will be + obtained from the model object. (default is None). 
+ ext_unit_dict : dictionary, optional + If the arrays in the file are specified using EXTERNAL, + or older style array control records, then `f` should be a file + handle. In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. + + Returns + ------- + chd : ModflowChd object + ModflowChd object. + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> wel = flopy.modflow.ModflowChd.load('test.chd', m) + + """ + + if model.verbose: + sys.stdout.write('loading chd package file...\n') + + return Package.load(f, model, ModflowChd, nper=nper, check=check, + ext_unit_dict=ext_unit_dict) + + @staticmethod + def ftype(): + return 'CHD' + + @staticmethod + def defaultunit(): + return 24 diff --git a/flopy/modflow/mfde4.py b/flopy/modflow/mfde4.py index f8c5412c62..b892b82551 100644 --- a/flopy/modflow/mfde4.py +++ b/flopy/modflow/mfde4.py @@ -1,305 +1,305 @@ -""" -mfde4 module. Contains the ModflowDe4 class. Note that the user can access -the ModflowDe4 class as `flopy.modflow.ModflowDe4`. - -Additional information for this MODFLOW package can be found at the `Online -MODFLOW Guide -`_. - -""" -import sys -from ..pakbase import Package - - -class ModflowDe4(Package): - """ - MODFLOW DE4 - Direct Solver Package - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to which - this package will be added. - itmx : int - Maximum number of iterations for each time step. Specify ITMAX = 1 if - iteration is not desired. Ideally iteration would not be required for - direct solution. However, it is necessary to iterate if the flow - equation is nonlinear or if computer precision limitations result in - inaccurate calculations as indicated by a large water budget error - (default is 50). - mxup : int - Maximum number of equations in the upper part of the equations to be - solved. This value impacts the amount of memory used by the DE4 - Package. If specified as 0, the program will calculate MXUP as half - the number of cells in the model, which is an upper limit (default - is 0). - mxlow : int - Maximum number of equations in the lower part of equations to be - solved. This value impacts the amount of memory used by the DE4 - Package. If specified as 0, the program will calculate MXLOW as half - the number of cells in the model, which is an upper limit (default is - 0). - mxbw : int - Maximum band width plus 1 of the lower part of the head coefficients - matrix. This value impacts the amount of memory used by the DE4 - Package. If specified as 0, the program will calculate MXBW as the - product of the two smallest grid dimensions plus 1, which is an - upper limit (default is 0). - ifreq : int - Flag indicating the frequency at which coefficients in head matrix - change. - IFREQ = 1 indicates that the flow equations are linear and that - coefficients of simulated head for all stress terms are constant - for all stress periods. - IFREQ = 2 indicates that the flow equations are linear, but - coefficients of simulated head for some stress terms may change - at the start of each stress period. - IFREQ = 3 indicates that a nonlinear flow equation is being solved, - which means that some terms in the head coefficients matrix depend - on simulated head (default is 3). - mutd4 : int - Flag that indicates the quantity of information that is printed when - convergence information is printed for a time step. 
- MUTD4 = 0 indicates that the number of iterations in the time step - and the maximum head change each iteration are printed. - MUTD4 = 1 indicates that only the number of iterations in the time - step is printed. - MUTD4 = 2 indicates no information is printed (default is 0). - accl : int - Multiplier for the computed head change for each iteration. Normally - this value is 1. A value greater than 1 may be useful for improving - the rate of convergence when using external iteration to solve - nonlinear problems (default is 1). - hclose : float - Head change closure criterion. If iterating (ITMX > 1), iteration - stops when the absolute value of head change at every node is less - than or equal to HCLOSE. HCLOSE is not used if not iterating, but a - value must always be specified (default is 1e-5). - iprd4 : int - Time step interval for printing out convergence information when - iterating (ITMX > 1). If IPRD4 is 2, convergence information is - printed every other time step. A value must always be specified - even if not iterating (default is 1). - extension : string - Filename extension (default is 'de4') - unitnumber : int - File unit number (default is None). - filenames : str or list of str - Filenames to use for the package. If filenames=None the package name - will be created using the model name and package extension. If a - single string is passed the package will be set to the string. - Default is None. - - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> de4 = flopy.modflow.ModflowDe4(m) - - """ - - def __init__(self, model, itmx=50, mxup=0, mxlow=0, mxbw=0, - ifreq=3, mutd4=0, accl=1., hclose=1e-5, iprd4=1, - extension='de4', unitnumber=None, filenames=None): - """ - Package constructor. - - """ - - # set default unit number of one is not specified - if unitnumber is None: - unitnumber = ModflowDe4.defaultunit() - - # set filenames - if filenames is None: - filenames = [None] - elif isinstance(filenames, str): - filenames = [filenames] - - # Fill namefile items - name = [ModflowDe4.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - # check if a valid model version has been specified - if model.version == 'mfusg': - err = 'Error: cannot use {} package with model version {}'.format( - self.name, model.version) - raise Exception(err) - - self.heading = '# {} package for '.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' - self.url = 'de4.htm' - - self.itmx = itmx - self.mxup = mxup - self.mxlow = mxlow - self.mxbw = mxbw - self.ifreq = ifreq - self.mutd4 = mutd4 - self.accl = accl - self.hclose = hclose - self.iprd4 = iprd4 - self.parent.add_package(self) - return - - def write_file(self): - """ - Write the package file. 
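A construction sketch for the solver settings described above (the values are illustrative):

    import flopy

    m = flopy.modflow.Modflow()
    # iterate up to 50 times per time step, stop once the largest head
    # change is at or below 1e-5, and print convergence information
    # every time step
    de4 = flopy.modflow.ModflowDe4(m, itmx=50, hclose=1e-5, iprd4=1)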
- - Returns - ------- - None - - """ - # Open file for writing - f = open(self.fn_path, 'w') - f.write('{}\n'.format(self.heading)) - ifrfm = self.parent.get_ifrefm() - if ifrfm: - f.write('{} '.format(self.itmx)) - f.write('{} '.format(self.mxup)) - f.write('{} '.format(self.mxlow)) - f.write('{} '.format(self.mxbw)) - f.write('\n') - f.write('{} '.format(self.ifreq)) - f.write('{} '.format(self.mutd4)) - f.write('{} '.format(self.accl)) - f.write('{} '.format(self.hclose)) - f.write('{} '.format(self.iprd4)) - f.write('\n') - else: - f.write('{:10d}'.format(self.itmx)) - f.write('{:10d}'.format(self.mxup)) - f.write('{:10d}'.format(self.mxlow)) - f.write('{:10d}'.format(self.mxbw)) - f.write('\n') - f.write('{:10d}'.format(self.ifreq)) - f.write('{:10d}'.format(self.mutd4)) - f.write('{:9.4e} '.format(self.accl)) - f.write('{:9.4e} '.format(self.hclose)) - f.write('{:10d}'.format(self.iprd4)) - f.write('\n') - f.close() - - @staticmethod - def load(f, model, ext_unit_dict=None): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - - Returns - ------- - de4 : ModflowDe4 object - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> de4 = flopy.modflow.ModflowDe4.load('test.de4', m) - - """ - - if model.verbose: - sys.stdout.write('loading de4 package file...\n') - - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - - # read dataset 0 -- header - while True: - line = f.readline() - if line[0] != '#': - break - # read dataset 1 - ifrfm = model.get_ifrefm() - if model.version != 'mf2k': - ifrfm = True - ifreq = 1 - if ifrfm: - t = line.strip().split() - itmx = int(t[0]) - mxup = int(t[1]) - mxlow = int(t[2]) - mxbw = int(t[3]) - line = f.readline() - t = line.strip().split() - ifreq = int(t[0]) - mutd4 = int(t[1]) - accl = float(t[2]) - hclose = float(t[3]) - iprd4 = int(t[4]) - else: - itmx = int(line[0:10].strip()) - mxup = int(line[10:20].strip()) - mxlow = int(line[20:30].strip()) - mxbw = int(line[30:40].strip()) - line = f.readline() - ifreq = int(line[0:10].strip()) - mutd4 = int(line[10:20].strip()) - accl = float(line[20:30].strip()) - hclose = float(line[30:40].strip()) - iprd4 = int(line[40:50].strip()) - - if openfile: - f.close() - - # set package unit number - unitnumber = None - filenames = [None] - if ext_unit_dict is not None: - unitnumber, filenames[0] = \ - model.get_ext_dict_attr(ext_unit_dict, - filetype=ModflowDe4.ftype()) - - de4 = ModflowDe4(model, itmx=itmx, mxup=mxup, mxlow=mxlow, mxbw=mxbw, - ifreq=ifreq, mutd4=mutd4, accl=accl, hclose=hclose, - iprd4=iprd4, unitnumber=unitnumber, - filenames=filenames) - return de4 - - @staticmethod - def ftype(): - return 'DE4' - - @staticmethod - def defaultunit(): - return 28 +""" +mfde4 module. Contains the ModflowDe4 class. Note that the user can access +the ModflowDe4 class as `flopy.modflow.ModflowDe4`. + +Additional information for this MODFLOW package can be found at the `Online +MODFLOW Guide +`_. 
+ +""" +import sys +from ..pakbase import Package + + +class ModflowDe4(Package): + """ + MODFLOW DE4 - Direct Solver Package + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to which + this package will be added. + itmx : int + Maximum number of iterations for each time step. Specify ITMAX = 1 if + iteration is not desired. Ideally iteration would not be required for + direct solution. However, it is necessary to iterate if the flow + equation is nonlinear or if computer precision limitations result in + inaccurate calculations as indicated by a large water budget error + (default is 50). + mxup : int + Maximum number of equations in the upper part of the equations to be + solved. This value impacts the amount of memory used by the DE4 + Package. If specified as 0, the program will calculate MXUP as half + the number of cells in the model, which is an upper limit (default + is 0). + mxlow : int + Maximum number of equations in the lower part of equations to be + solved. This value impacts the amount of memory used by the DE4 + Package. If specified as 0, the program will calculate MXLOW as half + the number of cells in the model, which is an upper limit (default is + 0). + mxbw : int + Maximum band width plus 1 of the lower part of the head coefficients + matrix. This value impacts the amount of memory used by the DE4 + Package. If specified as 0, the program will calculate MXBW as the + product of the two smallest grid dimensions plus 1, which is an + upper limit (default is 0). + ifreq : int + Flag indicating the frequency at which coefficients in head matrix + change. + IFREQ = 1 indicates that the flow equations are linear and that + coefficients of simulated head for all stress terms are constant + for all stress periods. + IFREQ = 2 indicates that the flow equations are linear, but + coefficients of simulated head for some stress terms may change + at the start of each stress period. + IFREQ = 3 indicates that a nonlinear flow equation is being solved, + which means that some terms in the head coefficients matrix depend + on simulated head (default is 3). + mutd4 : int + Flag that indicates the quantity of information that is printed when + convergence information is printed for a time step. + MUTD4 = 0 indicates that the number of iterations in the time step + and the maximum head change each iteration are printed. + MUTD4 = 1 indicates that only the number of iterations in the time + step is printed. + MUTD4 = 2 indicates no information is printed (default is 0). + accl : int + Multiplier for the computed head change for each iteration. Normally + this value is 1. A value greater than 1 may be useful for improving + the rate of convergence when using external iteration to solve + nonlinear problems (default is 1). + hclose : float + Head change closure criterion. If iterating (ITMX > 1), iteration + stops when the absolute value of head change at every node is less + than or equal to HCLOSE. HCLOSE is not used if not iterating, but a + value must always be specified (default is 1e-5). + iprd4 : int + Time step interval for printing out convergence information when + iterating (ITMX > 1). If IPRD4 is 2, convergence information is + printed every other time step. A value must always be specified + even if not iterating (default is 1). + extension : string + Filename extension (default is 'de4') + unitnumber : int + File unit number (default is None). + filenames : str or list of str + Filenames to use for the package. 
If filenames=None the package name + will be created using the model name and package extension. If a + single string is passed the package will be set to the string. + Default is None. + + + Attributes + ---------- + + Methods + ------- + + See Also + -------- + + Notes + ----- + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> de4 = flopy.modflow.ModflowDe4(m) + + """ + + def __init__(self, model, itmx=50, mxup=0, mxlow=0, mxbw=0, + ifreq=3, mutd4=0, accl=1., hclose=1e-5, iprd4=1, + extension='de4', unitnumber=None, filenames=None): + """ + Package constructor. + + """ + + # set default unit number of one is not specified + if unitnumber is None: + unitnumber = ModflowDe4.defaultunit() + + # set filenames + if filenames is None: + filenames = [None] + elif isinstance(filenames, str): + filenames = [filenames] + + # Fill namefile items + name = [ModflowDe4.ftype()] + units = [unitnumber] + extra = [''] + + # set package name + fname = [filenames[0]] + + # Call ancestor's init to set self.parent, extension, name and unit number + Package.__init__(self, model, extension=extension, name=name, + unit_number=units, extra=extra, filenames=fname) + + # check if a valid model version has been specified + if model.version == 'mfusg': + err = 'Error: cannot use {} package with model version {}'.format( + self.name, model.version) + raise Exception(err) + + self.heading = '# {} package for '.format(self.name[0]) + \ + ' {}, '.format(model.version_types[model.version]) + \ + 'generated by Flopy.' + self.url = 'de4.htm' + + self.itmx = itmx + self.mxup = mxup + self.mxlow = mxlow + self.mxbw = mxbw + self.ifreq = ifreq + self.mutd4 = mutd4 + self.accl = accl + self.hclose = hclose + self.iprd4 = iprd4 + self.parent.add_package(self) + return + + def write_file(self): + """ + Write the package file. + + Returns + ------- + None + + """ + # Open file for writing + f = open(self.fn_path, 'w') + f.write('{}\n'.format(self.heading)) + ifrfm = self.parent.get_ifrefm() + if ifrfm: + f.write('{} '.format(self.itmx)) + f.write('{} '.format(self.mxup)) + f.write('{} '.format(self.mxlow)) + f.write('{} '.format(self.mxbw)) + f.write('\n') + f.write('{} '.format(self.ifreq)) + f.write('{} '.format(self.mutd4)) + f.write('{} '.format(self.accl)) + f.write('{} '.format(self.hclose)) + f.write('{} '.format(self.iprd4)) + f.write('\n') + else: + f.write('{:10d}'.format(self.itmx)) + f.write('{:10d}'.format(self.mxup)) + f.write('{:10d}'.format(self.mxlow)) + f.write('{:10d}'.format(self.mxbw)) + f.write('\n') + f.write('{:10d}'.format(self.ifreq)) + f.write('{:10d}'.format(self.mutd4)) + f.write('{:9.4e} '.format(self.accl)) + f.write('{:9.4e} '.format(self.hclose)) + f.write('{:10d}'.format(self.iprd4)) + f.write('\n') + f.close() + + @staticmethod + def load(f, model, ext_unit_dict=None): + """ + Load an existing package. + + Parameters + ---------- + f : filename or file handle + File to load. + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to + which this package will be added. + ext_unit_dict : dictionary, optional + If the arrays in the file are specified using EXTERNAL, + or older style array control records, then `f` should be a file + handle. In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. 
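An illustration of the two dataset 1 input styles the loader accepts, based on the parsing shown below (the sample line is made up):

    line = '        50         0         0         0'
    # free format: whitespace-delimited tokens
    itmx, mxup, mxlow, mxbw = [int(v) for v in line.split()]
    # fixed format: four right-justified 10-character fields
    assert itmx == int(line[0:10].strip()) == 50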
+ + Returns + ------- + de4 : ModflowDe4 object + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> de4 = flopy.modflow.ModflowDe4.load('test.de4', m) + + """ + + if model.verbose: + sys.stdout.write('loading de4 package file...\n') + + openfile = not hasattr(f, 'read') + if openfile: + filename = f + f = open(filename, 'r') + + # read dataset 0 -- header + while True: + line = f.readline() + if line[0] != '#': + break + # read dataset 1 + ifrfm = model.get_ifrefm() + if model.version != 'mf2k': + ifrfm = True + ifreq = 1 + if ifrfm: + t = line.strip().split() + itmx = int(t[0]) + mxup = int(t[1]) + mxlow = int(t[2]) + mxbw = int(t[3]) + line = f.readline() + t = line.strip().split() + ifreq = int(t[0]) + mutd4 = int(t[1]) + accl = float(t[2]) + hclose = float(t[3]) + iprd4 = int(t[4]) + else: + itmx = int(line[0:10].strip()) + mxup = int(line[10:20].strip()) + mxlow = int(line[20:30].strip()) + mxbw = int(line[30:40].strip()) + line = f.readline() + ifreq = int(line[0:10].strip()) + mutd4 = int(line[10:20].strip()) + accl = float(line[20:30].strip()) + hclose = float(line[30:40].strip()) + iprd4 = int(line[40:50].strip()) + + if openfile: + f.close() + + # set package unit number + unitnumber = None + filenames = [None] + if ext_unit_dict is not None: + unitnumber, filenames[0] = \ + model.get_ext_dict_attr(ext_unit_dict, + filetype=ModflowDe4.ftype()) + + de4 = ModflowDe4(model, itmx=itmx, mxup=mxup, mxlow=mxlow, mxbw=mxbw, + ifreq=ifreq, mutd4=mutd4, accl=accl, hclose=hclose, + iprd4=iprd4, unitnumber=unitnumber, + filenames=filenames) + return de4 + + @staticmethod + def ftype(): + return 'DE4' + + @staticmethod + def defaultunit(): + return 28 diff --git a/flopy/modflow/mfdis.py b/flopy/modflow/mfdis.py index 8f36524b41..74e348e936 100644 --- a/flopy/modflow/mfdis.py +++ b/flopy/modflow/mfdis.py @@ -1,991 +1,991 @@ -""" -mfdis module. Contains the ModflowDis class. Note that the user can access -the ModflowDis class as `flopy.modflow.ModflowDis`. - -Additional information for this MODFLOW package can be found at the `Online -MODFLOW Guide -`_. - -""" - -import sys -import warnings - -import numpy as np - -from ..pakbase import Package -from ..utils import Util2d, Util3d -from ..utils.reference import SpatialReference, TemporalReference -from ..utils.flopy_io import line_parse - -ITMUNI = {"u": 0, "s": 1, "m": 2, "h": 3, "d": 4, "y": 5} -LENUNI = {"u": 0, "f": 1, "m": 2, "c": 3} - - -class ModflowDis(Package): - """ - MODFLOW Discretization Package Class. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modflow.Modflow`) to which - this package will be added. - nlay : int - Number of model layers (the default is 1). - nrow : int - Number of model rows (the default is 2). - ncol : int - Number of model columns (the default is 2). - nper : int - Number of model stress periods (the default is 1). - delr : float or array of floats (ncol), optional - An array of spacings along a row (the default is 1.0). - delc : float or array of floats (nrow), optional - An array of spacings along a column (the default is 0.0). - laycbd : int or array of ints (nlay), optional - An array of flags indicating whether or not a layer has a Quasi-3D - confining bed below it. 0 indicates no confining bed, and not zero - indicates a confining bed. LAYCBD for the bottom layer must be 0. (the - default is 0) - top : float or array of floats (nrow, ncol), optional - An array of the top elevation of layer 1. 
For the common situation in - which the top layer represents a water-table aquifer, it may be - reasonable to set Top equal to land-surface elevation (the default is - 1.0) - botm : float or array of floats (nlay, nrow, ncol), optional - An array of the bottom elevation for each model cell (the default is - 0.) - perlen : float or array of floats (nper) - An array of the stress period lengths. - nstp : int or array of ints (nper) - Number of time steps in each stress period (default is 1). - tsmult : float or array of floats (nper) - Time step multiplier (default is 1.0). - steady : boolean or array of boolean (nper) - true or False indicating whether or not stress period is steady state - (default is True). - itmuni : int - Time units, default is days (4) - lenuni : int - Length units, default is meters (2) - extension : string - Filename extension (default is 'dis') - unitnumber : int - File unit number (default is None). - filenames : str or list of str - Filenames to use for the package. If filenames=None the package name - will be created using the model name and package extension. If a - single string is passed the package will be set to the string. - Default is None. - xul : float - x coordinate of upper left corner of the grid, default is None, which - means xul will be set to zero. - yul : float - y coordinate of upper-left corner of the grid, default is None, which - means yul will be calculated as the sum of the delc array. This - default, combined with the xul and rotation defaults will place the - lower-left corner of the grid at (0, 0). - rotation : float - counter-clockwise rotation (in degrees) of the grid about the lower- - left corner. default is 0.0 - proj4_str : str - PROJ4 string that defines the projected coordinate system - (e.g. '+proj=utm +zone=14 +datum=WGS84 +units=m +no_defs '). - Can be an EPSG code (e.g. 'EPSG:32614'). Default is None. - start_datetime : str - starting datetime of the simulation. default is '1/1/1970' - - Attributes - ---------- - heading : str - Text string written to top of package input file. 
- - Methods - ------- - - See Also - -------- - - Notes - ----- - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> dis = flopy.modflow.ModflowDis(m) - - """ - - def __init__(self, model, nlay=1, nrow=2, ncol=2, nper=1, delr=1.0, - delc=1.0, laycbd=0, top=1, botm=0, perlen=1, nstp=1, - tsmult=1, steady=True, itmuni=4, lenuni=2, extension='dis', - unitnumber=None, filenames=None, - xul=None, yul=None, rotation=None, - proj4_str=None, start_datetime=None): - - # set default unit number of one is not specified - if unitnumber is None: - unitnumber = ModflowDis.defaultunit() - - # set filenames - if filenames is None: - filenames = [None] - elif isinstance(filenames, str): - filenames = [filenames] - - # Fill namefile items - name = [ModflowDis.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - self.url = 'dis.htm' - self.nrow = nrow - self.ncol = ncol - self.nlay = nlay - self.nper = nper - - # initialize botm to an appropriate sized - if nlay > 1: - if isinstance(botm, float) or isinstance(botm, int): - botm = np.linspace(top, botm, nlay) - - # Set values of all parameters - self.heading = '# {} package for '.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' - self.laycbd = Util2d(model, (self.nlay,), np.int32, laycbd, - name='laycbd') - self.laycbd[-1] = 0 # bottom layer must be zero - self.delr = Util2d(model, (self.ncol,), np.float32, delr, name='delr', - locat=self.unit_number[0]) - self.delc = Util2d(model, (self.nrow,), np.float32, delc, name='delc', - locat=self.unit_number[0]) - self.top = Util2d(model, (self.nrow, self.ncol), np.float32, - top, name='model_top', locat=self.unit_number[0]) - self.botm = Util3d(model, (self.nlay + sum(self.laycbd), - self.nrow, self.ncol), np.float32, botm, - 'botm', locat=self.unit_number[0]) - self.perlen = Util2d(model, (self.nper,), np.float32, perlen, - name='perlen') - self.nstp = Util2d(model, (self.nper,), np.int32, nstp, name='nstp') - self.tsmult = Util2d(model, (self.nper,), np.float32, tsmult, - name='tsmult') - self.steady = Util2d(model, (self.nper,), np.bool, - steady, name='steady') - - try: - self.itmuni = int(itmuni) - except: - self.itmuni = ITMUNI[itmuni.lower()[0]] - try: - self.lenuni = int(lenuni) - except: - self.lenuni = LENUNI[lenuni.lower()[0]] - - self.parent.add_package(self) - self.itmuni_dict = {0: "undefined", 1: "seconds", 2: "minutes", - 3: "hours", 4: "days", 5: "years"} - - if xul is None: - xul = model._xul - if yul is None: - yul = model._yul - if rotation is None: - rotation = model._rotation - if proj4_str is None: - proj4_str = model._proj4_str - if start_datetime is None: - start_datetime = model._start_datetime - - # set the model grid coordinate info - xll = None - yll = None - mg = model.modelgrid - if rotation is not None: - mg.set_coord_info(xoff=None, yoff=None, angrot=rotation) - if xul is not None: - xll = mg._xul_to_xll(xul) - if yul is not None: - yll = mg._yul_to_yll(yul) - mg.set_coord_info(xoff=xll, yoff=yll, angrot=rotation, proj4=proj4_str) - - xll = mg.xoffset - yll = mg.yoffset - rotation = mg.angrot - with warnings.catch_warnings(): - warnings.simplefilter("ignore", category=DeprecationWarning) - self._sr = SpatialReference(self.delr, self.delc, self.lenuni, - 
xll=xll, yll=yll, - rotation=rotation or 0.0, - proj4_str=proj4_str) - - self.tr = TemporalReference(itmuni=self.itmuni, - start_datetime=start_datetime) - - self.start_datetime = start_datetime - # calculate layer thicknesses - self.__calculate_thickness() - - @property - def sr(self): - warnings.warn( - 'SpatialReference has been deprecated. Use Grid instead.', - DeprecationWarning) - return self._sr - - @sr.setter - def sr(self, sr): - warnings.warn( - 'SpatialReference has been deprecated. Use Grid instead.', - DeprecationWarning) - self._sr = sr - - def checklayerthickness(self): - """ - Check layer thickness. - - """ - return (self.thickness > 0).all() - - def get_totim(self): - """ - Get the totim at the end of each time step - - Returns - ------- - totim: numpy array - numpy array with simulation totim at the end of each time step - - """ - totim = [] - nstp = self.nstp.array - perlen = self.perlen.array - tsmult = self.tsmult.array - t = 0. - for kper in range(self.nper): - m = tsmult[kper] - p = float(nstp[kper]) - dt = perlen[kper] - if m > 1: - dt *= (m - 1.) / (m ** p - 1.) - else: - dt = dt / p - for kstp in range(nstp[kper]): - t += dt - totim.append(t) - if m > 1: - dt *= m - return np.array(totim, dtype=np.float) - - def get_final_totim(self): - """ - Get the totim at the end of the simulation - - Returns - ------- - totim: float - maximum simulation totim - - """ - return self.get_totim()[-1] - - def get_kstp_kper_toffset(self, t=0.): - """ - Get the stress period, time step, and time offset from passed time. - - Parameters - ---------- - t : float - totim to return the stress period, time step, and toffset for - based on time discretization data. Default is 0. - - Returns - ------- - kstp : int - time step in stress period corresponding to passed totim - kper : int - stress period corresponding to passed totim - toffset : float - time offset of passed totim from the beginning of kper - - """ - - if t < 0.: - t = 0. - totim = self.get_totim() - nstp = self.nstp.array - ipos = 0 - t0 = 0. - kper = self.nper - 1 - kstp = nstp[-1] - 1 - toffset = self.perlen.array[-1] - done = False - for iper in range(self.nper): - tp0 = t0 - for istp in range(nstp[iper]): - t1 = totim[ipos] - if t >= t0 and t < t1: - done = True - kper = iper - kstp = istp - toffset = t - tp0 - break - ipos += 1 - t0 = t1 - if done: - break - return kstp, kper, toffset - - def get_totim_from_kper_toffset(self, kper=0, toffset=0.): - """ - Get totim from a passed kper and time offset from the beginning - of a stress period - - Parameters - ---------- - kper : int - stress period. Default is 0 - toffset : float - time offset relative to the beginning of kper - - Returns - ------- - t : float - totim to return the stress period, time step, and toffset for - based on time discretization data. Default is 0. - - """ - - if kper < 0: - kper = 0. - if kper >= self.nper: - msg = 'kper ({}) '.format(kper) + 'must be less than ' + \ - 'to nper ({}).'.format(self.nper) - raise ValueError() - totim = self.get_totim() - nstp = self.nstp.array - ipos = 0 - t0 = 0. - tp0 = 0. - for iper in range(kper + 1): - tp0 = t0 - if iper == kper: - break - for istp in range(nstp[iper]): - t1 = totim[ipos] - ipos += 1 - t0 = t1 - t = tp0 + toffset - return t - - def get_cell_volumes(self): - """ - Get an array of cell volumes. 
- - Returns - ------- - vol : array of floats (nlay, nrow, ncol) - - """ - vol = np.empty((self.nlay, self.nrow, self.ncol)) - for l in range(self.nlay): - vol[l, :, :] = self.thickness.array[l] - for r in range(self.nrow): - vol[:, r, :] *= self.delc[r] - for c in range(self.ncol): - vol[:, :, c] *= self.delr[c] - return vol - - @property - def zcentroids(self): - z = np.empty((self.nlay, self.nrow, self.ncol)) - z[0, :, :] = (self.top[:, :] + self.botm[0, :, :]) / 2. - - for l in range(1, self.nlay): - z[l, :, :] = (self.botm[l - 1, :, :] + self.botm[l, :, :]) / 2. - return z - - def get_node_coordinates(self): - """ - Get y, x, and z cell centroids in local model coordinates. - - Returns - ------- - y : list of cell y-centroids - - x : list of cell x-centroids - - z : array of floats (nlay, nrow, ncol) - - """ - - delr = self.delr.array - delc = self.delc.array - - # In row direction - Ly = np.add.reduce(delc) - y = Ly - (np.add.accumulate(self.delc) - 0.5 * delc) - - # In column direction - x = np.add.accumulate(self.delr) - 0.5 * delr - - # In layer direction - z = self.zcentroids - - return y, x, z - - def get_rc_from_node_coordinates(self, x, y, local=True): - """ - Get the row and column of a point or sequence of points - in model coordinates. - - Parameters - ---------- - x : float or sequence of floats - x coordinate(s) of points to find in model grid - y : float or sequence floats - y coordinate(s) of points to find in model grid - local : bool - x and y coordinates are in model local coordinates. If false, then - x and y are in world coordinates. (default is True) - - Returns - ------- - r : row or sequence of rows (zero-based) - c : column or sequence of columns (zero-based) - - """ - mg = self.parent.modelgrid - if np.isscalar(x): - r, c = mg.intersect(x, y, local=local) - else: - r = [] - c = [] - for xx, yy in zip(x, y): - rr, cc = mg.intersect(xx, yy, local=local) - r.append(rr) - c.append(cc) - return r, c - - def get_lrc(self, nodes): - """ - Get layer, row, column from a list of zero based - MODFLOW node numbers. - - Returns - ------- - v : list of tuples containing the layer (k), row (i), - and column (j) for each node in the input list - """ - if not isinstance(nodes, list): - nodes = [nodes] - nrc = self.nrow * self.ncol - v = [] - for node in nodes: - k = int((node + 1) / nrc) - if (k * nrc) < node: - k += 1 - ij = int(node - (k - 1) * nrc) - i = int(ij / self.ncol) - if (i * self.ncol) < ij: - i += 1 - j = ij - (i - 1) * self.ncol - v.append((k - 1, i - 1, j)) - return v - - def get_node(self, lrc_list): - """ - Get node number from a list of zero based MODFLOW - layer, row, column tuples. - - Returns - ------- - v : list of MODFLOW nodes for each layer (k), row (i), - and column (j) tuple in the input list - """ - if not isinstance(lrc_list, list): - lrc_list = [lrc_list] - nrc = self.nrow * self.ncol - v = [] - for [k, i, j] in lrc_list: - node = int(((k) * nrc) + ((i) * self.ncol) + j) - v.append(node) - return v - - def get_layer(self, i, j, elev): - """Return the layer for an elevation at an i, j location. - - Parameters - ---------- - i : row index (zero-based) - j : column index - elev : elevation (in same units as model) - - Returns - ------- - k : zero-based layer index - """ - return get_layer(self, i, j, elev) - - def gettop(self): - """ - Get the top array. - - Returns - ------- - top : array of floats (nrow, ncol) - """ - return self.top.array - - def getbotm(self, k=None): - """ - Get the bottom array. 
- - Returns - ------- - botm : array of floats (nlay, nrow, ncol), or - - botm : array of floats (nrow, ncol) if k is not none - """ - if k is None: - return self.botm.array - else: - return self.botm.array[k, :, :] - - def __calculate_thickness(self): - thk = [] - thk.append(self.top - self.botm[0]) - for k in range(1, self.nlay + sum(self.laycbd)): - thk.append(self.botm[k - 1] - self.botm[k]) - self.__thickness = Util3d(self.parent, (self.nlay + sum(self.laycbd), - self.nrow, self.ncol), - np.float32, thk, name='thickness') - - @property - def thickness(self): - """ - Get a Util3d array of cell thicknesses. - - Returns - ------- - thickness : util3d array of floats (nlay, nrow, ncol) - - """ - # return self.__thickness - thk = [] - thk.append(self.top - self.botm[0]) - for k in range(1, self.nlay + sum(self.laycbd)): - thk.append(self.botm[k - 1] - self.botm[k]) - return Util3d(self.parent, (self.nlay + sum(self.laycbd), - self.nrow, self.ncol), np.float32, - thk, name='thickness') - - def write_file(self, check=True): - """ - Write the package file. - - Parameters - ---------- - check : boolean - Check package data for common errors. (default True) - - Returns - ------- - None - - """ - if check: # allows turning off package checks when writing files at model level - self.check(f='{}.chk'.format(self.name[0]), - verbose=self.parent.verbose, level=1) - # Open file for writing - f_dis = open(self.fn_path, 'w') - # Item 0: heading - f_dis.write('{0:s}\n'.format(self.heading)) - # f_dis.write('#{0:s}'.format(str(self.sr))) - # f_dis.write(" ,{0:s}:{1:s}\n".format("start_datetime", - # self.start_datetime)) - # Item 1: NLAY, NROW, NCOL, NPER, ITMUNI, LENUNI - f_dis.write('{0:10d}{1:10d}{2:10d}{3:10d}{4:10d}{5:10d}\n' \ - .format(self.nlay, self.nrow, self.ncol, self.nper, - self.itmuni, self.lenuni)) - # Item 2: LAYCBD - for l in range(0, self.nlay): - f_dis.write('{0:3d}'.format(self.laycbd[l])) - f_dis.write('\n') - # Item 3: DELR - f_dis.write(self.delr.get_file_entry()) - # Item 4: DELC - f_dis.write(self.delc.get_file_entry()) - # Item 5: Top(NCOL, NROW) - f_dis.write(self.top.get_file_entry()) - # Item 5: BOTM(NCOL, NROW) - f_dis.write(self.botm.get_file_entry()) - - # Item 6: NPER, NSTP, TSMULT, Ss/tr - for t in range(self.nper): - f_dis.write('{0:14f}{1:14d}{2:10f} '.format(self.perlen[t], - self.nstp[t], - self.tsmult[t])) - if self.steady[t]: - f_dis.write(' {0:3s}\n'.format('SS')) - else: - f_dis.write(' {0:3s}\n'.format('TR')) - f_dis.close() - - def check(self, f=None, verbose=True, level=1, checktype=None): - """ - Check dis package data for zero and negative thicknesses. - - Parameters - ---------- - f : str or file handle - String defining file name or file handle for summary file - of check method output. If a sting is passed a file handle - is created. If f is None, check method does not write - results to a summary file. (default is None) - verbose : bool - Boolean flag used to determine if check method results are - written to the screen - level : int - Check method analysis level. If level=0, summary checks are - performed. If level=1, full checks are performed. 
- - Returns - ------- - None - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow.load('model.nam') - >>> m.dis.check() - """ - chk = self._get_check(f, verbose, level, checktype) - - # make ibound of same shape as thicknesses/botm for quasi-3D models - active = chk.get_active(include_cbd=True) - - # Use either a numpy array or masked array - thickness = self.thickness.array - non_finite = ~(np.isfinite(thickness)) - if non_finite.any(): - thickness[non_finite] = 0 - thickness = np.ma.array(thickness, mask=non_finite) - - chk.values(thickness, active & (thickness <= 0), - 'zero or negative thickness', 'Error') - thin_cells = (thickness < chk.thin_cell_threshold) & (thickness > 0) - chk.values(thickness, active & thin_cells, - 'thin cells (less than checker threshold of {:.1f})' - .format(chk.thin_cell_threshold), 'Error') - chk.values(self.top.array, - active[0, :, :] & np.isnan(self.top.array), - 'nan values in top array', 'Error') - chk.values(self.botm.array, - active & np.isnan(self.botm.array), - 'nan values in bottom array', 'Error') - chk.summarize() - return chk - - # if f is not None: - # if isinstance(f, str): - # pth = os.path.join(self.parent.model_ws, f) - # f = open(pth, 'w', 0) - # - # errors = False - # txt = '\n{} PACKAGE DATA VALIDATION:\n'.format(self.name[0]) - # t = '' - # t1 = '' - # inactive = self.parent.bas6.ibound.array == 0 - # # thickness errors - # d = self.thickness.array - # d[inactive] = 1. - # if d.min() <= 0: - # errors = True - # t = '{} ERROR: Negative or zero cell thickness specified.\n'.format(t) - # if level > 0: - # idx = np.column_stack(np.where(d <= 0.)) - # t1 = self.level1_arraylist(idx, d, self.thickness.name, t1) - # else: - # t = '{} Specified cell thickness is OK.\n'.format(t) - # - # # add header to level 0 text - # txt += t - # - # if level > 0: - # if errors: - # txt += '\n DETAILED SUMMARY OF {} ERRORS:\n'.format(self.name[0]) - # # add level 1 header to level 1 text - # txt += t1 - # - # # write errors to summary file - # if f is not None: - # f.write('{}\n'.format(txt)) - # - # # write errors to stdout - # if verbose: - # print(txt) - - @staticmethod - def load(f, model, ext_unit_dict=None, check=True): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - check : boolean - Check package data for common errors. (default True) - - Returns - ------- - dis : ModflowDis object - ModflowDis object. 
- - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> dis = flopy.modflow.ModflowDis.load('test.dis', m) - - """ - - if model.verbose: - sys.stdout.write('loading dis package file...\n') - - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - - # dataset 0 -- header - header = '' - while True: - line = f.readline() - if line[0] != '#': - break - header += line.strip() - - header = header.replace('#', '') - xul, yul = None, None - rotation = None - proj4_str = None - start_datetime = "1/1/1970" - dep = False - for item in header.split(','): - if "xul" in item.lower(): - try: - xul = float(item.split(':')[1]) - except: - if model.verbose: - print(' could not parse xul ' + - 'in {}'.format(filename)) - dep = True - elif "yul" in item.lower(): - try: - yul = float(item.split(':')[1]) - except: - if model.verbose: - print(' could not parse yul ' + - 'in {}'.format(filename)) - dep = True - elif "rotation" in item.lower(): - try: - rotation = float(item.split(':')[1]) - except: - if model.verbose: - print(' could not parse rotation ' + - 'in {}'.format(filename)) - dep = True - elif "proj4_str" in item.lower(): - try: - proj4_str = ':'.join(item.split(':')[1:]).strip() - except: - if model.verbose: - print(' could not parse proj4_str ' + - 'in {}'.format(filename)) - dep = True - elif "start" in item.lower(): - try: - start_datetime = item.split(':')[1].strip() - except: - if model.verbose: - print(' could not parse start ' + - 'in {}'.format(filename)) - dep = True - if dep: - warnings.warn("SpatialReference information found in DIS header," - "this information is being ignored. " - "SpatialReference info is now stored in the namfile" - "header") - # dataset 1 - nlay, nrow, ncol, nper, itmuni, lenuni = line.strip().split()[0:6] - nlay = int(nlay) - nrow = int(nrow) - ncol = int(ncol) - nper = int(nper) - itmuni = int(itmuni) - lenuni = int(lenuni) - # dataset 2 -- laycbd - if model.verbose: - print(' Loading dis package with:\n ' + \ - '{0} layers, {1} rows, {2} columns, and {3} stress periods'.format( - nlay, nrow, ncol, nper)) - print(' loading laycbd...') - laycbd = np.zeros(nlay, dtype=np.int) - d = 0 - while True: - line = f.readline() - raw = line.strip('\n').split() - for val in raw: - if (np.int(val)) != 0: - laycbd[d] = 1 - d += 1 - if d == nlay: - break - if d == nlay: - break - # dataset 3 -- delr - if model.verbose: - print(' loading delr...') - delr = Util2d.load(f, model, (ncol,), np.float32, 'delr', - ext_unit_dict) - # dataset 4 -- delc - if model.verbose: - print(' loading delc...') - delc = Util2d.load(f, model, (nrow,), np.float32, 'delc', - ext_unit_dict) - # dataset 5 -- top - if model.verbose: - print(' loading top...') - top = Util2d.load(f, model, (nrow, ncol), np.float32, 'top', - ext_unit_dict) - # dataset 6 -- botm - ncbd = laycbd.sum() - if model.verbose: - print(' loading botm...') - print(' for {} layers and '.format(nlay) + - '{} confining beds'.format(ncbd)) - if nlay > 1: - botm = Util3d.load(f, model, (nlay + ncbd, nrow, ncol), np.float32, - 'botm', ext_unit_dict) - else: - botm = Util3d.load(f, model, (nlay, nrow, ncol), np.float32, - 'botm', - ext_unit_dict) - # dataset 7 -- stress period info - if model.verbose: - print(' loading stress period data...') - print(' for {} stress periods'.format(nper)) - perlen = [] - nstp = [] - tsmult = [] - steady = [] - for k in range(nper): - line = f.readline() - a1, a2, a3, a4 = line_parse(line)[0:4] - a1 = float(a1) - a2 = int(a2) - a3 = float(a3) - 
if a4.upper() == 'TR':
-                a4 = False
-            else:
-                a4 = True
-            perlen.append(a1)
-            nstp.append(a2)
-            tsmult.append(a3)
-            steady.append(a4)
-
-        if openfile:
-            f.close()
-
-        # set package unit number
-        unitnumber = None
-        filenames = [None]
-        if ext_unit_dict is not None:
-            unitnumber, filenames[0] = \
-                model.get_ext_dict_attr(ext_unit_dict,
-                                        filetype=ModflowDis.ftype())
-
-        # create dis object instance
-        dis = ModflowDis(model, nlay=nlay, nrow=nrow, ncol=ncol, nper=nper,
-                         delr=delr, delc=delc, laycbd=laycbd,
-                         top=top, botm=botm,
-                         perlen=perlen, nstp=nstp, tsmult=tsmult,
-                         steady=steady, itmuni=itmuni, lenuni=lenuni,
-                         xul=xul, yul=yul, rotation=rotation,
-                         proj4_str=proj4_str, start_datetime=start_datetime,
-                         unitnumber=unitnumber, filenames=filenames)
-        if check:
-            dis.check(f='{}.chk'.format(dis.name[0]),
-                      verbose=dis.parent.verbose, level=0)
-        # return dis object instance
-        return dis
-
-    @staticmethod
-    def ftype():
-        return 'DIS'
-
-    @staticmethod
-    def defaultunit():
-        return 11
-
-
-def get_layer(dis, i, j, elev):
-    """Return the layers for elevations at i, j locations.
-
-    Parameters
-    ----------
-    dis : flopy.modflow.ModflowDis object
-    i : scaler or sequence
-        row index (zero-based)
-    j : scaler or sequence
-        column index
-    elev : scaler or sequence
-        elevation (in same units as model)
-
-    Returns
-    -------
-    k : np.ndarray (1-D) or scalar
-        zero-based layer index
-    """
-
-    def to_array(arg):
-        if not isinstance(arg, np.ndarray):
-            return np.array([arg])
-        else:
-            return arg
-
-    i = to_array(i)
-    j = to_array(j)
-    elev = to_array(elev)
-    botms = dis.botm.array[:, i, j].tolist()
-    layers = np.sum(((botms - elev) > 0), axis=0)
-    # force elevations below model bottom into bottom layer
-    layers[layers > dis.nlay - 1] = dis.nlay - 1
-    layers = np.atleast_1d(np.squeeze(layers))
-    if len(layers) == 1:
-        layers = layers[0]
-    return layers
+"""
+mfdis module. Contains the ModflowDis class. Note that the user can access
+the ModflowDis class as `flopy.modflow.ModflowDis`.
+
+Additional information for this MODFLOW package can be found at the `Online
+MODFLOW Guide
+`_.
+
+"""
+
+import sys
+import warnings
+
+import numpy as np
+
+from ..pakbase import Package
+from ..utils import Util2d, Util3d
+from ..utils.reference import SpatialReference, TemporalReference
+from ..utils.flopy_io import line_parse
+
+ITMUNI = {"u": 0, "s": 1, "m": 2, "h": 3, "d": 4, "y": 5}
+LENUNI = {"u": 0, "f": 1, "m": 2, "c": 3}
+
+
+class ModflowDis(Package):
+    """
+    MODFLOW Discretization Package Class.
+
+    Parameters
+    ----------
+    model : model object
+        The model object (of type :class:`flopy.modflow.Modflow`) to which
+        this package will be added.
+    nlay : int
+        Number of model layers (the default is 1).
+    nrow : int
+        Number of model rows (the default is 2).
+    ncol : int
+        Number of model columns (the default is 2).
+    nper : int
+        Number of model stress periods (the default is 1).
+    delr : float or array of floats (ncol), optional
+        An array of spacings along a row (the default is 1.0).
+    delc : float or array of floats (nrow), optional
+        An array of spacings along a column (the default is 1.0).
+    laycbd : int or array of ints (nlay), optional
+        An array of flags indicating whether or not a layer has a Quasi-3D
+        confining bed below it. 0 indicates no confining bed, and not zero
+        indicates a confining bed. LAYCBD for the bottom layer must be 0. (the
+        default is 0)
+    top : float or array of floats (nrow, ncol), optional
+        An array of the top elevation of layer 1.
+        For the common situation in
+        which the top layer represents a water-table aquifer, it may be
+        reasonable to set Top equal to land-surface elevation (the default is
+        1.0).
+    botm : float or array of floats (nlay, nrow, ncol), optional
+        An array of the bottom elevation for each model cell (the default is
+        0.)
+    perlen : float or array of floats (nper)
+        An array of the stress period lengths.
+    nstp : int or array of ints (nper)
+        Number of time steps in each stress period (default is 1).
+    tsmult : float or array of floats (nper)
+        Time step multiplier (default is 1.0).
+    steady : boolean or array of boolean (nper)
+        True or False indicating whether or not a stress period is steady
+        state (default is True).
+    itmuni : int
+        Time units, default is days (4).
+    lenuni : int
+        Length units, default is meters (2).
+    extension : string
+        Filename extension (default is 'dis').
+    unitnumber : int
+        File unit number (default is None).
+    filenames : str or list of str
+        Filenames to use for the package. If filenames=None the package name
+        will be created using the model name and package extension. If a
+        single string is passed the package will be set to the string.
+        Default is None.
+    xul : float
+        x coordinate of the upper-left corner of the grid, default is None,
+        which means xul will be set to zero.
+    yul : float
+        y coordinate of the upper-left corner of the grid, default is None,
+        which means yul will be calculated as the sum of the delc array. This
+        default, combined with the xul and rotation defaults, will place the
+        lower-left corner of the grid at (0, 0).
+    rotation : float
+        counter-clockwise rotation (in degrees) of the grid about the
+        lower-left corner. default is 0.0
+    proj4_str : str
+        PROJ4 string that defines the projected coordinate system
+        (e.g. '+proj=utm +zone=14 +datum=WGS84 +units=m +no_defs ').
+        Can be an EPSG code (e.g. 'EPSG:32614'). Default is None.
+    start_datetime : str
+        starting datetime of the simulation. default is '1/1/1970'
+
+    Attributes
+    ----------
+    heading : str
+        Text string written to top of package input file.
+
+    Methods
+    -------
+
+    See Also
+    --------
+
+    Notes
+    -----
+
+    Examples
+    --------
+
+    >>> import flopy
+    >>> m = flopy.modflow.Modflow()
+    >>> dis = flopy.modflow.ModflowDis(m)
+
+    """
+
+    def __init__(self, model, nlay=1, nrow=2, ncol=2, nper=1, delr=1.0,
+                 delc=1.0, laycbd=0, top=1, botm=0, perlen=1, nstp=1,
+                 tsmult=1, steady=True, itmuni=4, lenuni=2, extension='dis',
+                 unitnumber=None, filenames=None,
+                 xul=None, yul=None, rotation=None,
+                 proj4_str=None, start_datetime=None):
+
+        # set default unit number if one is not specified
+        if unitnumber is None:
+            unitnumber = ModflowDis.defaultunit()
+
+        # set filenames
+        if filenames is None:
+            filenames = [None]
+        elif isinstance(filenames, str):
+            filenames = [filenames]
+
+        # Fill namefile items
+        name = [ModflowDis.ftype()]
+        units = [unitnumber]
+        extra = ['']
+
+        # set package name
+        fname = [filenames[0]]
+
+        # Call ancestor's init to set self.parent, extension, name and unit number
+        Package.__init__(self, model, extension=extension, name=name,
+                         unit_number=units, extra=extra, filenames=fname)
+
+        self.url = 'dis.htm'
+        self.nrow = nrow
+        self.ncol = ncol
+        self.nlay = nlay
+        self.nper = nper
+
+        # initialize botm to an appropriately sized array
+        if nlay > 1:
+            if isinstance(botm, float) or isinstance(botm, int):
+                botm = np.linspace(top, botm, nlay)
+
+        # Set values of all parameters
+        self.heading = '# {} package for '.format(self.name[0]) + \
+                       ' {}, '.format(model.version_types[model.version]) + \
+                       'generated by Flopy.'
+        self.laycbd = Util2d(model, (self.nlay,), np.int32, laycbd,
+                             name='laycbd')
+        self.laycbd[-1] = 0  # bottom layer must be zero
+        self.delr = Util2d(model, (self.ncol,), np.float32, delr, name='delr',
+                           locat=self.unit_number[0])
+        self.delc = Util2d(model, (self.nrow,), np.float32, delc, name='delc',
+                           locat=self.unit_number[0])
+        self.top = Util2d(model, (self.nrow, self.ncol), np.float32,
+                          top, name='model_top', locat=self.unit_number[0])
+        self.botm = Util3d(model, (self.nlay + sum(self.laycbd),
+                                   self.nrow, self.ncol), np.float32, botm,
+                           'botm', locat=self.unit_number[0])
+        self.perlen = Util2d(model, (self.nper,), np.float32, perlen,
+                             name='perlen')
+        self.nstp = Util2d(model, (self.nper,), np.int32, nstp, name='nstp')
+        self.tsmult = Util2d(model, (self.nper,), np.float32, tsmult,
+                             name='tsmult')
+        self.steady = Util2d(model, (self.nper,), np.bool,
+                             steady, name='steady')
+
+        try:
+            self.itmuni = int(itmuni)
+        except:
+            self.itmuni = ITMUNI[itmuni.lower()[0]]
+        try:
+            self.lenuni = int(lenuni)
+        except:
+            self.lenuni = LENUNI[lenuni.lower()[0]]
+
+        self.parent.add_package(self)
+        self.itmuni_dict = {0: "undefined", 1: "seconds", 2: "minutes",
+                            3: "hours", 4: "days", 5: "years"}
+
+        if xul is None:
+            xul = model._xul
+        if yul is None:
+            yul = model._yul
+        if rotation is None:
+            rotation = model._rotation
+        if proj4_str is None:
+            proj4_str = model._proj4_str
+        if start_datetime is None:
+            start_datetime = model._start_datetime
+
+        # set the model grid coordinate info
+        xll = None
+        yll = None
+        mg = model.modelgrid
+        if rotation is not None:
+            mg.set_coord_info(xoff=None, yoff=None, angrot=rotation)
+        if xul is not None:
+            xll = mg._xul_to_xll(xul)
+        if yul is not None:
+            yll = mg._yul_to_yll(yul)
+        mg.set_coord_info(xoff=xll, yoff=yll, angrot=rotation, proj4=proj4_str)
+
+        xll = mg.xoffset
+        yll = mg.yoffset
+        rotation = mg.angrot
+        with warnings.catch_warnings():
+            warnings.simplefilter("ignore", category=DeprecationWarning)
+            self._sr = SpatialReference(self.delr, self.delc, self.lenuni,
+                                        xll=xll, yll=yll,
+                                        rotation=rotation or 0.0,
+                                        proj4_str=proj4_str)
+
+        self.tr = TemporalReference(itmuni=self.itmuni,
+                                    start_datetime=start_datetime)
+
+        self.start_datetime = start_datetime
+        # calculate layer thicknesses
+        self.__calculate_thickness()
+
+    @property
+    def sr(self):
+        warnings.warn(
+            'SpatialReference has been deprecated. Use Grid instead.',
+            DeprecationWarning)
+        return self._sr
+
+    @sr.setter
+    def sr(self, sr):
+        warnings.warn(
+            'SpatialReference has been deprecated. Use Grid instead.',
+            DeprecationWarning)
+        self._sr = sr
+
+    def checklayerthickness(self):
+        """
+        Check layer thickness.
+
+        """
+        return (self.thickness > 0).all()
+
+    def get_totim(self):
+        """
+        Get the totim at the end of each time step.
+
+        Returns
+        -------
+        totim : numpy array
+            numpy array with simulation totim at the end of each time step
+
+        """
+        totim = []
+        nstp = self.nstp.array
+        perlen = self.perlen.array
+        tsmult = self.tsmult.array
+        t = 0.
+        for kper in range(self.nper):
+            m = tsmult[kper]
+            p = float(nstp[kper])
+            dt = perlen[kper]
+            if m > 1:
+                dt *= (m - 1.) / (m ** p - 1.)
+            else:
+                dt = dt / p
+            for kstp in range(nstp[kper]):
+                t += dt
+                totim.append(t)
+                if m > 1:
+                    dt *= m
+        return np.array(totim, dtype=np.float)
+
+    def get_final_totim(self):
+        """
+        Get the totim at the end of the simulation.
+
+        Returns
+        -------
+        totim : float
+            maximum simulation totim
+
+        """
+        return self.get_totim()[-1]
+
+    def get_kstp_kper_toffset(self, t=0.):
+        """
+        Get the stress period, time step, and time offset from passed time.
+
+        Parameters
+        ----------
+        t : float
+            totim to return the stress period, time step, and toffset for
+            based on time discretization data. Default is 0.
+
+        Returns
+        -------
+        kstp : int
+            time step in stress period corresponding to passed totim
+        kper : int
+            stress period corresponding to passed totim
+        toffset : float
+            time offset of passed totim from the beginning of kper
+
+        """
+
+        if t < 0.:
+            t = 0.
+        totim = self.get_totim()
+        nstp = self.nstp.array
+        ipos = 0
+        t0 = 0.
+        kper = self.nper - 1
+        kstp = nstp[-1] - 1
+        toffset = self.perlen.array[-1]
+        done = False
+        for iper in range(self.nper):
+            tp0 = t0
+            for istp in range(nstp[iper]):
+                t1 = totim[ipos]
+                if t >= t0 and t < t1:
+                    done = True
+                    kper = iper
+                    kstp = istp
+                    toffset = t - tp0
+                    break
+                ipos += 1
+                t0 = t1
+            if done:
+                break
+        return kstp, kper, toffset
+
+    def get_totim_from_kper_toffset(self, kper=0, toffset=0.):
+        """
+        Get totim from a passed kper and time offset from the beginning
+        of a stress period.
+
+        Parameters
+        ----------
+        kper : int
+            stress period. Default is 0
+        toffset : float
+            time offset relative to the beginning of kper
+
+        Returns
+        -------
+        t : float
+            totim corresponding to the passed kper and toffset, based on
+            time discretization data.
+
+        """
+
+        if kper < 0:
+            kper = 0
+        if kper >= self.nper:
+            msg = 'kper ({}) '.format(kper) + 'must be less than ' + \
+                  'nper ({}).'.format(self.nper)
+            raise ValueError(msg)
+        totim = self.get_totim()
+        nstp = self.nstp.array
+        ipos = 0
+        t0 = 0.
+        tp0 = 0.
+        for iper in range(kper + 1):
+            tp0 = t0
+            if iper == kper:
+                break
+            for istp in range(nstp[iper]):
+                t1 = totim[ipos]
+                ipos += 1
+                t0 = t1
+        t = tp0 + toffset
+        return t
+
+    def get_cell_volumes(self):
+        """
+        Get an array of cell volumes.
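+
+        Volumes are computed as cell thickness multiplied by the delc and
+        delr spacings of each cell.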
+
+        Returns
+        -------
+        vol : array of floats (nlay, nrow, ncol)
+
+        """
+        vol = np.empty((self.nlay, self.nrow, self.ncol))
+        for l in range(self.nlay):
+            vol[l, :, :] = self.thickness.array[l]
+        for r in range(self.nrow):
+            vol[:, r, :] *= self.delc[r]
+        for c in range(self.ncol):
+            vol[:, :, c] *= self.delr[c]
+        return vol
+
+    @property
+    def zcentroids(self):
+        z = np.empty((self.nlay, self.nrow, self.ncol))
+        z[0, :, :] = (self.top[:, :] + self.botm[0, :, :]) / 2.
+
+        for l in range(1, self.nlay):
+            z[l, :, :] = (self.botm[l - 1, :, :] + self.botm[l, :, :]) / 2.
+        return z
+
+    def get_node_coordinates(self):
+        """
+        Get y, x, and z cell centroids in local model coordinates.
+
+        Returns
+        -------
+        y : list of cell y-centroids
+
+        x : list of cell x-centroids
+
+        z : array of floats (nlay, nrow, ncol)
+
+        """
+
+        delr = self.delr.array
+        delc = self.delc.array
+
+        # In row direction
+        Ly = np.add.reduce(delc)
+        y = Ly - (np.add.accumulate(self.delc) - 0.5 * delc)
+
+        # In column direction
+        x = np.add.accumulate(self.delr) - 0.5 * delr
+
+        # In layer direction
+        z = self.zcentroids
+
+        return y, x, z
+
+    def get_rc_from_node_coordinates(self, x, y, local=True):
+        """
+        Get the row and column of a point or sequence of points
+        in model coordinates.
+
+        Parameters
+        ----------
+        x : float or sequence of floats
+            x coordinate(s) of points to find in model grid
+        y : float or sequence of floats
+            y coordinate(s) of points to find in model grid
+        local : bool
+            x and y coordinates are in model local coordinates. If false, then
+            x and y are in world coordinates. (default is True)
+
+        Returns
+        -------
+        r : row or sequence of rows (zero-based)
+        c : column or sequence of columns (zero-based)
+
+        """
+        mg = self.parent.modelgrid
+        if np.isscalar(x):
+            r, c = mg.intersect(x, y, local=local)
+        else:
+            r = []
+            c = []
+            for xx, yy in zip(x, y):
+                rr, cc = mg.intersect(xx, yy, local=local)
+                r.append(rr)
+                c.append(cc)
+        return r, c
+
+    def get_lrc(self, nodes):
+        """
+        Get layer, row, column from a list of zero-based
+        MODFLOW node numbers.
+
+        Returns
+        -------
+        v : list of tuples containing the layer (k), row (i),
+            and column (j) for each node in the input list
+        """
+        if not isinstance(nodes, list):
+            nodes = [nodes]
+        nrc = self.nrow * self.ncol
+        v = []
+        for node in nodes:
+            k = int((node + 1) / nrc)
+            if (k * nrc) < node:
+                k += 1
+            ij = int(node - (k - 1) * nrc)
+            i = int(ij / self.ncol)
+            if (i * self.ncol) < ij:
+                i += 1
+            j = ij - (i - 1) * self.ncol
+            v.append((k - 1, i - 1, j))
+        return v
+
+    def get_node(self, lrc_list):
+        """
+        Get node number from a list of zero-based MODFLOW
+        layer, row, column tuples.
+
+        Returns
+        -------
+        v : list of MODFLOW nodes for each layer (k), row (i),
+            and column (j) tuple in the input list
+        """
+        if not isinstance(lrc_list, list):
+            lrc_list = [lrc_list]
+        nrc = self.nrow * self.ncol
+        v = []
+        for [k, i, j] in lrc_list:
+            node = int(((k) * nrc) + ((i) * self.ncol) + j)
+            v.append(node)
+        return v
+
+    def get_layer(self, i, j, elev):
+        """Return the layer for an elevation at an i, j location.
+
+        Parameters
+        ----------
+        i : row index (zero-based)
+        j : column index
+        elev : elevation (in same units as model)
+
+        Returns
+        -------
+        k : zero-based layer index
+        """
+        return get_layer(self, i, j, elev)
+
+    def gettop(self):
+        """
+        Get the top array.
+
+        Returns
+        -------
+        top : array of floats (nrow, ncol)
+        """
+        return self.top.array
+
+    def getbotm(self, k=None):
+        """
+        Get the bottom array.
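+
+        If a zero-based layer index k is passed, only the bottom array for
+        that layer is returned.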
+
+        Returns
+        -------
+        botm : array of floats (nlay, nrow, ncol), or
+
+        botm : array of floats (nrow, ncol) if k is not None
+        """
+        if k is None:
+            return self.botm.array
+        else:
+            return self.botm.array[k, :, :]
+
+    def __calculate_thickness(self):
+        thk = []
+        thk.append(self.top - self.botm[0])
+        for k in range(1, self.nlay + sum(self.laycbd)):
+            thk.append(self.botm[k - 1] - self.botm[k])
+        self.__thickness = Util3d(self.parent, (self.nlay + sum(self.laycbd),
+                                                self.nrow, self.ncol),
+                                  np.float32, thk, name='thickness')
+
+    @property
+    def thickness(self):
+        """
+        Get a Util3d array of cell thicknesses.
+
+        Returns
+        -------
+        thickness : util3d array of floats (nlay, nrow, ncol)
+
+        """
+        # return self.__thickness
+        thk = []
+        thk.append(self.top - self.botm[0])
+        for k in range(1, self.nlay + sum(self.laycbd)):
+            thk.append(self.botm[k - 1] - self.botm[k])
+        return Util3d(self.parent, (self.nlay + sum(self.laycbd),
+                                    self.nrow, self.ncol), np.float32,
+                      thk, name='thickness')
+
+    def write_file(self, check=True):
+        """
+        Write the package file.
+
+        Parameters
+        ----------
+        check : boolean
+            Check package data for common errors. (default True)
+
+        Returns
+        -------
+        None
+
+        """
+        if check:  # allows turning off package checks when writing files at model level
+            self.check(f='{}.chk'.format(self.name[0]),
+                       verbose=self.parent.verbose, level=1)
+        # Open file for writing
+        f_dis = open(self.fn_path, 'w')
+        # Item 0: heading
+        f_dis.write('{0:s}\n'.format(self.heading))
+        # f_dis.write('#{0:s}'.format(str(self.sr)))
+        # f_dis.write(" ,{0:s}:{1:s}\n".format("start_datetime",
+        #                                      self.start_datetime))
+        # Item 1: NLAY, NROW, NCOL, NPER, ITMUNI, LENUNI
+        f_dis.write('{0:10d}{1:10d}{2:10d}{3:10d}{4:10d}{5:10d}\n' \
+                    .format(self.nlay, self.nrow, self.ncol, self.nper,
+                            self.itmuni, self.lenuni))
+        # Item 2: LAYCBD
+        for l in range(0, self.nlay):
+            f_dis.write('{0:3d}'.format(self.laycbd[l]))
+        f_dis.write('\n')
+        # Item 3: DELR
+        f_dis.write(self.delr.get_file_entry())
+        # Item 4: DELC
+        f_dis.write(self.delc.get_file_entry())
+        # Item 5: TOP (NROW, NCOL)
+        f_dis.write(self.top.get_file_entry())
+        # Item 6: BOTM (NLAY, NROW, NCOL)
+        f_dis.write(self.botm.get_file_entry())
+
+        # Item 7: PERLEN, NSTP, TSMULT, Ss/Tr
+        for t in range(self.nper):
+            f_dis.write('{0:14f}{1:14d}{2:10f} '.format(self.perlen[t],
+                                                        self.nstp[t],
+                                                        self.tsmult[t]))
+            if self.steady[t]:
+                f_dis.write(' {0:3s}\n'.format('SS'))
+            else:
+                f_dis.write(' {0:3s}\n'.format('TR'))
+        f_dis.close()
+
+    def check(self, f=None, verbose=True, level=1, checktype=None):
+        """
+        Check dis package data for zero and negative thicknesses.
+
+        Parameters
+        ----------
+        f : str or file handle
+            String defining file name or file handle for summary file
+            of check method output. If a string is passed a file handle
+            is created. If f is None, check method does not write
+            results to a summary file. (default is None)
+        verbose : bool
+            Boolean flag used to determine if check method results are
+            written to the screen
+        level : int
+            Check method analysis level. If level=0, summary checks are
+            performed. If level=1, full checks are performed.
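+        checktype : check class, optional
+            Custom check class to use. If None, the default flopy check
+            class for the package is used. (default is None)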
+ + Returns + ------- + None + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow.load('model.nam') + >>> m.dis.check() + """ + chk = self._get_check(f, verbose, level, checktype) + + # make ibound of same shape as thicknesses/botm for quasi-3D models + active = chk.get_active(include_cbd=True) + + # Use either a numpy array or masked array + thickness = self.thickness.array + non_finite = ~(np.isfinite(thickness)) + if non_finite.any(): + thickness[non_finite] = 0 + thickness = np.ma.array(thickness, mask=non_finite) + + chk.values(thickness, active & (thickness <= 0), + 'zero or negative thickness', 'Error') + thin_cells = (thickness < chk.thin_cell_threshold) & (thickness > 0) + chk.values(thickness, active & thin_cells, + 'thin cells (less than checker threshold of {:.1f})' + .format(chk.thin_cell_threshold), 'Error') + chk.values(self.top.array, + active[0, :, :] & np.isnan(self.top.array), + 'nan values in top array', 'Error') + chk.values(self.botm.array, + active & np.isnan(self.botm.array), + 'nan values in bottom array', 'Error') + chk.summarize() + return chk + + # if f is not None: + # if isinstance(f, str): + # pth = os.path.join(self.parent.model_ws, f) + # f = open(pth, 'w', 0) + # + # errors = False + # txt = '\n{} PACKAGE DATA VALIDATION:\n'.format(self.name[0]) + # t = '' + # t1 = '' + # inactive = self.parent.bas6.ibound.array == 0 + # # thickness errors + # d = self.thickness.array + # d[inactive] = 1. + # if d.min() <= 0: + # errors = True + # t = '{} ERROR: Negative or zero cell thickness specified.\n'.format(t) + # if level > 0: + # idx = np.column_stack(np.where(d <= 0.)) + # t1 = self.level1_arraylist(idx, d, self.thickness.name, t1) + # else: + # t = '{} Specified cell thickness is OK.\n'.format(t) + # + # # add header to level 0 text + # txt += t + # + # if level > 0: + # if errors: + # txt += '\n DETAILED SUMMARY OF {} ERRORS:\n'.format(self.name[0]) + # # add level 1 header to level 1 text + # txt += t1 + # + # # write errors to summary file + # if f is not None: + # f.write('{}\n'.format(txt)) + # + # # write errors to stdout + # if verbose: + # print(txt) + + @staticmethod + def load(f, model, ext_unit_dict=None, check=True): + """ + Load an existing package. + + Parameters + ---------- + f : filename or file handle + File to load. + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to + which this package will be added. + ext_unit_dict : dictionary, optional + If the arrays in the file are specified using EXTERNAL, + or older style array control records, then `f` should be a file + handle. In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. + check : boolean + Check package data for common errors. (default True) + + Returns + ------- + dis : ModflowDis object + ModflowDis object. 
+
+        Examples
+        --------
+
+        >>> import flopy
+        >>> m = flopy.modflow.Modflow()
+        >>> dis = flopy.modflow.ModflowDis.load('test.dis', m)
+
+        """
+
+        if model.verbose:
+            sys.stdout.write('loading dis package file...\n')
+
+        openfile = not hasattr(f, 'read')
+        if openfile:
+            filename = f
+            f = open(filename, 'r')
+        else:
+            # f is an open file handle; keep a name for messages below
+            filename = getattr(f, 'name', '(file handle)')
+
+        # dataset 0 -- header
+        header = ''
+        while True:
+            line = f.readline()
+            if line[0] != '#':
+                break
+            header += line.strip()
+
+        header = header.replace('#', '')
+        xul, yul = None, None
+        rotation = None
+        proj4_str = None
+        start_datetime = "1/1/1970"
+        dep = False
+        for item in header.split(','):
+            if "xul" in item.lower():
+                try:
+                    xul = float(item.split(':')[1])
+                except:
+                    if model.verbose:
+                        print('   could not parse xul ' +
+                              'in {}'.format(filename))
+                    dep = True
+            elif "yul" in item.lower():
+                try:
+                    yul = float(item.split(':')[1])
+                except:
+                    if model.verbose:
+                        print('   could not parse yul ' +
+                              'in {}'.format(filename))
+                    dep = True
+            elif "rotation" in item.lower():
+                try:
+                    rotation = float(item.split(':')[1])
+                except:
+                    if model.verbose:
+                        print('   could not parse rotation ' +
+                              'in {}'.format(filename))
+                    dep = True
+            elif "proj4_str" in item.lower():
+                try:
+                    proj4_str = ':'.join(item.split(':')[1:]).strip()
+                except:
+                    if model.verbose:
+                        print('   could not parse proj4_str ' +
+                              'in {}'.format(filename))
+                    dep = True
+            elif "start" in item.lower():
+                try:
+                    start_datetime = item.split(':')[1].strip()
+                except:
+                    if model.verbose:
+                        print('   could not parse start ' +
+                              'in {}'.format(filename))
+                    dep = True
+        if dep:
+            warnings.warn("SpatialReference information found in DIS header, "
+                          "this information is being ignored. "
+                          "SpatialReference info is now stored in the namfile "
+                          "header")
+        # dataset 1
+        nlay, nrow, ncol, nper, itmuni, lenuni = line.strip().split()[0:6]
+        nlay = int(nlay)
+        nrow = int(nrow)
+        ncol = int(ncol)
+        nper = int(nper)
+        itmuni = int(itmuni)
+        lenuni = int(lenuni)
+        # dataset 2 -- laycbd
+        if model.verbose:
+            print('   Loading dis package with:\n      ' + \
+                  '{0} layers, {1} rows, {2} columns, and {3} stress periods'.format(
+                      nlay, nrow, ncol, nper))
+            print('   loading laycbd...')
+        laycbd = np.zeros(nlay, dtype=np.int)
+        d = 0
+        while True:
+            line = f.readline()
+            raw = line.strip('\n').split()
+            for val in raw:
+                if (np.int(val)) != 0:
+                    laycbd[d] = 1
+                d += 1
+                if d == nlay:
+                    break
+            if d == nlay:
+                break
+        # dataset 3 -- delr
+        if model.verbose:
+            print('   loading delr...')
+        delr = Util2d.load(f, model, (ncol,), np.float32, 'delr',
+                           ext_unit_dict)
+        # dataset 4 -- delc
+        if model.verbose:
+            print('   loading delc...')
+        delc = Util2d.load(f, model, (nrow,), np.float32, 'delc',
+                           ext_unit_dict)
+        # dataset 5 -- top
+        if model.verbose:
+            print('   loading top...')
+        top = Util2d.load(f, model, (nrow, ncol), np.float32, 'top',
+                          ext_unit_dict)
+        # dataset 6 -- botm
+        ncbd = laycbd.sum()
+        if model.verbose:
+            print('   loading botm...')
+            print('      for {} layers and '.format(nlay) +
+                  '{} confining beds'.format(ncbd))
+        if nlay > 1:
+            botm = Util3d.load(f, model, (nlay + ncbd, nrow, ncol), np.float32,
+                               'botm', ext_unit_dict)
+        else:
+            botm = Util3d.load(f, model, (nlay, nrow, ncol), np.float32,
+                               'botm',
+                               ext_unit_dict)
+        # dataset 7 -- stress period info
+        if model.verbose:
+            print('   loading stress period data...')
+            print('       for {} stress periods'.format(nper))
+        perlen = []
+        nstp = []
+        tsmult = []
+        steady = []
+        for k in range(nper):
+            line = f.readline()
+            a1, a2, a3, a4 = line_parse(line)[0:4]
+            a1 = float(a1)
+            a2 = int(a2)
+            a3 = float(a3)
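+            # the fourth token is the Ss/Tr flag: 'TR' marks a transient
+            # stress period; anything else is treated as steady state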
+            if a4.upper() == 'TR':
+                a4 = False
+            else:
+                a4 = True
+            perlen.append(a1)
+            nstp.append(a2)
+            tsmult.append(a3)
+            steady.append(a4)
+
+        if openfile:
+            f.close()
+
+        # set package unit number
+        unitnumber = None
+        filenames = [None]
+        if ext_unit_dict is not None:
+            unitnumber, filenames[0] = \
+                model.get_ext_dict_attr(ext_unit_dict,
+                                        filetype=ModflowDis.ftype())
+
+        # create dis object instance
+        dis = ModflowDis(model, nlay=nlay, nrow=nrow, ncol=ncol, nper=nper,
+                         delr=delr, delc=delc, laycbd=laycbd,
+                         top=top, botm=botm,
+                         perlen=perlen, nstp=nstp, tsmult=tsmult,
+                         steady=steady, itmuni=itmuni, lenuni=lenuni,
+                         xul=xul, yul=yul, rotation=rotation,
+                         proj4_str=proj4_str, start_datetime=start_datetime,
+                         unitnumber=unitnumber, filenames=filenames)
+        if check:
+            dis.check(f='{}.chk'.format(dis.name[0]),
+                      verbose=dis.parent.verbose, level=0)
+        # return dis object instance
+        return dis
+
+    @staticmethod
+    def ftype():
+        return 'DIS'
+
+    @staticmethod
+    def defaultunit():
+        return 11
+
+
+def get_layer(dis, i, j, elev):
+    """Return the layers for elevations at i, j locations.
+
+    Parameters
+    ----------
+    dis : flopy.modflow.ModflowDis object
+    i : scalar or sequence
+        row index (zero-based)
+    j : scalar or sequence
+        column index
+    elev : scalar or sequence
+        elevation (in same units as model)
+
+    Returns
+    -------
+    k : np.ndarray (1-D) or scalar
+        zero-based layer index
+    """
+
+    def to_array(arg):
+        if not isinstance(arg, np.ndarray):
+            return np.array([arg])
+        else:
+            return arg
+
+    i = to_array(i)
+    j = to_array(j)
+    elev = to_array(elev)
+    botms = dis.botm.array[:, i, j].tolist()
+    layers = np.sum(((botms - elev) > 0), axis=0)
+    # force elevations below model bottom into bottom layer
+    layers[layers > dis.nlay - 1] = dis.nlay - 1
+    layers = np.atleast_1d(np.squeeze(layers))
+    if len(layers) == 1:
+        layers = layers[0]
+    return layers
diff --git a/flopy/modflow/mfdisu.py b/flopy/modflow/mfdisu.py
index dfc94344de..a14d423301 100644
--- a/flopy/modflow/mfdisu.py
+++ b/flopy/modflow/mfdisu.py
@@ -1,1030 +1,1030 @@
-"""
-mfdisu module. Contains the ModflowDisU class. Note that the user can access
-the ModflowDisU class as `flopy.modflow.ModflowDisU`.
-
-"""
-
-import sys
-import numpy as np
-from ..pakbase import Package
-from ..utils import Util2d, Util3d, read1d
-
-ITMUNI = {"u": 0, "s": 1, "m": 2, "h": 3, "d": 4, "y": 5}
-LENUNI = {"u": 0, "f": 1, "m": 2, "c": 3}
-
-
-class ModflowDisU(Package):
-    """
-    MODFLOW Unstructured Discretization Package Class.
-
-    Parameters
-    ----------
-    model : model object
-        The model object (of type :class:`flopy.modflow.Modflow`) to which
-        this package will be added.
-    nodes : int
-        Number of nodes in the model grid (default is 2).
-    nlay : int
-        Number of layers in the model grid (default is 1).
-    njag : int
-        Total number of connections of an unstructured grid. njag is used to
-        dimension the sparse matrix in a compressed row storage format. For
-        symmetric arrays, only the upper triangle of the matrix may be
-        entered. For that case, the symmetric portion (minus the diagonal
-        terms) is dimensioned as njags = (njag - nodes) / 2.
-        (default is None).
-    ivsd : int
-        is the vertical sub-discretization index. For purposes of this flag,
-        vertical sub-discretization is defined to occur when all layers are
-        not a stacked representation of each other.
-        If IVSD = 0 there is no sub-discretization of layers within the model
-        domain. That is, grids are not nested in the vertical direction.
- However, one layer may have a different grid structure from the next - due to different sub-gridding structures within each layer. - If IVSD = 1 there could be sub-discretization of layers with - vertically nested grids (as shown in Figure 5c in the MODFLOW-USG - document) within the domain. For this case, the vertical connection - index IVC is required to determine the vertical connections of every - node. Otherwise, the vertical connections are internally computed and - IVC is not read. - If IVSD = -1 there is no vertical sub-discretization of layers, and - further, the horizontal discretization of all layers is the same. For - this case, the cell areas (AREA) are read only for one layer and are - computed to be the same for all the stacked layers. A structured - finite-difference grid is an example of this condition. - (default is 0). - nper : int - Number of model stress periods (the default is 1). - itmuni : int - Time units, default is days (4) - lenuni : int - Length units, default is meters (2) - idsymrd : int - A flag indicating if the finite-volume connectivity information of an - unstructured grid is input as a full matrix or as a symmetric matrix - in the input file. - If idsymrd is 0 the finite-volume connectivity information is provided - for the full matrix of the porous matrix grid-block connections of an - unstructured grid. The code internally stores only the symmetric - portion of this information. This input structure (IDSYMRD=0) is easy - to organize but contains unwanted information which is parsed out - when the information is stored. - If idsymrd is 1 then finite-volume connectivity information is - provided only for the upper triangular portion of the porous matrix - grid-block connections within the unstructured grid. This input - structure (IDSYMRD=1) is compact but is slightly more complicated to - organize. Only the non-zero upper triangular items of each row are - read in sequence for all symmetric matrices. - (default is 0). - laycbd : int or array of ints (nlay), optional - An array of flags indicating whether or not a layer has a Quasi-3D - confining bed below it. 0 indicates no confining bed, and not zero - indicates a confining bed. LAYCBD for the bottom layer must be 0. (the - default is 0) - nodelay : int or array of ints (nlay) - The number of cells in each layer. (the default is None, which means - the number of cells in a layer is equal to nodes / nlay). - top : float or array of floats (nodes), optional - An array of the top elevation for every cell. For the situation in - which the top layer represents a water-table aquifer, it may be - reasonable to set Top equal to land-surface elevation (the default is - 1.0) - bot : float or array of floats (nodes), optional - An array of the bottom elevation for each model cell (the default is - 0.) - area : float or array of floats - Surface area for model cells. Area is for only one layer if IVSD = -1 - to indicate that the grid is vertically stacked. Otherwise, area is - required for each layer in the model grid. Note that there may be - different number of nodes per layer (ndslay) for an unstructured grid. - (default is 1.0) - iac : array of integers - is a vector indicating the number of connections plus 1 for each - node. Note that the IAC array is only supplied for the GWF cells; - the IAC array is internally expanded to include CLN or GNC nodes if - they are present in a simulation. - (default is None. iac must be provided). 
- ja : array of integers - is a list of cell number (n) followed by its connecting cell numbers - (m) for each of the m cells connected to cell n. This list is - sequentially provided for the first to the last GWF cell. Note that - the cell and its connections are only supplied for the GWF cells and - their connections to the other GWF cells. This connectivity is - internally expanded if CLN or GNC nodes are present in a simulation. - Also note that the JA list input may be chopped up to have every node - number and its connectivity list on a separate line for ease in - readability of the file. To further ease readability of the file, the - node number of the cell whose connectivity is subsequently listed, - may be expressed as a negative number the sign of which is - subsequently corrected by the code. - (default is None. ja must be provided). - ivc : int or array of integers - is an index array indicating the direction between a node n and all - its m connections. IVC = 0 if the connection between n and m is - horizontal. IVC = 1 if the connecting node m is vertically oriented - to node n. Note that if the CLN Process is active, the connection - between two CLN cells has IVC = 2 and the connection between a CLN - cell and a GWF cell has IVC = 3. - (default is None. ivc must be provided if ivsd = 1) - cl1 : float or array of floats - is the perpendicular length between the center of a node (node 1) and - the interface between the node and its adjoining node (node 2). - (default is None. cl1 and cl2 must be specified, or cl12 must be - specified) - cl2 : float or array of floats - is the perpendicular length between node 2 and the interface between - nodes 1 and 2, and is at the symmetric location of CL1. - (default is None. cl1 and cl2 must be specified, or cl12 must be - specified) - cl12 : float or array of floats - is the array containing CL1 and CL2 lengths, where CL1 is the - perpendicular length between the center of a node (node 1) and the - interface between the node and its adjoining node (node 2). CL2, - which is the perpendicular length between node 2 and the interface - between nodes 1 and 2 is at the symmetric location of CL1. The array - CL12 reads both CL1 and CL2 in the upper and lower triangular - portions of the matrix respectively. Note that the CL1 and CL2 arrays - are only supplied for the GWF cell connections and are internally - expanded if CLN or GNC nodes exist in a simulation. - (default is None. cl1 and cl2 must be specified, or cl12 must be - specified) - fahl : float or arry of floats - Area of the interface Anm between nodes n and m. - (default is None. fahl must be specified.) - perlen : float or array of floats (nper) - An array of the stress period lengths. - nstp : int or array of ints (nper) - Number of time steps in each stress period (default is 1). - tsmult : float or array of floats (nper) - Time step multiplier (default is 1.0). - steady : boolean or array of boolean (nper) - true or False indicating whether or not stress period is steady state - (default is True). - extension : string - Filename extension (default is 'dis') - unitnumber : int - File unit number (default is None). - filenames : str or list of str - Filenames to use for the package. If filenames=None the package name - will be created using the model name and package extension. If a - single string is passed the package will be set to the string. - Default is None. - - - Attributes - ---------- - heading : str - Text string written to top of package input file. 
- - Methods - ------- - - See Also - -------- - - Notes - ----- - Does not work yet for multi-layer USG models because top and bot cannot - be u3d instances until u3d is modified to handle multiple u2d instances - of different size. - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> disu = flopy.modflow.ModflowDisU(m) - - """ - - def __init__(self, model, nodes=2, nlay=1, njag=None, ivsd=0, nper=1, - itmuni=4, lenuni=2, idsymrd=0, laycbd=0, nodelay=None, - top=1, bot=0, area=1.0, iac=None, ja=None, ivc=None, - cl1=None, cl2=None, cl12=None, fahl=None, perlen=1, nstp=1, - tsmult=1, steady=True, extension='disu', - unitnumber=None, filenames=None, start_datetime="1/1/1970"): - - # set default unit number of one is not specified - if unitnumber is None: - unitnumber = ModflowDisU.defaultunit() - - # set filenames - if filenames is None: - filenames = [None] - elif isinstance(filenames, str): - filenames = [filenames] - - # Fill namefile items - name = [ModflowDisU.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - # Set values of all parameters - self.url = 'dis.htm' - self.heading = '# {} package for '.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' - - self.nodes = nodes - self.nlay = nlay - self.njag = njag - self.ivsd = ivsd - self.nper = nper - try: - self.itmuni = int(itmuni) - except: - self.itmuni = ITMUNI[itmuni.lower()[0]] - try: - self.lenuni = int(lenuni) - except: - self.lenuni = LENUNI[lenuni.lower()[0]] - self.idsymrd = idsymrd - - # LAYCBD - self.laycbd = Util2d(model, (self.nlay,), np.int32, laycbd, - name='laycbd') - self.laycbd[-1] = 0 # bottom layer must be zero - - # NODELAY - if nodelay is None: - npl = int(nodes / nlay) - nodelay = [] - for k in range(self.nlay): - nodelay.append(npl) - self.nodelay = Util2d(model, (self.nlay,), np.int32, nodelay, - name='nodelay', locat=self.unit_number[0]) - - # set ncol and nrow for array readers - nrow = None - ncol = self.nodelay.array[:] - - # Top and bot are both 1d arrays of size nodes - self.top = Util3d(model, (nlay, nrow, ncol), np.float32, top, - name='top', - locat=self.unit_number[0]) - self.bot = Util3d(model, (nlay, nrow, ncol), np.float32, bot, - name='bot', - locat=self.unit_number[0]) - - # Area is Util2d if ivsd == -1, otherwise it is Util3d - if ivsd == -1: - self.area = Util2d(model, (self.nodelay[0],), np.float32, area, - 'area', locat=self.unit_number[0]) - else: - self.area = Util3d(model, (nlay, nrow, ncol), np.float32, area, - name='area', locat=self.unit_number[0]) - - # Connectivity and ivc - if iac is None: - raise Exception('iac must be provided') - self.iac = Util2d(model, (self.nodes,), np.int32, - iac, name='iac', locat=self.unit_number[0]) - assert self.iac.array.sum() == njag, 'The sum of iac must equal njag.' 
- if ja is None: - raise Exception('ja must be provided') - self.ja = Util2d(model, (self.njag,), np.int32, - ja, name='ja', locat=self.unit_number[0]) - self.ivc = None - if self.ivsd == 1: - if ivc is None: - raise Exception('ivc must be provided if ivsd is 1.') - self.ivc = Util2d(model, (self.njag,), np.int32, - ivc, name='ivc', locat=self.unit_number[0]) - - # Connection lengths - if idsymrd == 1: - njags = int((njag - nodes) / 2) - if cl1 is None: - raise Exception('idsymrd is 1 but cl1 was not specified.') - if cl2 is None: - raise Exception('idsymrd is 1 but cl2 was not specified.') - self.cl1 = Util2d(model, (njags,), np.float32, - cl1, name='cl1', locat=self.unit_number[0]) - self.cl2 = Util2d(model, (njags,), np.float32, - cl2, name='cl2', locat=self.unit_number[0]) - - if idsymrd == 0: - if cl12 is None: - raise Exception('idsymrd is 0 but cl12 was not specified') - self.cl12 = Util2d(model, (self.njag,), np.float32, - cl12, name='cl12', locat=self.unit_number[0]) - - # Flow area (set size of array to njag or njags depending on idsymrd) - if fahl is None: - raise Exception('fahl must be provided') - if idsymrd == 1: - n = njags - elif idsymrd == 0: - n = self.njag - self.fahl = Util2d(model, (n,), np.float32, - fahl, name='fahl', locat=self.unit_number[0]) - - # Stress period information - self.perlen = Util2d(model, (self.nper,), np.float32, perlen, - name='perlen') - self.nstp = Util2d(model, (self.nper,), np.int32, nstp, name='nstp') - self.tsmult = Util2d(model, (self.nper,), np.float32, tsmult, - name='tsmult') - self.steady = Util2d(model, (self.nper,), np.bool, - steady, name='steady') - - self.itmuni_dict = {0: "undefined", 1: "seconds", 2: "minutes", - 3: "hours", 4: "days", 5: "years"} - - # self.sr = reference.SpatialReference(self.delr.array, self.delc.array, - # self.lenuni, xul=xul, - # yul=yul, rotation=rotation) - self.start_datetime = start_datetime - - # calculate layer thicknesses - self.__calculate_thickness() - - # Add package and return - self.parent.add_package(self) - return - - def __calculate_thickness(self): - # set ncol and nrow for array readers - nrow = None - ncol = self.nodelay.array - nlay = self.nlay - thk = [] - for k in range(self.nlay): - thk.append(self.top[k] - self.bot[k]) - self.__thickness = Util3d(self.parent, (nlay, nrow, ncol), - np.float32, thk, name='thickness') - return - - @property - def thickness(self): - """ - Get a Util2d array of cell thicknesses. - - Returns - ------- - thickness : util2d array of floats (nodes,) - - """ - return self.__thickness - - def checklayerthickness(self): - """ - Check layer thickness. - - """ - return (self.thickness > 0).all() - - def get_cell_volumes(self): - """ - Get an array of cell volumes. - - Returns - ------- - vol : array of floats (nodes) - - """ - vol = np.empty((self.nodes)) - for n in range(self.nodes): - nn = n - if self.ivsd == -1: - nn = n % self.nodelay[0] - area = self.area[nn] - vol[n] = area * (self.top[n] - self.bot[n]) - return vol - - @property - def zcentroids(self): - """ - Return an array of size nodes that contains the vertical cell center - elevation. - - """ - z = np.empty((self.nodes)) - z[:] = (self.top[:] - self.bot[:]) / 2. - return z - - @property - def ncpl(self): - return self.nodes / self.nlay - - @staticmethod - def load(f, model, ext_unit_dict=None, check=False): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. 
- model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - check : boolean - Check package data for common errors. (default False; not setup yet) - - Returns - ------- - dis : ModflowDisU object - ModflowDisU object. - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> disu = flopy.modflow.ModflowDisU.load('test.disu', m) - - """ - - if model.verbose: - sys.stdout.write('loading disu package file...\n') - - if model.version != 'mfusg': - msg = "Warning: model version was reset from " + \ - "'{}' to 'mfusg' in order to load a DISU file".format( - model.version) - print(msg) - model.version = 'mfusg' - - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - - # dataset 0 -- header - while True: - line = f.readline() - if line[0] != '#': - break - - # dataset 1 - if model.verbose: - print(' loading NODES, NLAY, NJAG, IVSD, NPER, ITMUNI, LENUNI,' - ' IDSYMRD...') - ll = line.strip().split() - nodes = int(ll.pop(0)) - nlay = int(ll.pop(0)) - njag = int(ll.pop(0)) - ivsd = int(ll.pop(0)) - nper = int(ll.pop(0)) - # mimic urword behavior in case these values aren't present on line - if len(ll) > 0: - itmuni = int(ll.pop(0)) - else: - itmuni = 0 - if len(ll) > 0: - lenuni = int(ll.pop(0)) - else: - lenuni = 0 - if len(ll) > 0: - idsymrd = int(ll.pop(0)) - else: - idsymrd = 0 - if model.verbose: - print(' NODES {}'.format(nodes)) - print(' NLAY {}'.format(nlay)) - print(' NJAG {}'.format(njag)) - print(' IVSD {}'.format(ivsd)) - print(' NPER {}'.format(nper)) - print(' ITMUNI {}'.format(itmuni)) - print(' LENUNI {}'.format(lenuni)) - print(' IDSYMRD {}'.format(idsymrd)) - - # Calculate njags - njags = int((njag - nodes) / 2) - if model.verbose: - print(' NJAGS calculated as {}'.format(njags)) - - # dataset 2 -- laycbd - if model.verbose: - print(' loading LAYCBD...') - laycbd = np.empty((nlay,), np.int32) - laycbd = read1d(f, laycbd) - if model.verbose: - print(' LAYCBD {}'.format(laycbd)) - - # dataset 3 -- nodelay - if model.verbose: - print(' loading NODELAY...') - nodelay = Util2d.load(f, model, (nlay,), np.int32, 'nodelay', - ext_unit_dict) - if model.verbose: - print(' NODELAY {}'.format(nodelay)) - - # dataset 4 -- top - if model.verbose: - print(' loading TOP...') - top = [0] * nlay - for k in range(nlay): - tpk = Util2d.load(f, model, (nodelay[k],), np.float32, 'top', - ext_unit_dict) - top[k] = tpk - if model.verbose: - for k, tpk in enumerate(top): - print(' TOP layer {}: {}'.format(k, tpk.array)) - - # dataset 5 -- bot - if model.verbose: - print(' loading BOT...') - bot = [0] * nlay - for k in range(nlay): - btk = Util2d.load(f, model, (nodelay[k],), np.float32, 'btk', - ext_unit_dict) - bot[k] = btk - if model.verbose: - for k, btk in enumerate(bot): - print(' BOT layer {}: {}'.format(k, btk.array)) - - # dataset 6 -- area - if model.verbose: - print(' loading AREA...') - if ivsd == -1: - area = Util2d.load(f, model, (nodelay[0],), np.float32, 'area', - ext_unit_dict) - else: - area = [0] * nlay - for k in range(nlay): - ak = Util2d.load(f, model, (nodelay[k],), np.float32, 'ak', - ext_unit_dict) - area[k] = ak - if model.verbose: - for k, ak in 
enumerate(area): - print(' AREA layer {}: {}'.format(k, ak)) - - # dataset 7 -- iac - if model.verbose: - print(' loading IAC...') - iac = Util2d.load(f, model, (nodes,), np.int32, 'iac', ext_unit_dict) - if model.verbose: - print(' IAC {}'.format(iac)) - - # dataset 8 -- ja - if model.verbose: - print(' loading JA...') - ja = Util2d.load(f, model, (njag,), np.int32, 'ja', ext_unit_dict) - if model.verbose: - print(' JA {}'.format(ja)) - - # dataset 9 -- ivc - ivc = None - if ivsd == 1: - if model.verbose: - print(' loading IVC...') - ivc = Util2d.load(f, model, (njag,), np.int32, 'ivc', - ext_unit_dict) - if model.verbose: - print(' IVC {}'.format(ivc)) - - # dataset 10a -- cl1 - cl1 = None - if idsymrd == 1: - if model.verbose: - print(' loading CL1...') - cl1 = Util2d.load(f, model, (njags,), np.float32, 'cl1', - ext_unit_dict) - if model.verbose: - print(' CL1 {}'.format(cl1)) - - # dataset 10b -- cl2 - cl2 = None - if idsymrd == 1: - if model.verbose: - print(' loading CL2...') - cl2 = Util2d.load(f, model, (njags,), np.float32, 'cl2', - ext_unit_dict) - if model.verbose: - print(' CL2 {}'.format(cl2)) - - # dataset 11 -- cl12 - cl12 = None - if idsymrd == 0: - if model.verbose: - print(' loading CL12...') - cl12 = Util2d.load(f, model, (njag,), np.float32, 'cl12', - ext_unit_dict) - if model.verbose: - print(' CL12 {}'.format(cl12)) - - # dataset 12 -- fahl - fahl = None - if idsymrd == 0: - n = njag - elif idsymrd == 1: - n = njags - if model.verbose: - print(' loading FAHL...') - fahl = Util2d.load(f, model, (n,), np.float32, 'fahl', ext_unit_dict) - if model.verbose: - print(' FAHL {}'.format(fahl)) - - # dataset 7 -- stress period info - if model.verbose: - print(' loading stress period data...') - perlen = [] - nstp = [] - tsmult = [] - steady = [] - for k in range(nper): - line = f.readline() - a1, a2, a3, a4 = line.strip().split()[0:4] - a1 = float(a1) - a2 = int(a2) - a3 = float(a3) - if a4.upper() == 'TR': - a4 = False - else: - a4 = True - perlen.append(a1) - nstp.append(a2) - tsmult.append(a3) - steady.append(a4) - if model.verbose: - print(' PERLEN {}'.format(perlen)) - print(' NSTP {}'.format(nstp)) - print(' TSMULT {}'.format(tsmult)) - print(' STEADY {}'.format(steady)) - - if openfile: - f.close() - - # set package unit number - unitnumber = None - filenames = [None] - if ext_unit_dict is not None: - unitnumber, filenames[0] = \ - model.get_ext_dict_attr(ext_unit_dict, - filetype=ModflowDisU.ftype()) - - # create dis object instance - disu = ModflowDisU(model, nodes=nodes, nlay=nlay, njag=njag, ivsd=ivsd, - nper=nper, itmuni=itmuni, lenuni=lenuni, - idsymrd=idsymrd, laycbd=laycbd, nodelay=nodelay, - top=top, bot=bot, area=area, iac=iac, ja=ja, - ivc=ivc, cl1=cl1, cl2=cl2, cl12=cl12, fahl=fahl, - perlen=perlen, nstp=nstp, tsmult=tsmult, - steady=steady, unitnumber=unitnumber, - filenames=filenames) - - # return dis object instance - return disu - - def write_file(self): - """ - Write the package file. 
- - Returns - ------- - None - - """ - # Open file for writing - f_dis = open(self.fn_path, 'w') - - # Item 0: heading - f_dis.write('{0:s}\n'.format(self.heading)) - - # Item 1: NODES NLAY NJAG IVSD NPER ITMUNI LENUNI IDSYMRD - s = '' - for var in [self.nodes, self.nlay, self.njag, self.ivsd, self.nper, - self.itmuni, self.lenuni, self.idsymrd]: - s += '{} '.format(var) - f_dis.write(s + '\n') - - # Item 2: LAYCBD - for k in range(self.nlay): - f_dis.write('{0:3d}'.format(self.laycbd[k])) - f_dis.write('\n') - - # Item 3: NODELAY - f_dis.write(self.nodelay.get_file_entry()) - - # Item 4: TOP - f_dis.write(self.top.get_file_entry()) - - # Item 5: BOT - f_dis.write(self.bot.get_file_entry()) - - # Item 6: AREA - f_dis.write(self.area.get_file_entry()) - - # Item 7: IAC - f_dis.write(self.iac.get_file_entry()) - - # Item 8: JA - f_dis.write(self.ja.get_file_entry()) - - # Item 9: IVC - if self.ivsd == 1: - f_dis.write(self.ivc.get_file_entry()) - - # Item 10a: CL1 - if self.idsymrd == 1: - f_dis.write(self.cl1.get_file_entry()) - - # Item 10b: CL2 - if self.idsymrd == 1: - f_dis.write(self.cl2.get_file_entry()) - - # Item 11: CL12 - if self.idsymrd == 0: - f_dis.write(self.cl12.get_file_entry()) - - # Item 12: FAHL - f_dis.write(self.fahl.get_file_entry()) - - # Item 13: NPER, NSTP, TSMULT, Ss/tr - for t in range(self.nper): - f_dis.write('{0:14f}{1:14d}{2:10f} '.format(self.perlen[t], - self.nstp[t], - self.tsmult[t])) - if self.steady[t]: - f_dis.write(' {0:3s}\n'.format('SS')) - else: - f_dis.write(' {0:3s}\n'.format('TR')) - - # Close and return - f_dis.close() - return - - @staticmethod - def ftype(): - return 'DISU' - - @staticmethod - def defaultunit(): - return 11 - - # def get_node_coordinates(self): - # """ - # Get y, x, and z cell centroids. - # - # Returns - # ------- - # y : list of cell y-centroids - # - # x : list of cell x-centroids - # - # z : array of floats (nlay, nrow, ncol) - # """ - # # In row direction - # y = np.empty((self.nrow)) - # for r in range(self.nrow): - # if (r == 0): - # y[r] = self.delc[r] / 2. - # else: - # y[r] = y[r - 1] + (self.delc[r] + self.delc[r - 1]) / 2. - # # Invert y to convert to a cartesian coordinate system - # y = y[::-1] - # # In column direction - # x = np.empty((self.ncol)) - # for c in range(self.ncol): - # if (c == 0): - # x[c] = self.delr[c] / 2. - # else: - # x[c] = x[c - 1] + (self.delr[c] + self.delr[c - 1]) / 2. - # # In layer direction - # z = np.empty((self.nlay, self.nrow, self.ncol)) - # for l in range(self.nlay): - # if (l == 0): - # z[l, :, :] = (self.top[:, :] + self.botm[l, :, :]) / 2. - # else: - # z[l, :, :] = (self.botm[l - 1, :, :] + self.botm[l, :, :]) / 2. - # return y, x, z - # - # def get_lrc(self, nodes): - # """ - # Get layer, row, column from a list of MODFLOW node numbers. - # - # Returns - # ------- - # v : list of tuples containing the layer (k), row (i), - # and column (j) for each node in the input list - # """ - # if not isinstance(nodes, list): - # nodes = [nodes] - # nrc = self.nrow * self.ncol - # v = [] - # for node in nodes: - # k = int(node / nrc) - # if (k * nrc) < node: - # k += 1 - # ij = int(node - (k - 1) * nrc) - # i = int(ij / self.ncol) - # if (i * self.ncol) < ij: - # i += 1 - # j = ij - (i - 1) * self.ncol - # v.append((k, i, j)) - # return v - # - # def get_node(self, lrc_list): - # """ - # Get node number from a list of MODFLOW layer, row, column tuples. 
- # - # Returns - # ------- - # v : list of MODFLOW nodes for each layer (k), row (i), - # and column (j) tuple in the input list - # """ - # if not isinstance(lrc_list, list): - # lrc_list = [lrc_list] - # nrc = self.nrow * self.ncol - # v = [] - # for [k, i, j] in lrc_list: - # node = int(((k - 1) * nrc) + ((i - 1) * self.ncol) + j) - # v.append(node) - # return v - # - # def read_from_cnf(self, cnf_file_name, n_per_line=0): - # """ - # Read discretization information from an MT3D configuration file. - # - # """ - # - # def getn(ii, jj): - # if (jj == 0): - # n = 1 - # else: - # n = int(ii / jj) - # if (ii % jj != 0): - # n = n + 1 - # - # return n - # - # try: - # f_cnf = open(cnf_file_name, 'r') - # - # # nlay, nrow, ncol - # line = f_cnf.readline() - # s = line.split() - # cnf_nlay = int(s[0]) - # cnf_nrow = int(s[1]) - # cnf_ncol = int(s[2]) - # - # # ncol column widths delr[c] - # line = '' - # for dummy in range(getn(cnf_ncol, n_per_line)): - # line = line + f_cnf.readline() - # cnf_delr = [float(s) for s in line.split()] - # - # # nrow row widths delc[r] - # line = '' - # for dummy in range(getn(cnf_nrow, n_per_line)): - # line = line + f_cnf.readline() - # cnf_delc = [float(s) for s in line.split()] - # - # # nrow * ncol htop[r, c] - # line = '' - # for dummy in range(getn(cnf_nrow * cnf_ncol, n_per_line)): - # line = line + f_cnf.readline() - # cnf_top = [float(s) for s in line.split()] - # cnf_top = np.reshape(cnf_top, (cnf_nrow, cnf_ncol)) - # - # # nlay * nrow * ncol layer thickness dz[l, r, c] - # line = '' - # for dummy in range( - # getn(cnf_nlay * cnf_nrow * cnf_ncol, n_per_line)): - # line = line + f_cnf.readline() - # cnf_dz = [float(s) for s in line.split()] - # cnf_dz = np.reshape(cnf_dz, (cnf_nlay, cnf_nrow, cnf_ncol)) - # - # # cinact, cdry, not used here so commented - # '''line = f_cnf.readline() - # s = line.split() - # cinact = float(s[0]) - # cdry = float(s[1])''' - # - # f_cnf.close() - # finally: - # self.nlay = cnf_nlay - # self.nrow = cnf_nrow - # self.ncol = cnf_ncol - # - # self.delr = Util2d(model, (self.ncol,), np.float32, cnf_delr, - # name='delr', locat=self.unit_number[0]) - # self.delc = Util2d(model, (self.nrow,), np.float32, cnf_delc, - # name='delc', locat=self.unit_number[0]) - # self.top = Util2d(model, (self.nrow, self.ncol), np.float32, - # cnf_top, name='model_top', - # locat=self.unit_number[0]) - # - # cnf_botm = np.empty((self.nlay + sum(self.laycbd), self.nrow, - # self.ncol)) - # - # # First model layer - # cnf_botm[0:, :, :] = cnf_top - cnf_dz[0, :, :] - # # All other layers - # for l in range(1, self.nlay): - # cnf_botm[l, :, :] = cnf_botm[l - 1, :, :] - cnf_dz[l, :, :] - # - # self.botm = Util3d(model, (self.nlay + sum(self.laycbd), - # self.nrow, self.ncol), np.float32, - # cnf_botm, 'botm', - # locat=self.unit_number[0]) - # - # def gettop(self): - # """ - # Get the top array. - # - # Returns - # ------- - # top : array of floats (nrow, ncol) - # """ - # return self.top.array - # - # def getbotm(self, k=None): - # """ - # Get the bottom array. - # - # Returns - # ------- - # botm : array of floats (nlay, nrow, ncol), or - # - # botm : array of floats (nrow, ncol) if k is not none - # """ - # if k is None: - # return self.botm.array - # else: - # return self.botm.array[k, :, :] - # - # def check(self, f=None, verbose=True, level=1): - # """ - # Check dis package data for zero and negative thicknesses. 
- # - # Parameters - # ---------- - # f : str or file handle - # String defining file name or file handle for summary file - # of check method output. If a sting is passed a file handle - # is created. If f is None, check method does not write - # results to a summary file. (default is None) - # verbose : bool - # Boolean flag used to determine if check method results are - # written to the screen - # level : int - # Check method analysis level. If level=0, summary checks are - # performed. If level=1, full checks are performed. - # - # Returns - # ------- - # None - # - # Examples - # -------- - # - # >>> import flopy - # >>> m = flopy.modflow.Modflow.load('model.nam') - # >>> m.dis.check() - # """ - # if f is not None: - # if isinstance(f, str): - # pth = os.path.join(self.parent.model_ws, f) - # f = open(pth, 'w', 0) - # - # errors = False - # txt = '\n{} PACKAGE DATA VALIDATION:\n'.format(self.name[0]) - # t = '' - # t1 = '' - # inactive = self.parent.bas6.ibound.array == 0 - # # thickness errors - # d = self.thickness.array - # d[inactive] = 1. - # if d.min() <= 0: - # errors = True - # t = '{} ERROR: Negative or zero cell thickness specified.\n'.format( - # t) - # if level > 0: - # idx = np.column_stack(np.where(d <= 0.)) - # t1 = self.level1_arraylist(idx, d, self.thickness.name, t1) - # else: - # t = '{} Specified cell thickness is OK.\n'.format(t) - # - # # add header to level 0 text - # txt += t - # - # if level > 0: - # if errors: - # txt += '\n DETAILED SUMMARY OF {} ERRORS:\n'.format( - # self.name[0]) - # # add level 1 header to level 1 text - # txt += t1 - # - # # write errors to summary file - # if f is not None: - # f.write('{}\n'.format(txt)) - # - # # write errors to stdout - # if verbose: - # print(txt) +""" +mfdisu module. Contains the ModflowDisU class. Note that the user can access +the ModflowDisU class as `flopy.modflow.ModflowDisU`. + +""" + +import sys +import numpy as np +from ..pakbase import Package +from ..utils import Util2d, Util3d, read1d + +ITMUNI = {"u": 0, "s": 1, "m": 2, "h": 3, "d": 4, "y": 5} +LENUNI = {"u": 0, "f": 1, "m": 2, "c": 3} + + +class ModflowDisU(Package): + """ + MODFLOW Unstructured Discretization Package Class. + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.modflow.Modflow`) to which + this package will be added. + nodes : int + Number of nodes in the model grid (default is 2). + nlay : int + Number of layers in the model grid (default is 1). + njag : int + Total number of connections of an unstructured grid. njag is used to + dimension the sparse matrix in a compressed row storage format. For + symmetric arrays, only the upper triangle of the matrix may be + entered. For that case, the symmetric portion (minus the diagonal + terms) is dimensioned as njags = (njag - nodes) / 2. + (default is None). + ivsd : int + is the vertical sub-discretization index. For purposes of this flag, + vertical sub-discretization is defined to occur when all layers are + not a stacked representation of each other. + If IVSD = 0 there is no sub-discretization of layers within the model + domain. That is, grids are not nested in the vertical direction. + However, one layer may have a different grid structure from the next + due to different sub-gridding structures within each layer. + If IVSD = 1 there could be sub-discretization of layers with + vertically nested grids (as shown in Figure 5c in the MODFLOW-USG + document) within the domain. 
For this case, the vertical connection + index IVC is required to determine the vertical connections of every + node. Otherwise, the vertical connections are internally computed and + IVC is not read. + If IVSD = -1 there is no vertical sub-discretization of layers, and + further, the horizontal discretization of all layers is the same. For + this case, the cell areas (AREA) are read only for one layer and are + computed to be the same for all the stacked layers. A structured + finite-difference grid is an example of this condition. + (default is 0). + nper : int + Number of model stress periods (the default is 1). + itmuni : int + Time units, default is days (4). + lenuni : int + Length units, default is meters (2). + idsymrd : int + A flag indicating if the finite-volume connectivity information of an + unstructured grid is input as a full matrix or as a symmetric matrix + in the input file. + If idsymrd is 0 the finite-volume connectivity information is provided + for the full matrix of the porous matrix grid-block connections of an + unstructured grid. The code internally stores only the symmetric + portion of this information. This input structure (IDSYMRD=0) is easy + to organize but contains unwanted information which is parsed out + when the information is stored. + If idsymrd is 1 then finite-volume connectivity information is + provided only for the upper triangular portion of the porous matrix + grid-block connections within the unstructured grid. This input + structure (IDSYMRD=1) is compact but is slightly more complicated to + organize. Only the non-zero upper triangular items of each row are + read in sequence for all symmetric matrices. + (default is 0). + laycbd : int or array of ints (nlay), optional + An array of flags indicating whether or not a layer has a Quasi-3D + confining bed below it. 0 indicates no confining bed, and not zero + indicates a confining bed. LAYCBD for the bottom layer must be 0. (the + default is 0) + nodelay : int or array of ints (nlay) + The number of cells in each layer. (the default is None, which means + the number of cells in a layer is equal to nodes / nlay). + top : float or array of floats (nodes), optional + An array of the top elevation for every cell. For the situation in + which the top layer represents a water-table aquifer, it may be + reasonable to set Top equal to land-surface elevation (the default is + 1.0) + bot : float or array of floats (nodes), optional + An array of the bottom elevation for each model cell (the default is + 0.) + area : float or array of floats + Surface area for model cells. Area is for only one layer if IVSD = -1 + to indicate that the grid is vertically stacked. Otherwise, area is + required for each layer in the model grid. Note that there may be a + different number of nodes per layer (ndslay) for an unstructured grid. + (default is 1.0) + iac : array of integers + is a vector indicating the number of connections plus 1 for each + node. Note that the IAC array is only supplied for the GWF cells; + the IAC array is internally expanded to include CLN or GNC nodes if + they are present in a simulation. + (default is None. iac must be provided).
This connectivity is + internally expanded if CLN or GNC nodes are present in a simulation. + Also note that the JA list input may be chopped up to have every node + number and its connectivity list on a separate line for ease in + readability of the file. To further ease readability of the file, the + node number of the cell whose connectivity is subsequently listed may + be expressed as a negative number, the sign of which is subsequently + corrected by the code. + (default is None. ja must be provided). + ivc : int or array of integers + is an index array indicating the direction between a node n and all + its m connections. IVC = 0 if the connection between n and m is + horizontal. IVC = 1 if the connecting node m is vertically oriented + to node n. Note that if the CLN Process is active, the connection + between two CLN cells has IVC = 2 and the connection between a CLN + cell and a GWF cell has IVC = 3. + (default is None. ivc must be provided if ivsd = 1) + cl1 : float or array of floats + is the perpendicular length between the center of a node (node 1) and + the interface between the node and its adjoining node (node 2). + (default is None. cl1 and cl2 must be specified, or cl12 must be + specified) + cl2 : float or array of floats + is the perpendicular length between node 2 and the interface between + nodes 1 and 2, and is at the symmetric location of CL1. + (default is None. cl1 and cl2 must be specified, or cl12 must be + specified) + cl12 : float or array of floats + is the array containing CL1 and CL2 lengths, where CL1 is the + perpendicular length between the center of a node (node 1) and the + interface between the node and its adjoining node (node 2). CL2, + which is the perpendicular length between node 2 and the interface + between nodes 1 and 2, is at the symmetric location of CL1. The array + CL12 reads both CL1 and CL2 in the upper and lower triangular + portions of the matrix, respectively. Note that the CL1 and CL2 arrays + are only supplied for the GWF cell connections and are internally + expanded if CLN or GNC nodes exist in a simulation. + (default is None. cl1 and cl2 must be specified, or cl12 must be + specified) + fahl : float or array of floats + Area of the interface Anm between nodes n and m. + (default is None. fahl must be specified.) + perlen : float or array of floats (nper) + An array of the stress period lengths. + nstp : int or array of ints (nper) + Number of time steps in each stress period (default is 1). + tsmult : float or array of floats (nper) + Time step multiplier (default is 1.0). + steady : boolean or array of boolean (nper) + True or False indicating whether or not a stress period is steady + state (default is True). + extension : string + Filename extension (default is 'disu') + unitnumber : int + File unit number (default is None). + filenames : str or list of str + Filenames to use for the package. If filenames=None the package name + will be created using the model name and package extension. If a + single string is passed the package will be set to the string. + Default is None. + + + Attributes + ---------- + heading : str + Text string written to top of package input file. + + Methods + ------- + + See Also + -------- + + Notes + ----- + Does not work yet for multi-layer USG models because top and bot cannot + be u3d instances until u3d is modified to handle multiple u2d instances + of different size. A short sketch of the connectivity inputs follows + these notes.
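As an editorial illustration (not part of the package source), consider a hypothetical two-cell, one-layer model in which the two cells share a single horizontal connection; the cl12 and fahl values below are placeholders chosen only to show the array shapes:

>>> nodes, nlay = 2, 1
>>> iac = [2, 2]                  # connections + 1 for each node
>>> njag = sum(iac)               # 4 = nodes + 2 * (number of connections)
>>> ja = [1, 2, 2, 1]             # each node n followed by its neighbors m
>>> njags = (njag - nodes) // 2   # 1 symmetric off-diagonal entry
>>> cl12 = [0., 5., 5., 0.]       # full-matrix lengths (idsymrd=0)
>>> fahl = [0., 10., 10., 0.]     # full-matrix interface areas (idsymrd=0)

With idsymrd=1 the same lengths would instead be supplied as cl1 and cl2 arrays of njags entries each, and fahl would likewise shrink to njags entries.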
+ + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> disu = flopy.modflow.ModflowDisU(m, nodes=2, nlay=1, njag=4, + ... iac=[2, 2], ja=[1, 2, 2, 1], + ... cl12=[0., 5., 5., 0.], + ... fahl=[0., 10., 10., 0.]) + + """ + + def __init__(self, model, nodes=2, nlay=1, njag=None, ivsd=0, nper=1, + itmuni=4, lenuni=2, idsymrd=0, laycbd=0, nodelay=None, + top=1, bot=0, area=1.0, iac=None, ja=None, ivc=None, + cl1=None, cl2=None, cl12=None, fahl=None, perlen=1, nstp=1, + tsmult=1, steady=True, extension='disu', + unitnumber=None, filenames=None, start_datetime="1/1/1970"): + + # set default unit number if one is not specified + if unitnumber is None: + unitnumber = ModflowDisU.defaultunit() + + # set filenames + if filenames is None: + filenames = [None] + elif isinstance(filenames, str): + filenames = [filenames] + + # Fill namefile items + name = [ModflowDisU.ftype()] + units = [unitnumber] + extra = [''] + + # set package name + fname = [filenames[0]] + + # Call ancestor's init to set self.parent, extension, name and unit number + Package.__init__(self, model, extension=extension, name=name, + unit_number=units, extra=extra, filenames=fname) + + # Set values of all parameters + self.url = 'dis.htm' + self.heading = '# {} package for '.format(self.name[0]) + \ + ' {}, '.format(model.version_types[model.version]) + \ + 'generated by Flopy.' + + self.nodes = nodes + self.nlay = nlay + self.njag = njag + self.ivsd = ivsd + self.nper = nper + try: + self.itmuni = int(itmuni) + except: + self.itmuni = ITMUNI[itmuni.lower()[0]] + try: + self.lenuni = int(lenuni) + except: + self.lenuni = LENUNI[lenuni.lower()[0]] + self.idsymrd = idsymrd + + # LAYCBD + self.laycbd = Util2d(model, (self.nlay,), np.int32, laycbd, + name='laycbd') + self.laycbd[-1] = 0 # bottom layer must be zero + + # NODELAY + if nodelay is None: + npl = int(nodes / nlay) + nodelay = [] + for k in range(self.nlay): + nodelay.append(npl) + self.nodelay = Util2d(model, (self.nlay,), np.int32, nodelay, + name='nodelay', locat=self.unit_number[0]) + + # set ncol and nrow for array readers + nrow = None + ncol = self.nodelay.array[:] + + # Top and bot are both 1d arrays of size nodes + self.top = Util3d(model, (nlay, nrow, ncol), np.float32, top, + name='top', + locat=self.unit_number[0]) + self.bot = Util3d(model, (nlay, nrow, ncol), np.float32, bot, + name='bot', + locat=self.unit_number[0]) + + # Area is Util2d if ivsd == -1, otherwise it is Util3d + if ivsd == -1: + self.area = Util2d(model, (self.nodelay[0],), np.float32, area, + 'area', locat=self.unit_number[0]) + else: + self.area = Util3d(model, (nlay, nrow, ncol), np.float32, area, + name='area', locat=self.unit_number[0]) + + # Connectivity and ivc + if iac is None: + raise Exception('iac must be provided') + self.iac = Util2d(model, (self.nodes,), np.int32, + iac, name='iac', locat=self.unit_number[0]) + assert self.iac.array.sum() == njag, 'The sum of iac must equal njag.'
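A quick editorial check of the invariant enforced by the assert above, again using the two-cell sketch (not part of the source):

>>> iac = [2, 2]
>>> sum(iac)    # must equal njag: one diagonal entry plus one per connection
4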
+ if ja is None: + raise Exception('ja must be provided') + self.ja = Util2d(model, (self.njag,), np.int32, + ja, name='ja', locat=self.unit_number[0]) + self.ivc = None + if self.ivsd == 1: + if ivc is None: + raise Exception('ivc must be provided if ivsd is 1.') + self.ivc = Util2d(model, (self.njag,), np.int32, + ivc, name='ivc', locat=self.unit_number[0]) + + # Connection lengths + if idsymrd == 1: + njags = int((njag - nodes) / 2) + if cl1 is None: + raise Exception('idsymrd is 1 but cl1 was not specified.') + if cl2 is None: + raise Exception('idsymrd is 1 but cl2 was not specified.') + self.cl1 = Util2d(model, (njags,), np.float32, + cl1, name='cl1', locat=self.unit_number[0]) + self.cl2 = Util2d(model, (njags,), np.float32, + cl2, name='cl2', locat=self.unit_number[0]) + + if idsymrd == 0: + if cl12 is None: + raise Exception('idsymrd is 0 but cl12 was not specified') + self.cl12 = Util2d(model, (self.njag,), np.float32, + cl12, name='cl12', locat=self.unit_number[0]) + + # Flow area (set size of array to njag or njags depending on idsymrd) + if fahl is None: + raise Exception('fahl must be provided') + if idsymrd == 1: + n = njags + elif idsymrd == 0: + n = self.njag + self.fahl = Util2d(model, (n,), np.float32, + fahl, name='fahl', locat=self.unit_number[0]) + + # Stress period information + self.perlen = Util2d(model, (self.nper,), np.float32, perlen, + name='perlen') + self.nstp = Util2d(model, (self.nper,), np.int32, nstp, name='nstp') + self.tsmult = Util2d(model, (self.nper,), np.float32, tsmult, + name='tsmult') + self.steady = Util2d(model, (self.nper,), np.bool, + steady, name='steady') + + self.itmuni_dict = {0: "undefined", 1: "seconds", 2: "minutes", + 3: "hours", 4: "days", 5: "years"} + + # self.sr = reference.SpatialReference(self.delr.array, self.delc.array, + # self.lenuni, xul=xul, + # yul=yul, rotation=rotation) + self.start_datetime = start_datetime + + # calculate layer thicknesses + self.__calculate_thickness() + + # Add package and return + self.parent.add_package(self) + return + + def __calculate_thickness(self): + # set ncol and nrow for array readers + nrow = None + ncol = self.nodelay.array + nlay = self.nlay + thk = [] + for k in range(self.nlay): + thk.append(self.top[k] - self.bot[k]) + self.__thickness = Util3d(self.parent, (nlay, nrow, ncol), + np.float32, thk, name='thickness') + return + + @property + def thickness(self): + """ + Get a Util3d array of cell thicknesses. + + Returns + ------- + thickness : Util3d array of floats (nodes,) + + """ + return self.__thickness + + def checklayerthickness(self): + """ + Check layer thickness. + + """ + return (self.thickness > 0).all() + + def get_cell_volumes(self): + """ + Get an array of cell volumes. + + Returns + ------- + vol : array of floats (nodes) + + """ + vol = np.empty((self.nodes)) + for n in range(self.nodes): + nn = n + if self.ivsd == -1: + nn = n % self.nodelay[0] + area = self.area[nn] + vol[n] = area * (self.top[n] - self.bot[n]) + return vol + + @property + def zcentroids(self): + """ + Return an array of size nodes that contains the vertical cell center + elevation. + + """ + z = np.empty((self.nodes)) + # centroid elevation is the bottom plus half the cell thickness + z[:] = self.bot[:] + (self.top[:] - self.bot[:]) / 2. + return z + + @property + def ncpl(self): + return self.nodes // self.nlay + + @staticmethod + def load(f, model, ext_unit_dict=None, check=False): + """ + Load an existing package. + + Parameters + ---------- + f : filename or file handle + File to load.
+ model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to + which this package will be added. + ext_unit_dict : dictionary, optional + If the arrays in the file are specified using EXTERNAL, + or older style array control records, then `f` should be a file + handle. In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. + check : boolean + Check package data for common errors. (default False; not setup yet) + + Returns + ------- + dis : ModflowDisU object + ModflowDisU object. + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> disu = flopy.modflow.ModflowDisU.load('test.disu', m) + + """ + + if model.verbose: + sys.stdout.write('loading disu package file...\n') + + if model.version != 'mfusg': + msg = "Warning: model version was reset from " + \ + "'{}' to 'mfusg' in order to load a DISU file".format( + model.version) + print(msg) + model.version = 'mfusg' + + openfile = not hasattr(f, 'read') + if openfile: + filename = f + f = open(filename, 'r') + + # dataset 0 -- header + while True: + line = f.readline() + if line[0] != '#': + break + + # dataset 1 + if model.verbose: + print(' loading NODES, NLAY, NJAG, IVSD, NPER, ITMUNI, LENUNI,' + ' IDSYMRD...') + ll = line.strip().split() + nodes = int(ll.pop(0)) + nlay = int(ll.pop(0)) + njag = int(ll.pop(0)) + ivsd = int(ll.pop(0)) + nper = int(ll.pop(0)) + # mimic urword behavior in case these values aren't present on line + if len(ll) > 0: + itmuni = int(ll.pop(0)) + else: + itmuni = 0 + if len(ll) > 0: + lenuni = int(ll.pop(0)) + else: + lenuni = 0 + if len(ll) > 0: + idsymrd = int(ll.pop(0)) + else: + idsymrd = 0 + if model.verbose: + print(' NODES {}'.format(nodes)) + print(' NLAY {}'.format(nlay)) + print(' NJAG {}'.format(njag)) + print(' IVSD {}'.format(ivsd)) + print(' NPER {}'.format(nper)) + print(' ITMUNI {}'.format(itmuni)) + print(' LENUNI {}'.format(lenuni)) + print(' IDSYMRD {}'.format(idsymrd)) + + # Calculate njags + njags = int((njag - nodes) / 2) + if model.verbose: + print(' NJAGS calculated as {}'.format(njags)) + + # dataset 2 -- laycbd + if model.verbose: + print(' loading LAYCBD...') + laycbd = np.empty((nlay,), np.int32) + laycbd = read1d(f, laycbd) + if model.verbose: + print(' LAYCBD {}'.format(laycbd)) + + # dataset 3 -- nodelay + if model.verbose: + print(' loading NODELAY...') + nodelay = Util2d.load(f, model, (nlay,), np.int32, 'nodelay', + ext_unit_dict) + if model.verbose: + print(' NODELAY {}'.format(nodelay)) + + # dataset 4 -- top + if model.verbose: + print(' loading TOP...') + top = [0] * nlay + for k in range(nlay): + tpk = Util2d.load(f, model, (nodelay[k],), np.float32, 'top', + ext_unit_dict) + top[k] = tpk + if model.verbose: + for k, tpk in enumerate(top): + print(' TOP layer {}: {}'.format(k, tpk.array)) + + # dataset 5 -- bot + if model.verbose: + print(' loading BOT...') + bot = [0] * nlay + for k in range(nlay): + btk = Util2d.load(f, model, (nodelay[k],), np.float32, 'btk', + ext_unit_dict) + bot[k] = btk + if model.verbose: + for k, btk in enumerate(bot): + print(' BOT layer {}: {}'.format(k, btk.array)) + + # dataset 6 -- area + if model.verbose: + print(' loading AREA...') + if ivsd == -1: + area = Util2d.load(f, model, (nodelay[0],), np.float32, 'area', + ext_unit_dict) + else: + area = [0] * nlay + for k in range(nlay): + ak = Util2d.load(f, model, (nodelay[k],), np.float32, 'ak', + ext_unit_dict) + area[k] = ak + if model.verbose: + for k, ak in 
enumerate(area): + print(' AREA layer {}: {}'.format(k, ak)) + + # dataset 7 -- iac + if model.verbose: + print(' loading IAC...') + iac = Util2d.load(f, model, (nodes,), np.int32, 'iac', ext_unit_dict) + if model.verbose: + print(' IAC {}'.format(iac)) + + # dataset 8 -- ja + if model.verbose: + print(' loading JA...') + ja = Util2d.load(f, model, (njag,), np.int32, 'ja', ext_unit_dict) + if model.verbose: + print(' JA {}'.format(ja)) + + # dataset 9 -- ivc + ivc = None + if ivsd == 1: + if model.verbose: + print(' loading IVC...') + ivc = Util2d.load(f, model, (njag,), np.int32, 'ivc', + ext_unit_dict) + if model.verbose: + print(' IVC {}'.format(ivc)) + + # dataset 10a -- cl1 + cl1 = None + if idsymrd == 1: + if model.verbose: + print(' loading CL1...') + cl1 = Util2d.load(f, model, (njags,), np.float32, 'cl1', + ext_unit_dict) + if model.verbose: + print(' CL1 {}'.format(cl1)) + + # dataset 10b -- cl2 + cl2 = None + if idsymrd == 1: + if model.verbose: + print(' loading CL2...') + cl2 = Util2d.load(f, model, (njags,), np.float32, 'cl2', + ext_unit_dict) + if model.verbose: + print(' CL2 {}'.format(cl2)) + + # dataset 11 -- cl12 + cl12 = None + if idsymrd == 0: + if model.verbose: + print(' loading CL12...') + cl12 = Util2d.load(f, model, (njag,), np.float32, 'cl12', + ext_unit_dict) + if model.verbose: + print(' CL12 {}'.format(cl12)) + + # dataset 12 -- fahl + fahl = None + if idsymrd == 0: + n = njag + elif idsymrd == 1: + n = njags + if model.verbose: + print(' loading FAHL...') + fahl = Util2d.load(f, model, (n,), np.float32, 'fahl', ext_unit_dict) + if model.verbose: + print(' FAHL {}'.format(fahl)) + + # dataset 13 -- stress period info + if model.verbose: + print(' loading stress period data...') + perlen = [] + nstp = [] + tsmult = [] + steady = [] + for k in range(nper): + line = f.readline() + a1, a2, a3, a4 = line.strip().split()[0:4] + a1 = float(a1) + a2 = int(a2) + a3 = float(a3) + if a4.upper() == 'TR': + a4 = False + else: + a4 = True + perlen.append(a1) + nstp.append(a2) + tsmult.append(a3) + steady.append(a4) + if model.verbose: + print(' PERLEN {}'.format(perlen)) + print(' NSTP {}'.format(nstp)) + print(' TSMULT {}'.format(tsmult)) + print(' STEADY {}'.format(steady)) + + if openfile: + f.close() + + # set package unit number + unitnumber = None + filenames = [None] + if ext_unit_dict is not None: + unitnumber, filenames[0] = \ + model.get_ext_dict_attr(ext_unit_dict, + filetype=ModflowDisU.ftype()) + + # create dis object instance + disu = ModflowDisU(model, nodes=nodes, nlay=nlay, njag=njag, ivsd=ivsd, + nper=nper, itmuni=itmuni, lenuni=lenuni, + idsymrd=idsymrd, laycbd=laycbd, nodelay=nodelay, + top=top, bot=bot, area=area, iac=iac, ja=ja, + ivc=ivc, cl1=cl1, cl2=cl2, cl12=cl12, fahl=fahl, + perlen=perlen, nstp=nstp, tsmult=tsmult, + steady=steady, unitnumber=unitnumber, + filenames=filenames) + + # return dis object instance + return disu + + def write_file(self): + """ + Write the package file.
+ + Returns + ------- + None + + """ + # Open file for writing + f_dis = open(self.fn_path, 'w') + + # Item 0: heading + f_dis.write('{0:s}\n'.format(self.heading)) + + # Item 1: NODES NLAY NJAG IVSD NPER ITMUNI LENUNI IDSYMRD + s = '' + for var in [self.nodes, self.nlay, self.njag, self.ivsd, self.nper, + self.itmuni, self.lenuni, self.idsymrd]: + s += '{} '.format(var) + f_dis.write(s + '\n') + + # Item 2: LAYCBD + for k in range(self.nlay): + f_dis.write('{0:3d}'.format(self.laycbd[k])) + f_dis.write('\n') + + # Item 3: NODELAY + f_dis.write(self.nodelay.get_file_entry()) + + # Item 4: TOP + f_dis.write(self.top.get_file_entry()) + + # Item 5: BOT + f_dis.write(self.bot.get_file_entry()) + + # Item 6: AREA + f_dis.write(self.area.get_file_entry()) + + # Item 7: IAC + f_dis.write(self.iac.get_file_entry()) + + # Item 8: JA + f_dis.write(self.ja.get_file_entry()) + + # Item 9: IVC + if self.ivsd == 1: + f_dis.write(self.ivc.get_file_entry()) + + # Item 10a: CL1 + if self.idsymrd == 1: + f_dis.write(self.cl1.get_file_entry()) + + # Item 10b: CL2 + if self.idsymrd == 1: + f_dis.write(self.cl2.get_file_entry()) + + # Item 11: CL12 + if self.idsymrd == 0: + f_dis.write(self.cl12.get_file_entry()) + + # Item 12: FAHL + f_dis.write(self.fahl.get_file_entry()) + + # Item 13: NPER, NSTP, TSMULT, Ss/tr + for t in range(self.nper): + f_dis.write('{0:14f}{1:14d}{2:10f} '.format(self.perlen[t], + self.nstp[t], + self.tsmult[t])) + if self.steady[t]: + f_dis.write(' {0:3s}\n'.format('SS')) + else: + f_dis.write(' {0:3s}\n'.format('TR')) + + # Close and return + f_dis.close() + return + + @staticmethod + def ftype(): + return 'DISU' + + @staticmethod + def defaultunit(): + return 11 + + # def get_node_coordinates(self): + # """ + # Get y, x, and z cell centroids. + # + # Returns + # ------- + # y : list of cell y-centroids + # + # x : list of cell x-centroids + # + # z : array of floats (nlay, nrow, ncol) + # """ + # # In row direction + # y = np.empty((self.nrow)) + # for r in range(self.nrow): + # if (r == 0): + # y[r] = self.delc[r] / 2. + # else: + # y[r] = y[r - 1] + (self.delc[r] + self.delc[r - 1]) / 2. + # # Invert y to convert to a cartesian coordinate system + # y = y[::-1] + # # In column direction + # x = np.empty((self.ncol)) + # for c in range(self.ncol): + # if (c == 0): + # x[c] = self.delr[c] / 2. + # else: + # x[c] = x[c - 1] + (self.delr[c] + self.delr[c - 1]) / 2. + # # In layer direction + # z = np.empty((self.nlay, self.nrow, self.ncol)) + # for l in range(self.nlay): + # if (l == 0): + # z[l, :, :] = (self.top[:, :] + self.botm[l, :, :]) / 2. + # else: + # z[l, :, :] = (self.botm[l - 1, :, :] + self.botm[l, :, :]) / 2. + # return y, x, z + # + # def get_lrc(self, nodes): + # """ + # Get layer, row, column from a list of MODFLOW node numbers. + # + # Returns + # ------- + # v : list of tuples containing the layer (k), row (i), + # and column (j) for each node in the input list + # """ + # if not isinstance(nodes, list): + # nodes = [nodes] + # nrc = self.nrow * self.ncol + # v = [] + # for node in nodes: + # k = int(node / nrc) + # if (k * nrc) < node: + # k += 1 + # ij = int(node - (k - 1) * nrc) + # i = int(ij / self.ncol) + # if (i * self.ncol) < ij: + # i += 1 + # j = ij - (i - 1) * self.ncol + # v.append((k, i, j)) + # return v + # + # def get_node(self, lrc_list): + # """ + # Get node number from a list of MODFLOW layer, row, column tuples. 
+ # + # Returns + # ------- + # v : list of MODFLOW nodes for each layer (k), row (i), + # and column (j) tuple in the input list + # """ + # if not isinstance(lrc_list, list): + # lrc_list = [lrc_list] + # nrc = self.nrow * self.ncol + # v = [] + # for [k, i, j] in lrc_list: + # node = int(((k - 1) * nrc) + ((i - 1) * self.ncol) + j) + # v.append(node) + # return v + # + # def read_from_cnf(self, cnf_file_name, n_per_line=0): + # """ + # Read discretization information from an MT3D configuration file. + # + # """ + # + # def getn(ii, jj): + # if (jj == 0): + # n = 1 + # else: + # n = int(ii / jj) + # if (ii % jj != 0): + # n = n + 1 + # + # return n + # + # try: + # f_cnf = open(cnf_file_name, 'r') + # + # # nlay, nrow, ncol + # line = f_cnf.readline() + # s = line.split() + # cnf_nlay = int(s[0]) + # cnf_nrow = int(s[1]) + # cnf_ncol = int(s[2]) + # + # # ncol column widths delr[c] + # line = '' + # for dummy in range(getn(cnf_ncol, n_per_line)): + # line = line + f_cnf.readline() + # cnf_delr = [float(s) for s in line.split()] + # + # # nrow row widths delc[r] + # line = '' + # for dummy in range(getn(cnf_nrow, n_per_line)): + # line = line + f_cnf.readline() + # cnf_delc = [float(s) for s in line.split()] + # + # # nrow * ncol htop[r, c] + # line = '' + # for dummy in range(getn(cnf_nrow * cnf_ncol, n_per_line)): + # line = line + f_cnf.readline() + # cnf_top = [float(s) for s in line.split()] + # cnf_top = np.reshape(cnf_top, (cnf_nrow, cnf_ncol)) + # + # # nlay * nrow * ncol layer thickness dz[l, r, c] + # line = '' + # for dummy in range( + # getn(cnf_nlay * cnf_nrow * cnf_ncol, n_per_line)): + # line = line + f_cnf.readline() + # cnf_dz = [float(s) for s in line.split()] + # cnf_dz = np.reshape(cnf_dz, (cnf_nlay, cnf_nrow, cnf_ncol)) + # + # # cinact, cdry, not used here so commented + # '''line = f_cnf.readline() + # s = line.split() + # cinact = float(s[0]) + # cdry = float(s[1])''' + # + # f_cnf.close() + # finally: + # self.nlay = cnf_nlay + # self.nrow = cnf_nrow + # self.ncol = cnf_ncol + # + # self.delr = Util2d(model, (self.ncol,), np.float32, cnf_delr, + # name='delr', locat=self.unit_number[0]) + # self.delc = Util2d(model, (self.nrow,), np.float32, cnf_delc, + # name='delc', locat=self.unit_number[0]) + # self.top = Util2d(model, (self.nrow, self.ncol), np.float32, + # cnf_top, name='model_top', + # locat=self.unit_number[0]) + # + # cnf_botm = np.empty((self.nlay + sum(self.laycbd), self.nrow, + # self.ncol)) + # + # # First model layer + # cnf_botm[0:, :, :] = cnf_top - cnf_dz[0, :, :] + # # All other layers + # for l in range(1, self.nlay): + # cnf_botm[l, :, :] = cnf_botm[l - 1, :, :] - cnf_dz[l, :, :] + # + # self.botm = Util3d(model, (self.nlay + sum(self.laycbd), + # self.nrow, self.ncol), np.float32, + # cnf_botm, 'botm', + # locat=self.unit_number[0]) + # + # def gettop(self): + # """ + # Get the top array. + # + # Returns + # ------- + # top : array of floats (nrow, ncol) + # """ + # return self.top.array + # + # def getbotm(self, k=None): + # """ + # Get the bottom array. + # + # Returns + # ------- + # botm : array of floats (nlay, nrow, ncol), or + # + # botm : array of floats (nrow, ncol) if k is not none + # """ + # if k is None: + # return self.botm.array + # else: + # return self.botm.array[k, :, :] + # + # def check(self, f=None, verbose=True, level=1): + # """ + # Check dis package data for zero and negative thicknesses. 
+ # + # Parameters + # ---------- + # f : str or file handle + # String defining file name or file handle for summary file + # of check method output. If a sting is passed a file handle + # is created. If f is None, check method does not write + # results to a summary file. (default is None) + # verbose : bool + # Boolean flag used to determine if check method results are + # written to the screen + # level : int + # Check method analysis level. If level=0, summary checks are + # performed. If level=1, full checks are performed. + # + # Returns + # ------- + # None + # + # Examples + # -------- + # + # >>> import flopy + # >>> m = flopy.modflow.Modflow.load('model.nam') + # >>> m.dis.check() + # """ + # if f is not None: + # if isinstance(f, str): + # pth = os.path.join(self.parent.model_ws, f) + # f = open(pth, 'w', 0) + # + # errors = False + # txt = '\n{} PACKAGE DATA VALIDATION:\n'.format(self.name[0]) + # t = '' + # t1 = '' + # inactive = self.parent.bas6.ibound.array == 0 + # # thickness errors + # d = self.thickness.array + # d[inactive] = 1. + # if d.min() <= 0: + # errors = True + # t = '{} ERROR: Negative or zero cell thickness specified.\n'.format( + # t) + # if level > 0: + # idx = np.column_stack(np.where(d <= 0.)) + # t1 = self.level1_arraylist(idx, d, self.thickness.name, t1) + # else: + # t = '{} Specified cell thickness is OK.\n'.format(t) + # + # # add header to level 0 text + # txt += t + # + # if level > 0: + # if errors: + # txt += '\n DETAILED SUMMARY OF {} ERRORS:\n'.format( + # self.name[0]) + # # add level 1 header to level 1 text + # txt += t1 + # + # # write errors to summary file + # if f is not None: + # f.write('{}\n'.format(txt)) + # + # # write errors to stdout + # if verbose: + # print(txt) diff --git a/flopy/modflow/mfdrn.py b/flopy/modflow/mfdrn.py index f75715b999..c16f4f3de1 100644 --- a/flopy/modflow/mfdrn.py +++ b/flopy/modflow/mfdrn.py @@ -1,295 +1,295 @@ -""" -mfdrn module. Contains the ModflowDrn class. Note that the user can access -the ModflowDrn class as `flopy.modflow.ModflowDrn`. - -Additional information for this MODFLOW package can be found at the `Online -MODFLOW Guide -`_. - -""" -import sys -import numpy as np -from ..pakbase import Package -from ..utils.util_list import MfList -from ..utils.recarray_utils import create_empty_recarray - - -class ModflowDrn(Package): - """ - MODFLOW Drain Package Class. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to which - this package will be added. - ipakcb : int - A flag that is used to determine if cell-by-cell budget data should be - saved. If ipakcb is non-zero cell-by-cell budget data will be saved. - (default is None). - stress_period_data : list of boundaries, recarrays, or dictionary of - boundaries. - Each drain cell is defined through definition of - layer(int), row(int), column(int), elevation(float), - conductance(float). - The simplest form is a dictionary with a lists of boundaries for each - stress period, where each list of boundaries itself is a list of - boundaries. Indices of the dictionary are the numbers of the stress - period. This gives the form of:: - - stress_period_data = - {0: [ - [lay, row, col, stage, cond], - [lay, row, col, stage, cond], - [lay, row, col, stage, cond], - ], - 1: [ - [lay, row, col, stage, cond], - [lay, row, col, stage, cond], - [lay, row, col, stage, cond], - ], ... 
- kper: - [ - [lay, row, col, stage, cond], - [lay, row, col, stage, cond], - [lay, row, col, stage, cond], - ] - } - - Note that if no values are specified for a certain stress period, then - the list of boundaries for the previous stress period for which values - were defined is used. Full details of all options to specify - stress_period_data can be found in the flopy3boundaries Notebook in - the basic subdirectory of the examples directory. - dtype : dtype definition - if data type is different from default - options : list of strings - Package options. (default is None). - extension : string - Filename extension (default is 'drn') - unitnumber : int - File unit number (default is None). - filenames : str or list of str - Filenames to use for the package and the output files. If - filenames=None the package name will be created using the model name - and package extension and the cbc output name will be created using - the model name and .cbc extension (for example, modflowtest.cbc), - if ipakcbc is a number greater than zero. If a single string is passed - the package will be set to the string and cbc output names will be - created using the model name and .cbc extension, if ipakcbc is a - number greater than zero. To define the names for all package files - (input and output) the length of the list of strings should be 2. - Default is None. - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - Parameters are not supported in FloPy. - If "RETURNFLOW" in passed in options, the drain return package (DRT) activated, which expects - a different (longer) dtype for stress_period_data - - Examples - -------- - - >>> import flopy - >>> ml = flopy.modflow.Modflow() - >>> lrcec = {0:[2, 3, 4, 10., 100.]} #this drain will be applied to all - >>> #stress periods - >>> drn = flopy.modflow.ModflowDrn(ml, stress_period_data=lrcec) - - """ - - def __init__(self, model, ipakcb=None, stress_period_data=None, dtype=None, - extension='drn', unitnumber=None, options=None, - filenames=None, **kwargs): - - # set default unit number of one is not specified - if unitnumber is None: - unitnumber = ModflowDrn.defaultunit() - - # set filenames - if filenames is None: - filenames = [None, None] - elif isinstance(filenames, str): - filenames = [filenames, None] - elif isinstance(filenames, list): - if len(filenames) < 2: - filenames.append(None) - - # update external file information with cbc output, if necessary - if ipakcb is not None: - fname = filenames[1] - model.add_output_file(ipakcb, fname=fname, - package=ModflowDrn.ftype()) - else: - ipakcb = 0 - - if options is None: - options = [] - self.is_drt = False - for opt in options: - if opt.upper() == "RETURNFLOW": - self.is_drt = True - break - if self.is_drt: - name = ["DRT"] - else: - name = [ModflowDrn.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - self.heading = '# {} package for '.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' 
- self.url = 'drn.htm' - - self.ipakcb = ipakcb - - self.np = 0 - - self.options = options - if dtype is not None: - self.dtype = dtype - else: - self.dtype = self.get_default_dtype( - structured=self.parent.structured, is_drt=self.is_drt) - self.stress_period_data = MfList(self, stress_period_data) - self.parent.add_package(self) - - @staticmethod - def get_default_dtype(structured=True, is_drt=False): - if structured: - if not is_drt: - dtype = np.dtype([("k", np.int), ("i", np.int), - ("j", np.int), ("elev", np.float32), - ("cond", np.float32)]) - else: - dtype = np.dtype([("k", np.int), ("i", np.int), - ("j", np.int), ("elev", np.float32), - ("cond", np.float32), ("layr", np.int), - ("rowr", np.int), ("colr", np.int), - ("rfprop", np.float32)]) - else: - dtype = np.dtype([("node", np.int), ("elev", np.float32), - ("cond", np.float32)]) - return dtype - - def ncells(self): - # Returns the maximum number of cells that have drains (developed for MT3DMS SSM package) - # print 'Function must be implemented properly for drn package' - return self.stress_period_data.mxact - - def write_file(self, check=True): - """ - Write the package file. - - Parameters - ---------- - check : boolean - Check package data for common errors. (default True) - - Returns - ------- - None - - """ - if check: # allows turning off package checks when writing files at model level - self.check(f='{}.chk'.format(self.name[0]), - verbose=self.parent.verbose, level=1) - f_drn = open(self.fn_path, 'w') - f_drn.write('{0}\n'.format(self.heading)) - # f_drn.write('%10i%10i\n' % (self.mxactd, self.idrncb)) - line = '{0:10d}{1:10d}'.format(self.stress_period_data.mxact, - self.ipakcb) - - if self.is_drt: - line += "{0:10d}{0:10d}".format(0) - for opt in self.options: - line += ' ' + str(opt) - line += '\n' - f_drn.write(line) - self.stress_period_data.write_transient(f_drn) - f_drn.close() - - def add_record(self, kper, index, values): - try: - self.stress_period_data.add_record(kper, index, values) - except Exception as e: - raise Exception("mfdrn error adding record to list: " + str(e)) - - @staticmethod - def get_empty(ncells=0, aux_names=None, structured=True, is_drt=False): - # get an empty recarray that corresponds to dtype - dtype = ModflowDrn.get_default_dtype(structured=structured, - is_drt=is_drt) - if aux_names is not None: - dtype = Package.add_to_dtype(dtype, aux_names, np.float32) - return create_empty_recarray(ncells, dtype, default_value=-1.0E+10) - - @staticmethod - def get_sfac_columns(): - return ['cond'] - - @staticmethod - def load(f, model, nper=None, ext_unit_dict=None, check=True): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - check : boolean - Check package data for common errors. (default True) - - Returns - ------- - drn : ModflowDrn object - ModflowDrn object. 
-
-        Examples
-        --------
-
-        >>> import flopy
-        >>> m = flopy.modflow.Modflow()
-        >>> drn = flopy.modflow.ModflowDrn.load('test.drn', m)
-
-        """
-
-        if model.verbose:
-            sys.stdout.write('loading drn package file...\n')
-
-        return Package.load(f, model, ModflowDrn, nper=nper, check=check,
-                            ext_unit_dict=ext_unit_dict)
-
-    @staticmethod
-    def ftype():
-        return 'DRN'
-
-    @staticmethod
-    def defaultunit():
-        return 21
+"""
+mfdrn module. Contains the ModflowDrn class. Note that the user can access
+the ModflowDrn class as `flopy.modflow.ModflowDrn`.
+
+Additional information for this MODFLOW package can be found at the `Online
+MODFLOW Guide
+`_.
+
+"""
+import sys
+import numpy as np
+from ..pakbase import Package
+from ..utils.util_list import MfList
+from ..utils.recarray_utils import create_empty_recarray
+
+
+class ModflowDrn(Package):
+    """
+    MODFLOW Drain Package Class.
+
+    Parameters
+    ----------
+    model : model object
+        The model object (of type :class:`flopy.modflow.mf.Modflow`) to which
+        this package will be added.
+    ipakcb : int
+        A flag that is used to determine if cell-by-cell budget data should be
+        saved. If ipakcb is non-zero cell-by-cell budget data will be saved.
+        (default is None).
+    stress_period_data : list of boundaries, recarrays, or dictionary of
+        boundaries.
+        Each drain cell is defined by a layer (int), row (int), column (int),
+        elevation (float), and conductance (float).
+        The simplest form is a dictionary with a list of boundaries for each
+        stress period, where each boundary is itself a list of values. Keys
+        of the dictionary are the (zero-based) stress period numbers. This
+        gives the form of::
+
+            stress_period_data =
+            {0: [
+                [lay, row, col, stage, cond],
+                [lay, row, col, stage, cond],
+                [lay, row, col, stage, cond],
+                ],
+            1: [
+                [lay, row, col, stage, cond],
+                [lay, row, col, stage, cond],
+                [lay, row, col, stage, cond],
+                ], ...
+            kper:
+                [
+                [lay, row, col, stage, cond],
+                [lay, row, col, stage, cond],
+                [lay, row, col, stage, cond],
+                ]
+            }
+
+        Note that if no values are specified for a certain stress period, then
+        the list of boundaries for the previous stress period for which values
+        were defined is used. Full details of all options to specify
+        stress_period_data can be found in the flopy3boundaries Notebook in
+        the basic subdirectory of the examples directory.
+    dtype : dtype definition
+        If the data type is different from the default.
+    options : list of strings
+        Package options. (default is None).
+    extension : string
+        Filename extension (default is 'drn')
+    unitnumber : int
+        File unit number (default is None).
+    filenames : str or list of str
+        Filenames to use for the package and the output files. If
+        filenames=None the package name will be created using the model name
+        and package extension and the cbc output name will be created using
+        the model name and .cbc extension (for example, modflowtest.cbc),
+        if ipakcb is a number greater than zero. If a single string is passed
+        the package will be set to the string and cbc output names will be
+        created using the model name and .cbc extension, if ipakcb is a
+        number greater than zero. To define the names for all package files
+        (input and output) the length of the list of strings should be 2.
+        Default is None.
+
+    Attributes
+    ----------
+
+    Methods
+    -------
+
+    See Also
+    --------
+
+    Notes
+    -----
+    Parameters are not supported in FloPy.
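+
+    As an illustration of the stress-period behavior described above (the
+    cell indices and values here are purely illustrative), a dictionary with
+    a single entry for stress period 0 applies the same drain in every later
+    stress period::
+
+        stress_period_data = {0: [[0, 1, 1, 9.5, 50.]]}
+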
+ If "RETURNFLOW" in passed in options, the drain return package (DRT) activated, which expects + a different (longer) dtype for stress_period_data + + Examples + -------- + + >>> import flopy + >>> ml = flopy.modflow.Modflow() + >>> lrcec = {0:[2, 3, 4, 10., 100.]} #this drain will be applied to all + >>> #stress periods + >>> drn = flopy.modflow.ModflowDrn(ml, stress_period_data=lrcec) + + """ + + def __init__(self, model, ipakcb=None, stress_period_data=None, dtype=None, + extension='drn', unitnumber=None, options=None, + filenames=None, **kwargs): + + # set default unit number of one is not specified + if unitnumber is None: + unitnumber = ModflowDrn.defaultunit() + + # set filenames + if filenames is None: + filenames = [None, None] + elif isinstance(filenames, str): + filenames = [filenames, None] + elif isinstance(filenames, list): + if len(filenames) < 2: + filenames.append(None) + + # update external file information with cbc output, if necessary + if ipakcb is not None: + fname = filenames[1] + model.add_output_file(ipakcb, fname=fname, + package=ModflowDrn.ftype()) + else: + ipakcb = 0 + + if options is None: + options = [] + self.is_drt = False + for opt in options: + if opt.upper() == "RETURNFLOW": + self.is_drt = True + break + if self.is_drt: + name = ["DRT"] + else: + name = [ModflowDrn.ftype()] + units = [unitnumber] + extra = [''] + + # set package name + fname = [filenames[0]] + + # Call ancestor's init to set self.parent, extension, name and unit number + Package.__init__(self, model, extension=extension, name=name, + unit_number=units, extra=extra, filenames=fname) + + self.heading = '# {} package for '.format(self.name[0]) + \ + ' {}, '.format(model.version_types[model.version]) + \ + 'generated by Flopy.' + self.url = 'drn.htm' + + self.ipakcb = ipakcb + + self.np = 0 + + self.options = options + if dtype is not None: + self.dtype = dtype + else: + self.dtype = self.get_default_dtype( + structured=self.parent.structured, is_drt=self.is_drt) + self.stress_period_data = MfList(self, stress_period_data) + self.parent.add_package(self) + + @staticmethod + def get_default_dtype(structured=True, is_drt=False): + if structured: + if not is_drt: + dtype = np.dtype([("k", np.int), ("i", np.int), + ("j", np.int), ("elev", np.float32), + ("cond", np.float32)]) + else: + dtype = np.dtype([("k", np.int), ("i", np.int), + ("j", np.int), ("elev", np.float32), + ("cond", np.float32), ("layr", np.int), + ("rowr", np.int), ("colr", np.int), + ("rfprop", np.float32)]) + else: + dtype = np.dtype([("node", np.int), ("elev", np.float32), + ("cond", np.float32)]) + return dtype + + def ncells(self): + # Returns the maximum number of cells that have drains (developed for MT3DMS SSM package) + # print 'Function must be implemented properly for drn package' + return self.stress_period_data.mxact + + def write_file(self, check=True): + """ + Write the package file. + + Parameters + ---------- + check : boolean + Check package data for common errors. 
(default True) + + Returns + ------- + None + + """ + if check: # allows turning off package checks when writing files at model level + self.check(f='{}.chk'.format(self.name[0]), + verbose=self.parent.verbose, level=1) + f_drn = open(self.fn_path, 'w') + f_drn.write('{0}\n'.format(self.heading)) + # f_drn.write('%10i%10i\n' % (self.mxactd, self.idrncb)) + line = '{0:10d}{1:10d}'.format(self.stress_period_data.mxact, + self.ipakcb) + + if self.is_drt: + line += "{0:10d}{0:10d}".format(0) + for opt in self.options: + line += ' ' + str(opt) + line += '\n' + f_drn.write(line) + self.stress_period_data.write_transient(f_drn) + f_drn.close() + + def add_record(self, kper, index, values): + try: + self.stress_period_data.add_record(kper, index, values) + except Exception as e: + raise Exception("mfdrn error adding record to list: " + str(e)) + + @staticmethod + def get_empty(ncells=0, aux_names=None, structured=True, is_drt=False): + # get an empty recarray that corresponds to dtype + dtype = ModflowDrn.get_default_dtype(structured=structured, + is_drt=is_drt) + if aux_names is not None: + dtype = Package.add_to_dtype(dtype, aux_names, np.float32) + return create_empty_recarray(ncells, dtype, default_value=-1.0E+10) + + @staticmethod + def get_sfac_columns(): + return ['cond'] + + @staticmethod + def load(f, model, nper=None, ext_unit_dict=None, check=True): + """ + Load an existing package. + + Parameters + ---------- + f : filename or file handle + File to load. + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to + which this package will be added. + ext_unit_dict : dictionary, optional + If the arrays in the file are specified using EXTERNAL, + or older style array control records, then `f` should be a file + handle. In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. + check : boolean + Check package data for common errors. (default True) + + Returns + ------- + drn : ModflowDrn object + ModflowDrn object. + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> drn = flopy.modflow.ModflowDrn.load('test.drn', m) + + """ + + if model.verbose: + sys.stdout.write('loading drn package file...\n') + + return Package.load(f, model, ModflowDrn, nper=nper, check=check, + ext_unit_dict=ext_unit_dict) + + @staticmethod + def ftype(): + return 'DRN' + + @staticmethod + def defaultunit(): + return 21 diff --git a/flopy/modflow/mfdrt.py b/flopy/modflow/mfdrt.py index be5c82591a..74589524e9 100644 --- a/flopy/modflow/mfdrt.py +++ b/flopy/modflow/mfdrt.py @@ -1,282 +1,282 @@ -""" -mfdrt module. Contains the ModflowDrt class. Note that the user can access -the ModflowDrt class as `flopy.modflow.ModflowDrt`. - -Additional information for this MODFLOW package can be found at the `Online -MODFLOW Guide -`_. - -""" -import sys -import numpy as np -from ..pakbase import Package -from ..utils.util_list import MfList -from ..utils.recarray_utils import create_empty_recarray - - -class ModflowDrt(Package): - """ - MODFLOW Drain Return Package Class. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to which - this package will be added. - ipakcb : int - A flag that is used to determine if cell-by-cell budget data should be - saved. If ipakcb is non-zero cell-by-cell budget data will be saved. - (default is None). - stress_period_data : list of boundaries, recarrays, or dictionary of - boundaries. 
- Each drain return cell is defined through definition of - layer(int), row(int), column(int), elevation(float), - conductance(float), layerR (int) , rowR (int), colR (int) and rfprop (float). - The simplest form is a dictionary with a lists of boundaries for each - stress period, where each list of boundaries itself is a list of - boundaries. Indices of the dictionary are the numbers of the stress - period. This gives the form of:: - - stress_period_data = - {0: [ - [lay, row, col, stage, cond, layerr, rowr, colr, rfprop], - [lay, row, col, stage, cond, layerr, rowr, colr, rfprop], - [lay, row, col, stage, cond, layerr, rowr, colr, rfprop], - ], - 1: [ - [lay, row, col, stage, cond, layerr, rowr, colr, rfprop], - [lay, row, col, stage, cond, layerr, rowr, colr, rfprop], - [lay, row, col, stage, cond, layerr, rowr, colr, rfprop], - ], ... - kper: - [ - [lay, row, col, stage, cond, layerr, rowr, colr, rfprop], - [lay, row, col, stage, cond, layerr, rowr, colr, rfprop], - [lay, row, col, stage, cond, layerr, rowr, colr, rfprop], - ] - } - - Note that if no values are specified for a certain stress period, then - the list of boundaries for the previous stress period for which values - were defined is used. Full details of all options to specify - stress_period_data can be found in the flopy3boundaries Notebook in - the basic subdirectory of the examples directory. - dtype : dtype definition - if data type is different from default - options : list of strings - Package options. (default is None). - extension : string - Filename extension (default is 'drt') - unitnumber : int - File unit number (default is None). - filenames : str or list of str - Filenames to use for the package and the output files. If - filenames=None the package name will be created using the model name - and package extension and the cbc output name will be created using - the model name and .cbc extension (for example, modflowtest.cbc), - if ipakcbc is a number greater than zero. If a single string is passed - the package will be set to the string and cbc output names will be - created using the model name and .cbc extension, if ipakcbc is a - number greater than zero. To define the names for all package files - (input and output) the length of the list of strings should be 2. - Default is None. - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - Parameters are not supported in FloPy. 
- - Examples - -------- - - >>> import flopy - >>> ml = flopy.modflow.Modflow() - >>> lrcec = {0:[2, 3, 4, 10., 100., 1 ,1 ,1, 1.0]} #this drain will be applied to all - >>> #stress periods - >>> drt = flopy.modflow.ModflowDrt(ml, stress_period_data=lrcec) - - """ - - def __init__(self, model, ipakcb=None, stress_period_data=None, dtype=None, - extension='drt', unitnumber=None, options=None, - filenames=None, **kwargs): - - # set default unit number of one is not specified - if unitnumber is None: - unitnumber = ModflowDrt.defaultunit() - - # set filenames - if filenames is None: - filenames = [None, None] - elif isinstance(filenames, str): - filenames = [filenames, None] - elif isinstance(filenames, list): - if len(filenames) < 2: - filenames.append(None) - - # update external file information with cbc output, if necessary - if ipakcb is not None: - fname = filenames[1] - model.add_output_file(ipakcb, fname=fname, - package=ModflowDrt.ftype()) - else: - ipakcb = 0 - - if options is None: - options = [] - found = False - for opt in options: - if opt.upper() == "RETURNFLOW": - found = True - break - if not found: - options.append("RETURNFLOW") - - name = [ModflowDrt.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - self.heading = '# {} package for '.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' - self.url = 'drt.htm' - - self.ipakcb = ipakcb - - self.np = 0 - - self.options = options - if dtype is not None: - self.dtype = dtype - else: - self.dtype = self.get_default_dtype( - structured=self.parent.structured) - self.stress_period_data = MfList(self, stress_period_data) - self.parent.add_package(self) - - @staticmethod - def get_default_dtype(structured=True): - if structured: - dtype = np.dtype([("k", np.int), ("i", np.int), - ("j", np.int), ("elev", np.float32), - ("cond", np.float32), ("layr", np.int), - ("rowr", np.int), ("colr", np.int), - ("rfprop", np.float32)]) - else: - dtype = np.dtype([("inode", np.int), ("elev", np.float32), - ("cond", np.float32), ("layr", np.int), - ("rowr", np.int), ("colr", np.int), - ("rfprop", np.float32)]) - return dtype - - def ncells(self): - # Returns the maximum number of cells that have drains (developed for MT3DMS SSM package) - # print 'Function must be implemented properly for drt package' - return self.stress_period_data.mxact - - def write_file(self, check=True): - """ - Write the package file. - - Parameters - ---------- - check : boolean - Check package data for common errors. 
(default True) - - Returns - ------- - None - - """ - if check: # allows turning off package checks when writing files at model level - self.check(f='{}.chk'.format(self.name[0]), - verbose=self.parent.verbose, level=1) - f_drn = open(self.fn_path, 'w') - f_drn.write('{0}\n'.format(self.heading)) - # f_drn.write('%10i%10i\n' % (self.mxactd, self.idrncb)) - line = '{0:10d}{1:10d}{2:10d}{3:10d}'.format( - self.stress_period_data.mxact, self.ipakcb, 0, 0) - for opt in self.options: - line += ' ' + str(opt) - line += '\n' - f_drn.write(line) - self.stress_period_data.write_transient(f_drn) - f_drn.close() - - def add_record(self, kper, index, values): - try: - self.stress_period_data.add_record(kper, index, values) - except Exception as e: - raise Exception("mfdrt error adding record to list: " + str(e)) - - @staticmethod - def get_empty(ncells=0, aux_names=None, structured=True, is_drt=False): - # get an empty recarray that corresponds to dtype - dtype = ModflowDrt.get_default_dtype(structured=structured) - if aux_names is not None: - dtype = Package.add_to_dtype(dtype, aux_names, np.float32) - return create_empty_recarray(ncells, dtype, default_value=-1.0E+10) - - @staticmethod - def load(f, model, nper=None, ext_unit_dict=None, check=True): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - check : boolean - Check package data for common errors. (default True) - - Returns - ------- - drn : ModflowDrt object - ModflowDrt object. - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> drn = flopy.modflow.ModflowDrt.load('test.drt', m) - - """ - - if model.verbose: - sys.stdout.write('loading drt package file...\n') - - return Package.load(f, model, ModflowDrt, nper=nper, check=check, - ext_unit_dict=ext_unit_dict) - - @staticmethod - def ftype(): - return 'DRT' - - @staticmethod - def defaultunit(): - return 21 +""" +mfdrt module. Contains the ModflowDrt class. Note that the user can access +the ModflowDrt class as `flopy.modflow.ModflowDrt`. + +Additional information for this MODFLOW package can be found at the `Online +MODFLOW Guide +`_. + +""" +import sys +import numpy as np +from ..pakbase import Package +from ..utils.util_list import MfList +from ..utils.recarray_utils import create_empty_recarray + + +class ModflowDrt(Package): + """ + MODFLOW Drain Return Package Class. + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to which + this package will be added. + ipakcb : int + A flag that is used to determine if cell-by-cell budget data should be + saved. If ipakcb is non-zero cell-by-cell budget data will be saved. + (default is None). + stress_period_data : list of boundaries, recarrays, or dictionary of + boundaries. + Each drain return cell is defined through definition of + layer(int), row(int), column(int), elevation(float), + conductance(float), layerR (int) , rowR (int), colR (int) and rfprop (float). 
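+        Here layerr, rowr, and colr locate the cell that receives the return
+        flow, and rfprop is the proportion (between 0 and 1) of the simulated
+        drain flow returned to that cell; these correspond to the layr, rowr,
+        colr, and rfprop fields of the default dtype below.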
+        The simplest form is a dictionary with a list of boundaries for each
+        stress period, where each boundary is itself a list of values. Keys
+        of the dictionary are the (zero-based) stress period numbers. This
+        gives the form of::
+
+            stress_period_data =
+            {0: [
+                [lay, row, col, stage, cond, layerr, rowr, colr, rfprop],
+                [lay, row, col, stage, cond, layerr, rowr, colr, rfprop],
+                [lay, row, col, stage, cond, layerr, rowr, colr, rfprop],
+                ],
+            1: [
+                [lay, row, col, stage, cond, layerr, rowr, colr, rfprop],
+                [lay, row, col, stage, cond, layerr, rowr, colr, rfprop],
+                [lay, row, col, stage, cond, layerr, rowr, colr, rfprop],
+                ], ...
+            kper:
+                [
+                [lay, row, col, stage, cond, layerr, rowr, colr, rfprop],
+                [lay, row, col, stage, cond, layerr, rowr, colr, rfprop],
+                [lay, row, col, stage, cond, layerr, rowr, colr, rfprop],
+                ]
+            }
+
+        Note that if no values are specified for a certain stress period, then
+        the list of boundaries for the previous stress period for which values
+        were defined is used. Full details of all options to specify
+        stress_period_data can be found in the flopy3boundaries Notebook in
+        the basic subdirectory of the examples directory.
+    dtype : dtype definition
+        If the data type is different from the default.
+    options : list of strings
+        Package options. (default is None).
+    extension : string
+        Filename extension (default is 'drt')
+    unitnumber : int
+        File unit number (default is None).
+    filenames : str or list of str
+        Filenames to use for the package and the output files. If
+        filenames=None the package name will be created using the model name
+        and package extension and the cbc output name will be created using
+        the model name and .cbc extension (for example, modflowtest.cbc),
+        if ipakcb is a number greater than zero. If a single string is passed
+        the package will be set to the string and cbc output names will be
+        created using the model name and .cbc extension, if ipakcb is a
+        number greater than zero. To define the names for all package files
+        (input and output) the length of the list of strings should be 2.
+        Default is None.
+
+    Attributes
+    ----------
+
+    Methods
+    -------
+
+    See Also
+    --------
+
+    Notes
+    -----
+    Parameters are not supported in FloPy.
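+
+    A :class:`flopy.modflow.ModflowDrn` instance created with the
+    "RETURNFLOW" option uses this same (longer) dtype; a minimal sketch,
+    with hypothetical cell indices and values::
+
+        spd = {0: [[0, 1, 1, 9.5, 50., 0, 2, 2, 0.5]]}
+        drn = flopy.modflow.ModflowDrn(ml, options=['RETURNFLOW'],
+                                       stress_period_data=spd)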
+
+    Examples
+    --------
+
+    >>> import flopy
+    >>> ml = flopy.modflow.Modflow()
+    >>> # this drain will be applied to all stress periods
+    >>> lrcec = {0: [2, 3, 4, 10., 100., 1, 1, 1, 1.0]}
+    >>> drt = flopy.modflow.ModflowDrt(ml, stress_period_data=lrcec)
+
+    """
+
+    def __init__(self, model, ipakcb=None, stress_period_data=None, dtype=None,
+                 extension='drt', unitnumber=None, options=None,
+                 filenames=None, **kwargs):
+
+        # set default unit number if one is not specified
+        if unitnumber is None:
+            unitnumber = ModflowDrt.defaultunit()
+
+        # set filenames
+        if filenames is None:
+            filenames = [None, None]
+        elif isinstance(filenames, str):
+            filenames = [filenames, None]
+        elif isinstance(filenames, list):
+            if len(filenames) < 2:
+                filenames.append(None)
+
+        # update external file information with cbc output, if necessary
+        if ipakcb is not None:
+            fname = filenames[1]
+            model.add_output_file(ipakcb, fname=fname,
+                                  package=ModflowDrt.ftype())
+        else:
+            ipakcb = 0
+
+        if options is None:
+            options = []
+        found = False
+        for opt in options:
+            if opt.upper() == "RETURNFLOW":
+                found = True
+                break
+        if not found:
+            options.append("RETURNFLOW")
+
+        name = [ModflowDrt.ftype()]
+        units = [unitnumber]
+        extra = ['']
+
+        # set package name
+        fname = [filenames[0]]
+
+        # Call ancestor's init to set self.parent, extension, name and unit number
+        Package.__init__(self, model, extension=extension, name=name,
+                         unit_number=units, extra=extra, filenames=fname)
+
+        self.heading = '# {} package for '.format(self.name[0]) + \
+                       ' {}, '.format(model.version_types[model.version]) + \
+                       'generated by Flopy.'
+        self.url = 'drt.htm'
+
+        self.ipakcb = ipakcb
+
+        self.np = 0
+
+        self.options = options
+        if dtype is not None:
+            self.dtype = dtype
+        else:
+            self.dtype = self.get_default_dtype(
+                structured=self.parent.structured)
+        self.stress_period_data = MfList(self, stress_period_data)
+        self.parent.add_package(self)
+
+    @staticmethod
+    def get_default_dtype(structured=True):
+        if structured:
+            dtype = np.dtype([("k", np.int), ("i", np.int),
+                              ("j", np.int), ("elev", np.float32),
+                              ("cond", np.float32), ("layr", np.int),
+                              ("rowr", np.int), ("colr", np.int),
+                              ("rfprop", np.float32)])
+        else:
+            dtype = np.dtype([("inode", np.int), ("elev", np.float32),
+                              ("cond", np.float32), ("layr", np.int),
+                              ("rowr", np.int), ("colr", np.int),
+                              ("rfprop", np.float32)])
+        return dtype
+
+    def ncells(self):
+        # Returns the maximum number of cells that have drains (developed for MT3DMS SSM package)
+        # print 'Function must be implemented properly for drt package'
+        return self.stress_period_data.mxact
+
+    def write_file(self, check=True):
+        """
+        Write the package file.
+
+        Parameters
+        ----------
+        check : boolean
+            Check package data for common errors.
(default True) + + Returns + ------- + None + + """ + if check: # allows turning off package checks when writing files at model level + self.check(f='{}.chk'.format(self.name[0]), + verbose=self.parent.verbose, level=1) + f_drn = open(self.fn_path, 'w') + f_drn.write('{0}\n'.format(self.heading)) + # f_drn.write('%10i%10i\n' % (self.mxactd, self.idrncb)) + line = '{0:10d}{1:10d}{2:10d}{3:10d}'.format( + self.stress_period_data.mxact, self.ipakcb, 0, 0) + for opt in self.options: + line += ' ' + str(opt) + line += '\n' + f_drn.write(line) + self.stress_period_data.write_transient(f_drn) + f_drn.close() + + def add_record(self, kper, index, values): + try: + self.stress_period_data.add_record(kper, index, values) + except Exception as e: + raise Exception("mfdrt error adding record to list: " + str(e)) + + @staticmethod + def get_empty(ncells=0, aux_names=None, structured=True, is_drt=False): + # get an empty recarray that corresponds to dtype + dtype = ModflowDrt.get_default_dtype(structured=structured) + if aux_names is not None: + dtype = Package.add_to_dtype(dtype, aux_names, np.float32) + return create_empty_recarray(ncells, dtype, default_value=-1.0E+10) + + @staticmethod + def load(f, model, nper=None, ext_unit_dict=None, check=True): + """ + Load an existing package. + + Parameters + ---------- + f : filename or file handle + File to load. + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to + which this package will be added. + ext_unit_dict : dictionary, optional + If the arrays in the file are specified using EXTERNAL, + or older style array control records, then `f` should be a file + handle. In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. + check : boolean + Check package data for common errors. (default True) + + Returns + ------- + drn : ModflowDrt object + ModflowDrt object. + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> drn = flopy.modflow.ModflowDrt.load('test.drt', m) + + """ + + if model.verbose: + sys.stdout.write('loading drt package file...\n') + + return Package.load(f, model, ModflowDrt, nper=nper, check=check, + ext_unit_dict=ext_unit_dict) + + @staticmethod + def ftype(): + return 'DRT' + + @staticmethod + def defaultunit(): + return 21 diff --git a/flopy/modflow/mfevt.py b/flopy/modflow/mfevt.py index fd7cb991ff..b998c575d2 100644 --- a/flopy/modflow/mfevt.py +++ b/flopy/modflow/mfevt.py @@ -1,380 +1,380 @@ -""" -mfghb module. Contains the ModflowEvt class. Note that the user can access -the ModflowEvt class as `flopy.modflow.ModflowEvt`. - -Additional information for this MODFLOW package can be found at the `Online -MODFLOW Guide -`_. - -""" -import sys - -import numpy as np -from .mfparbc import ModflowParBc as mfparbc -from ..utils import Transient2d, Util2d - -from ..pakbase import Package - - -class ModflowEvt(Package): - """ - MODFLOW Evapotranspiration Package Class. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modflow.mf.ModflowEvt`) to which - this package will be added. - ipakcb : int - A flag that is used to determine if cell-by-cell budget data should be - saved. If ipakcb is non-zero cell-by-cell budget data will be saved. - (default is 0). - nevtop : int - is the recharge option code. - 1: ET is calculated only for cells in the top grid layer - 2: ET to layer defined in ievt - 3: ET to highest active cell (default is 3). 
- surf : float or filename or ndarray or dict keyed on kper (zero-based) - is the ET surface elevation. (default is 0.0, which is used for all - stress periods). - evtr: float or filename or ndarray or dict keyed on kper (zero-based) - is the maximum ET flux (default is 1e-3, which is used for all - stress periods). - exdp : float or filename or ndarray or dict keyed on kper (zero-based) - is the ET extinction depth (default is 1.0, which is used for all - stress periods). - ievt : int or filename or ndarray or dict keyed on kper (zero-based) - is the layer indicator variable (default is 1, which is used for all - stress periods). - extension : string - Filename extension (default is 'evt') - unitnumber : int - File unit number (default is None). - filenames : str or list of str - Filenames to use for the package and the output files. If - filenames=None the package name will be created using the model name - and package extension and the cbc output name will be created using - the model name and .cbc extension (for example, modflowtest.cbc), - if ipakcbc is a number greater than zero. If a single string is passed - the package will be set to the string and cbc output names will be - created using the model name and .cbc extension, if ipakcbc is a - number greater than zero. To define the names for all package files - (input and output) the length of the list of strings should be 2. - Default is None. - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - Parameters are not supported in FloPy. - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> evt = flopy.modflow.ModflowEvt(m, nevtop=3, evtr=1.2e-4) - - """ - - def __init__(self, model, nevtop=3, ipakcb=None, surf=0., evtr=1e-3, - exdp=1., - ievt=1, - extension='evt', unitnumber=None, filenames=None, - external=True): - - # set default unit number of one is not specified - if unitnumber is None: - unitnumber = ModflowEvt.defaultunit() - - # set filenames - if filenames is None: - filenames = [None, None] - elif isinstance(filenames, str): - filenames = [filenames, None] - elif isinstance(filenames, list): - if len(filenames) < 2: - filenames.append(None) - - # update external file information with cbc output, if necessary - if ipakcb is not None: - fname = filenames[1] - model.add_output_file(ipakcb, fname=fname, - package=ModflowEvt.ftype()) - else: - ipakcb = 0 - - # Fill namefile items - name = [ModflowEvt.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper - self.heading = '# {} package for '.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' 
- self.url = 'evt.htm' - self.nevtop = nevtop - self.ipakcb = ipakcb - self.external = external - if self.external is False: - load = True - else: - load = model.load - - self.surf = Transient2d(model, (nrow, ncol), np.float32, - surf, name='surf') - self.evtr = Transient2d(model, (nrow, ncol), np.float32, - evtr, name='evtr') - self.exdp = Transient2d(model, (nrow, ncol), np.float32, - exdp, name='exdp') - self.ievt = Transient2d(model, (nrow, ncol), np.int32, - ievt, name='ievt') - self.np = 0 - self.parent.add_package(self) - - def ncells(self): - # Returns the maximum number of cells that have - # evapotranspiration (developed for MT3DMS SSM package) - nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper - return (nrow * ncol) - - def write_file(self, f=None): - """ - Write the package file. - - Returns - ------- - None - - """ - nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper - if f is not None: - f_evt = f - else: - f_evt = open(self.fn_path, 'w') - f_evt.write('{0:s}\n'.format(self.heading)) - f_evt.write('{0:10d}{1:10d}\n'.format(self.nevtop, self.ipakcb)) - for n in range(nper): - insurf, surf = self.surf.get_kper_entry(n) - inevtr, evtr = self.evtr.get_kper_entry(n) - inexdp, exdp = self.exdp.get_kper_entry(n) - inievt, ievt = self.ievt.get_kper_entry(n) - comment = 'Evapotranspiration dataset 5 for stress period ' + \ - str(n + 1) - f_evt.write('{0:10d}{1:10d}{2:10d}{3:10d} # {4:s}\n' - .format(insurf, inevtr, inexdp, inievt, comment)) - if (insurf >= 0): - f_evt.write(surf) - if (inevtr >= 0): - f_evt.write(evtr) - if (inexdp >= 0): - f_evt.write(exdp) - if self.nevtop == 2 and inievt >= 0: - f_evt.write(ievt) - f_evt.close() - - @staticmethod - def load(f, model, nper=None, ext_unit_dict=None): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. - nper : int - The number of stress periods. If nper is None, then nper will be - obtained from the model object. (default is None). - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - - Returns - ------- - evt : ModflowEvt object - ModflowEvt object. - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> evt = flopy.modflow.mfevt.load('test.evt', m) - - """ - if model.verbose: - sys.stdout.write('loading evt package file...\n') - - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - - # Dataset 0 -- header - while True: - line = f.readline() - if line[0] != '#': - break - npar = 0 - if "parameter" in line.lower(): - raw = line.strip().split() - npar = int(raw[1]) - if npar > 0: - if model.verbose: - print(' Parameters detected. 
Number of parameters = ',
-                      npar)
-            line = f.readline()
-        # Dataset 2
-        t = line.strip().split()
-        nevtop = int(t[0])
-        ipakcb = int(t[1])
-
-        # Dataset 3 and 4 - parameters data
-        pak_parms = None
-        if npar > 0:
-            pak_parms = mfparbc.loadarray(f, npar, model.verbose)
-
-        if nper is None:
-            nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper()
-
-        # Read data for every stress period
-        surf = {}
-        evtr = {}
-        exdp = {}
-        ievt = {}
-        current_surf = []
-        current_evtr = []
-        current_exdp = []
-        current_ievt = []
-        for iper in range(nper):
-            line = f.readline()
-            t = line.strip().split()
-            insurf = int(t[0])
-            inevtr = int(t[1])
-            inexdp = int(t[2])
-            if (nevtop == 2):
-                inievt = int(t[3])
-            if insurf >= 0:
-                if model.verbose:
-                    print(' loading surf stress period {0:3d}...'.format(
-                        iper + 1))
-                t = Util2d.load(f, model, (nrow, ncol), np.float32, 'surf',
-                                ext_unit_dict)
-                current_surf = t
-                surf[iper] = current_surf
-
-            if inevtr >= 0:
-                if npar == 0:
-                    if model.verbose:
-                        print(' loading evtr stress period {0:3d}...'.format(
-                            iper + 1))
-                    t = Util2d.load(f, model, (nrow, ncol), np.float32, 'evtr',
-                                    ext_unit_dict)
-                else:
-                    parm_dict = {}
-                    for ipar in range(inevtr):
-                        line = f.readline()
-                        t = line.strip().split()
-                        c = t[0].lower()
-                        if len(c) > 10:
-                            c = c[0:10]
-                        pname = c
-                        try:
-                            c = t[1].lower()
-                            instance_dict = pak_parms.bc_parms[pname][1]
-                            if c in instance_dict:
-                                iname = c
-                            else:
-                                iname = 'static'
-                        except:
-                            iname = 'static'
-                        parm_dict[pname] = iname
-                    t = mfparbc.parameter_bcfill(model, (nrow, ncol),
-                                                 parm_dict, pak_parms)
-
-                current_evtr = t
-                evtr[iper] = current_evtr
-            if inexdp >= 0:
-                if model.verbose:
-                    print(' loading exdp stress period {0:3d}...'.format(
-                        iper + 1))
-                t = Util2d.load(f, model, (nrow, ncol), np.float32, 'exdp',
-                                ext_unit_dict)
-                current_exdp = t
-                exdp[iper] = current_exdp
-            if nevtop == 2:
-                if inievt >= 0:
-                    if model.verbose:
-                        print(' loading ievt stress period {0:3d}...'.format(
-                            iper + 1))
-                    t = Util2d.load(f, model, (nrow, ncol), np.int32, 'ievt',
-                                    ext_unit_dict)
-                    current_ievt = t
-                    ievt[iper] = current_ievt
-
-        if openfile:
-            f.close()
-
-        # create evt object
-        args = {}
-        if ievt:
-            args["ievt"] = ievt
-        if nevtop:
-            args["nevtop"] = nevtop
-        if evtr:
-            args["evtr"] = evtr
-        if surf:
-            args["surf"] = surf
-        if exdp:
-            args["exdp"] = exdp
-        args["ipakcb"] = ipakcb
-
-        # determine specified unit number
-        unitnumber = None
-        filenames = [None, None]
-        if ext_unit_dict is not None:
-            unitnumber, filenames[0] = \
-                model.get_ext_dict_attr(ext_unit_dict,
-                                        filetype=ModflowEvt.ftype())
-            if ipakcb > 0:
-                iu, filenames[1] = \
-                    model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb)
-                model.add_pop_key_list(ipakcb)
-
-        # set args for unitnumber and filenames
-        args["unitnumber"] = unitnumber
-        args["filenames"] = filenames
-
-        evt = ModflowEvt(model, **args)
-
-        # return evt object
-        return evt
-
-    @staticmethod
-    def ftype():
-        return 'EVT'
-
-    @staticmethod
-    def defaultunit():
-        return 22
+"""
+mfevt module. Contains the ModflowEvt class. Note that the user can access
+the ModflowEvt class as `flopy.modflow.ModflowEvt`.
+
+Additional information for this MODFLOW package can be found at the `Online
+MODFLOW Guide
+`_.
+
+"""
+import sys
+
+import numpy as np
+from .mfparbc import ModflowParBc as mfparbc
+from ..utils import Transient2d, Util2d
+
+from ..pakbase import Package
+
+
+class ModflowEvt(Package):
+    """
+    MODFLOW Evapotranspiration Package Class.
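+
+    The package applies a linear reduction of the maximum ET flux with
+    depth. In the standard MODFLOW EVT formulation, the simulated flux for
+    a head h is::
+
+        RET = evtr                             h >= surf
+        RET = evtr * (1 - (surf - h) / exdp)   surf - exdp < h < surf
+        RET = 0                                h <= surf - exdp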
+
+    Parameters
+    ----------
+    model : model object
+        The model object (of type :class:`flopy.modflow.mf.Modflow`) to which
+        this package will be added.
+    ipakcb : int
+        A flag that is used to determine if cell-by-cell budget data should be
+        saved. If ipakcb is non-zero cell-by-cell budget data will be saved.
+        (default is 0).
+    nevtop : int
+        is the evapotranspiration (ET) option code.
+        1: ET is calculated only for cells in the top grid layer
+        2: ET is applied to the layer defined in ievt
+        3: ET is applied to the highest active cell (default is 3).
+    surf : float or filename or ndarray or dict keyed on kper (zero-based)
+        is the ET surface elevation. (default is 0.0, which is used for all
+        stress periods).
+    evtr : float or filename or ndarray or dict keyed on kper (zero-based)
+        is the maximum ET flux (default is 1e-3, which is used for all
+        stress periods).
+    exdp : float or filename or ndarray or dict keyed on kper (zero-based)
+        is the ET extinction depth (default is 1.0, which is used for all
+        stress periods).
+    ievt : int or filename or ndarray or dict keyed on kper (zero-based)
+        is the layer indicator variable (default is 1, which is used for all
+        stress periods).
+    extension : string
+        Filename extension (default is 'evt')
+    unitnumber : int
+        File unit number (default is None).
+    filenames : str or list of str
+        Filenames to use for the package and the output files. If
+        filenames=None the package name will be created using the model name
+        and package extension and the cbc output name will be created using
+        the model name and .cbc extension (for example, modflowtest.cbc),
+        if ipakcb is a number greater than zero. If a single string is passed
+        the package will be set to the string and cbc output names will be
+        created using the model name and .cbc extension, if ipakcb is a
+        number greater than zero. To define the names for all package files
+        (input and output) the length of the list of strings should be 2.
+        Default is None.
+
+    Attributes
+    ----------
+
+    Methods
+    -------
+
+    See Also
+    --------
+
+    Notes
+    -----
+    Parameters are not supported in FloPy.
+
+    Examples
+    --------
+
+    >>> import flopy
+    >>> m = flopy.modflow.Modflow()
+    >>> evt = flopy.modflow.ModflowEvt(m, nevtop=3, evtr=1.2e-4)
+
+    """
+
+    def __init__(self, model, nevtop=3, ipakcb=None, surf=0., evtr=1e-3,
+                 exdp=1.,
+                 ievt=1,
+                 extension='evt', unitnumber=None, filenames=None,
+                 external=True):
+
+        # set default unit number if one is not specified
+        if unitnumber is None:
+            unitnumber = ModflowEvt.defaultunit()
+
+        # set filenames
+        if filenames is None:
+            filenames = [None, None]
+        elif isinstance(filenames, str):
+            filenames = [filenames, None]
+        elif isinstance(filenames, list):
+            if len(filenames) < 2:
+                filenames.append(None)
+
+        # update external file information with cbc output, if necessary
+        if ipakcb is not None:
+            fname = filenames[1]
+            model.add_output_file(ipakcb, fname=fname,
+                                  package=ModflowEvt.ftype())
+        else:
+            ipakcb = 0
+
+        # Fill namefile items
+        name = [ModflowEvt.ftype()]
+        units = [unitnumber]
+        extra = ['']
+
+        # set package name
+        fname = [filenames[0]]
+
+        # Call ancestor's init to set self.parent, extension, name and unit number
+        Package.__init__(self, model, extension=extension, name=name,
+                         unit_number=units, extra=extra, filenames=fname)
+
+        nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper
+        self.heading = '# {} package for '.format(self.name[0]) + \
+                       ' {}, '.format(model.version_types[model.version]) + \
+                       'generated by Flopy.'
+ self.url = 'evt.htm' + self.nevtop = nevtop + self.ipakcb = ipakcb + self.external = external + if self.external is False: + load = True + else: + load = model.load + + self.surf = Transient2d(model, (nrow, ncol), np.float32, + surf, name='surf') + self.evtr = Transient2d(model, (nrow, ncol), np.float32, + evtr, name='evtr') + self.exdp = Transient2d(model, (nrow, ncol), np.float32, + exdp, name='exdp') + self.ievt = Transient2d(model, (nrow, ncol), np.int32, + ievt, name='ievt') + self.np = 0 + self.parent.add_package(self) + + def ncells(self): + # Returns the maximum number of cells that have + # evapotranspiration (developed for MT3DMS SSM package) + nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper + return (nrow * ncol) + + def write_file(self, f=None): + """ + Write the package file. + + Returns + ------- + None + + """ + nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper + if f is not None: + f_evt = f + else: + f_evt = open(self.fn_path, 'w') + f_evt.write('{0:s}\n'.format(self.heading)) + f_evt.write('{0:10d}{1:10d}\n'.format(self.nevtop, self.ipakcb)) + for n in range(nper): + insurf, surf = self.surf.get_kper_entry(n) + inevtr, evtr = self.evtr.get_kper_entry(n) + inexdp, exdp = self.exdp.get_kper_entry(n) + inievt, ievt = self.ievt.get_kper_entry(n) + comment = 'Evapotranspiration dataset 5 for stress period ' + \ + str(n + 1) + f_evt.write('{0:10d}{1:10d}{2:10d}{3:10d} # {4:s}\n' + .format(insurf, inevtr, inexdp, inievt, comment)) + if (insurf >= 0): + f_evt.write(surf) + if (inevtr >= 0): + f_evt.write(evtr) + if (inexdp >= 0): + f_evt.write(exdp) + if self.nevtop == 2 and inievt >= 0: + f_evt.write(ievt) + f_evt.close() + + @staticmethod + def load(f, model, nper=None, ext_unit_dict=None): + """ + Load an existing package. + + Parameters + ---------- + f : filename or file handle + File to load. + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to + which this package will be added. + nper : int + The number of stress periods. If nper is None, then nper will be + obtained from the model object. (default is None). + ext_unit_dict : dictionary, optional + If the arrays in the file are specified using EXTERNAL, + or older style array control records, then `f` should be a file + handle. In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. + + Returns + ------- + evt : ModflowEvt object + ModflowEvt object. + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> evt = flopy.modflow.mfevt.load('test.evt', m) + + """ + if model.verbose: + sys.stdout.write('loading evt package file...\n') + + openfile = not hasattr(f, 'read') + if openfile: + filename = f + f = open(filename, 'r') + + # Dataset 0 -- header + while True: + line = f.readline() + if line[0] != '#': + break + npar = 0 + if "parameter" in line.lower(): + raw = line.strip().split() + npar = int(raw[1]) + if npar > 0: + if model.verbose: + print(' Parameters detected. 
Number of parameters = ', + npar) + line = f.readline() + # Dataset 2 + t = line.strip().split() + nevtop = int(t[0]) + ipakcb = int(t[1]) + + # Dataset 3 and 4 - parameters data + pak_parms = None + if npar > 0: + pak_parms = mfparbc.loadarray(f, npar, model.verbose) + + if nper is None: + nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper() + + # Read data for every stress period + surf = {} + evtr = {} + exdp = {} + ievt = {} + current_surf = [] + current_evtr = [] + current_exdp = [] + current_ievt = [] + for iper in range(nper): + line = f.readline() + t = line.strip().split() + insurf = int(t[0]) + inevtr = int(t[1]) + inexdp = int(t[2]) + if (nevtop == 2): + inievt = int(t[3]) + if insurf >= 0: + if model.verbose: + print(' loading surf stress period {0:3d}...'.format( + iper + 1)) + t = Util2d.load(f, model, (nrow, ncol), np.float32, 'surf', + ext_unit_dict) + current_surf = t + surf[iper] = current_surf + + if inevtr >= 0: + if npar == 0: + if model.verbose: + print(' loading evtr stress period {0:3d}...'.format( + iper + 1)) + t = Util2d.load(f, model, (nrow, ncol), np.float32, 'evtr', + ext_unit_dict) + else: + parm_dict = {} + for ipar in range(inevtr): + line = f.readline() + t = line.strip().split() + c = t[0].lower() + if len(c) > 10: + c = c[0:10] + pname = c + try: + c = t[1].lower() + instance_dict = pak_parms.bc_parms[pname][1] + if c in instance_dict: + iname = c + else: + iname = 'static' + except: + iname = 'static' + parm_dict[pname] = iname + t = mfparbc.parameter_bcfill(model, (nrow, ncol), + parm_dict, pak_parms) + + current_evtr = t + evtr[iper] = current_evtr + if inexdp >= 0: + if model.verbose: + print(' loading exdp stress period {0:3d}...'.format( + iper + 1)) + t = Util2d.load(f, model, (nrow, ncol), np.float32, 'exdp', + ext_unit_dict) + current_exdp = t + exdp[iper] = current_exdp + if nevtop == 2: + if inievt >= 0: + if model.verbose: + print(' loading ievt stress period {0:3d}...'.format( + iper + 1)) + t = Util2d.load(f, model, (nrow, ncol), np.int32, 'ievt', + ext_unit_dict) + current_ievt = t + ievt[iper] = current_ievt + + if openfile: + f.close() + + # create evt object + args = {} + if ievt: + args["ievt"] = ievt + if nevtop: + args["nevtop"] = nevtop + if evtr: + args["evtr"] = evtr + if surf: + args["surf"] = surf + if exdp: + args["exdp"] = exdp + args["ipakcb"] = ipakcb + + # determine specified unit number + unitnumber = None + filenames = [None, None] + if ext_unit_dict is not None: + unitnumber, filenames[0] = \ + model.get_ext_dict_attr(ext_unit_dict, + filetype=ModflowEvt.ftype()) + if ipakcb > 0: + iu, filenames[1] = \ + model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb) + model.add_pop_key_list(ipakcb) + + # set args for unitnumber and filenames + args["unitnumber"] = unitnumber + args["filenames"] = filenames + + evt = ModflowEvt(model, **args) + + # return evt object + return evt + + @staticmethod + def ftype(): + return 'EVT' + + @staticmethod + def defaultunit(): + return 22 diff --git a/flopy/modflow/mffhb.py b/flopy/modflow/mffhb.py index 9e1ae01750..6541e46f4d 100644 --- a/flopy/modflow/mffhb.py +++ b/flopy/modflow/mffhb.py @@ -1,703 +1,703 @@ -""" -mffhb module. Contains the ModflowFhb class. Note that the user can access -the ModflowFhb class as `flopy.modflow.ModflowFhb`. - -Additional information for this MODFLOW package can be found at the `Online -MODFLOW Guide -`_. 
- -""" -import sys - -import numpy as np - -from ..pakbase import Package -from ..utils.recarray_utils import create_empty_recarray - - -class ModflowFhb(Package): - """ - MODFLOW Flow and Head Boundary Package Class. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modflow.mf.ModflowFhb`) to - which this package will be added. - nbdtim : int - The number of times at which flow and head will be specified for all - selected cells. (default is 1) - nflw : int - Number of cells at which flows will be specified. (default is 0) - nhed: int - Number of cells at which heads will be specified. (default is 0) - ifhbss : int - FHB steady-state option flag. If the simulation includes any - transient-state stress periods, the flag is read but not used; in - this case, specified-flow, specified-head, and auxiliary-variable - values will be interpolated for steady-state stress periods in the - same way that values are interpolated for transient stress periods. - If the simulation includes only steady-state stress periods, the flag - controls how flow, head, and auxiliary-variable values will be - computed for each steady-state solution. (default is 0) - ipakcb : int - A flag that is used to determine if cell-by-cell budget data should be - saved. If ipakcb is non-zero cell-by-cell budget data will be saved. - (default is None). - nfhbx1 : int - Number of auxiliary variables whose values will be computed for each - time step for each specified-flow cell. Auxiliary variables are - currently not supported. (default is 0) - nfhbx2 : int - Number of auxiliary variables whose values will be computed for each - time step for each specified-head cell. Auxiliary variables are - currently not supported. (default is 0) - ifhbpt : int - Flag for printing values of data list. Applies to datasets 4b, 5b, 6b, - 7b, and 8b. If ifhbpt > 0, datasets read at the beginning of the - simulation will be printed. Otherwise, the datasets will not be - printed. (default is 0). - bdtimecnstm : float - A constant multiplier for data list bdtime. (default is 1.0) - bdtime : float or list of floats - Simulation time at which values of specified flow and (or) values of - specified head will be read. nbdtim values are required. - (default is 0.0) - cnstm5 : float - A constant multiplier for data list flwrat. (default is 1.0) - ds5 : list or numpy array or recarray - Each FHB flwrat cell (dataset 5) is defined through definition of - layer(int), row(int), column(int), iaux(int), flwrat[nbdtime](float). - There should be nflw entries. (default is None) - The simplest form is a list of lists with the FHB flow boundaries. - This gives the form of:: - - ds5 = - [ - [lay, row, col, iaux, flwrat1, flwra2, ..., flwrat(nbdtime)], - [lay, row, col, iaux, flwrat1, flwra2, ..., flwrat(nbdtime)], - [lay, row, col, iaux, flwrat1, flwra2, ..., flwrat(nbdtime)], - [lay, row, col, iaux, flwrat1, flwra2, ..., flwrat(nbdtime)] - ] - - cnstm7 : float - A constant multiplier for data list sbhedt. (default is 1.0) - ds7 : list or numpy array or recarray - Each FHB sbhed cell (dataset 7) is defined through definition of - layer(int), row(int), column(int), iaux(int), sbhed[nbdtime](float). - There should be nhed entries. (default is None) - The simplest form is a list of lists with the FHB flow boundaries. 
- This gives the form of:: - - ds7 = - [ - [lay, row, col, iaux, sbhed1, sbhed2, ..., sbhed(nbdtime)], - [lay, row, col, iaux, sbhed1, sbhed2, ..., sbhed(nbdtime)], - [lay, row, col, iaux, sbhed1, sbhed2, ..., sbhed(nbdtime)], - [lay, row, col, iaux, sbhed1, sbhed2, ..., sbhed(nbdtime)] - ] - - extension : string - Filename extension (default is 'fhb') - unitnumber : int - File unit number (default is None). - filenames : str or list of str - Filenames to use for the package and the output files. If - filenames=None the package name will be created using the model name - and package extension and the cbc output name will be created using - the model name and .cbc extension (for example, modflowtest.cbc), - if ipakcbc is a number greater than zero. If a single string is passed - the package will be set to the string and cbc output names will be - created using the model name and .cbc extension, if ipakcbc is a - number greater than zero. To define the names for all package files - (input and output) the length of the list of strings should be 2. - Default is None. - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - Parameters are not supported in FloPy. - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> fhb = flopy.modflow.ModflowFhb(m) - - """ - - def __init__(self, model, nbdtim=1, nflw=0, nhed=0, ifhbss=0, ipakcb=None, - nfhbx1=0, nfhbx2=0, ifhbpt=0, bdtimecnstm=1.0, bdtime=[0.], - cnstm5=1.0, ds5=None, cnstm7=1.0, ds7=None, extension='fhb', - unitnumber=None, filenames=None): - - # set default unit number of one is not specified - if unitnumber is None: - unitnumber = ModflowFhb.defaultunit() - - # set filenames - if filenames is None: - filenames = [None, None] - elif isinstance(filenames, str): - filenames = [filenames, None] - elif isinstance(filenames, list): - if len(filenames) < 2: - filenames.append(None) - - # update external file information with cbc output, if necessary - if ipakcb is not None: - fname = filenames[1] - model.add_output_file(ipakcb, fname=fname, - package=ModflowFhb.ftype()) - else: - ipakcb = 0 - - # Fill namefile items - name = [ModflowFhb.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - self.heading = '# {} package for '.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' 
- self.url = 'flow_and_head_boundary_packag2.htm' - - self.nbdtim = nbdtim - self.nflw = nflw - self.nhed = nhed - self.ifhbss = ifhbss - self.ipakcb = ipakcb - if nfhbx1 != 0: - nfhbx1 = 0 - self.nfhbx1 = nfhbx1 - if nfhbx2 != 0: - nfhbx2 = 0 - self.nfhbx2 = nfhbx2 - self.ifhbpt = ifhbpt - self.bdtimecnstm = bdtimecnstm - if isinstance(bdtime, float): - bdtime = [bdtime] - self.bdtime = bdtime - self.cnstm5 = cnstm5 - self.cnstm7 = cnstm7 - - # check the type of dataset 5 - if ds5 is not None: - dtype = ModflowFhb.get_default_dtype(nbdtim=nbdtim, head=False, - structured=model.structured) - if isinstance(ds5, (float, int, str)): - msg = 'dataset 5 must be a list of lists or a numpy array' - raise TypeError(msg) - elif isinstance(ds5, list): - ds5 = np.array(ds5) - # convert numpy array to a recarray - if ds5.dtype != dtype: - ds5 = np.core.records.fromarrays(ds5.transpose(), dtype=dtype) - - # assign dataset 5 - self.ds5 = ds5 - - # check the type of dataset 7 - if ds7 is not None: - dtype = ModflowFhb.get_default_dtype(nbdtim=nbdtim, head=True, - structured=model.structured) - if isinstance(ds7, (float, int, str)): - msg = 'dataset 7 must be a list of lists or a numpy array' - raise TypeError(msg) - elif isinstance(ds7, list): - ds7 = np.array(ds7) - # convert numpy array to a recarray - if ds7.dtype != dtype: - ds7 = np.core.records.fromarrays(ds7.transpose(), dtype=dtype) - - # assign dataset 7 - self.ds7 = ds7 - - # perform some simple verification - if len(self.bdtime) != self.nbdtim: - msg = 'bdtime has {} entries '.format(len(self.bdtime)) + \ - 'but requires {} entries.'.format(self.nbdtim) - raise ValueError(msg) - - if self.nflw > 0: - if self.ds5 is None: - msg = 'dataset 5 is not specified but ' + \ - 'nflw > 0 ({})'.format(self.nflw) - raise TypeError(msg) - - if self.ds5.shape[0] != self.nflw: - msg = 'dataset 5 has {} rows '.format(self.ds5.shape[0]) + \ - 'but requires {} rows.'.format(self.nflw) - raise ValueError(msg) - nc = self.nbdtim - if model.structured: - nc += 4 - else: - nc += 2 - if len(self.ds5.dtype.names) != nc: - msg = 'dataset 5 has {} '.format(len(self.ds5.dtype.names)) + \ - 'columns but requires {} columns.'.format(nc) - raise ValueError(msg) - - if self.nhed > 0: - if self.ds7 is None: - msg = 'dataset 7 is not specified but ' + \ - 'nhed > 0 ({})'.format(self.nhed) - raise TypeError(msg) - if self.ds7.shape[0] != self.nhed: - msg = 'dataset 7 has {} rows '.format(self.ds7.shape[0]) + \ - 'but requires {} rows.'.format(self.nhed) - raise ValueError(msg) - nc = self.nbdtim - if model.structured: - nc += 4 - else: - nc += 2 - if len(self.ds7.dtype.names) != nc: - msg = 'dataset 7 has {} '.format(len(self.ds7.dtype.names)) + \ - 'columns but requires {} columns.'.format(nc) - raise ValueError(msg) - - self.parent.add_package(self) - - @staticmethod - def get_empty(ncells=0, nbdtim=1, structured=True, head=False): - # get an empty recarray that corresponds to dtype - dtype = ModflowFhb.get_default_dtype(nbdtim=nbdtim, - structured=structured, head=head) - return create_empty_recarray(ncells, dtype, default_value=-1.0E+10) - - @staticmethod - def get_default_dtype(nbdtim=1, structured=True, head=False): - if structured: - dtype = [("k", np.int), ("i", np.int), ("j", np.int)] - else: - dtype = [("node", np.int)] - dtype.append(("iaux", np.int)) - for n in range(nbdtim): - if head: - name = ("sbhed{}".format(n + 1)) - else: - name = ("flwrat{}".format(n + 1)) - dtype.append((name, np.float32)) - return np.dtype(dtype) - - def ncells(self): - # Return the 
maximum number of cells that have a fhb flow or - # head boundary. (developed for MT3DMS SSM package) - return self.nflw + self.nhed - - def write_file(self): - """ - Write the package file. - - Returns - ------- - None - - """ - nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper - f = open(self.fn_path, 'w') - # f.write('{0:s}\n'.format(self.heading)) - - # Data set 1 - f.write('{} '.format(self.nbdtim)) - f.write('{} '.format(self.nflw)) - f.write('{} '.format(self.nhed)) - f.write('{} '.format(self.ifhbss)) - f.write('{} '.format(self.ipakcb)) - f.write('{} '.format(self.nfhbx1)) - f.write('{}\n'.format(self.nfhbx2)) - - # Dataset 2 - flow auxiliary names - - # Dataset 3 - head auxiliary names - - # Dataset 4a IFHBUN CNSTM IFHBPT - f.write('{} '.format(self.unit_number[0])) - f.write('{} '.format(self.bdtimecnstm)) - f.write('{}\n'.format(self.ifhbpt)) - - # Dataset 4b - for n in range(self.nbdtim): - f.write('{} '.format(self.bdtime[n])) - f.write('\n') - - # Dataset 5 and 6 - if self.nflw > 0: - # Dataset 5a IFHBUN CNSTM IFHBPT - f.write('{} '.format(self.unit_number[0])) - f.write('{} '.format(self.cnstm5)) - f.write('{}\n'.format(self.ifhbpt)) - - # Dataset 5b - for n in range(self.nflw): - for name in self.ds5.dtype.names: - v = self.ds5[n][name] - if name in ['k', 'i', 'j', 'node']: - v += 1 - f.write('{} '.format(v)) - f.write('\n') - - # Dataset 6a and 6b - flow auxiliary data - if self.nfhbx1 > 0: - i = 0 - - # Dataset 7 - if self.nhed > 0: - # Dataset 7a IFHBUN CNSTM IFHBPT - f.write('{} '.format(self.unit_number[0])) - f.write('{} '.format(self.cnstm7)) - f.write('{}\n'.format(self.ifhbpt)) - - # Dataset 7b IFHBUN CNSTM IFHBPT - for n in range(self.nhed): - for name in self.ds7.dtype.names: - v = self.ds7[n][name] - if name in ['k', 'i', 'j', 'node']: - v += 1 - f.write('{} '.format(v)) - f.write('\n') - - # Dataset 8a and 8b - head auxiliary data - if self.nfhbx2 > 0: - i = 1 - - f.close() - - @staticmethod - def load(f, model, nper=None, ext_unit_dict=None): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. - nper : int - The number of stress periods. If nper is None, then nper will be - obtained from the model object. (default is None). - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - - Returns - ------- - fhb : ModflowFhb object - ModflowFhb object. 
- - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> fhb = flopy.modflow.ModflowFhb.load('test.fhb', m) - - """ - if model.verbose: - sys.stdout.write('loading fhb package file...\n') - - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - - # determine package unit number - iufhb = None - if ext_unit_dict is not None: - iufhb, fname = \ - model.get_ext_dict_attr(ext_unit_dict, - filetype=ModflowFhb.ftype()) - - # Dataset 0 -- header - while True: - line = f.readline() - if line[0] != '#': - break - - # dataset 1 - if model.verbose: - sys.stdout.write('loading fhb dataset 1\n') - raw = line.strip().split() - nbdtim = int(raw[0]) - nflw = int(raw[1]) - nhed = int(raw[2]) - ifhbss = int(raw[3]) - ipakcb = int(raw[4]) - nfhbx1 = int(raw[5]) - nfhbx2 = int(raw[6]) - - ifhbpt = 0 - - # Dataset 2 - flow_aux = [] - if nfhbx1 > 0: - if model.verbose: - sys.stdout.write('loading fhb dataset 2\n') - msg = 'dataset 2 will not be preserved ' + \ - 'in the created hfb object.\n' - sys.stdout.write(msg) - for idx in range(nfhbx1): - line = f.readline() - raw = line.strip().split() - varnam = raw[0] - if len(varnam) > 16: - varnam = varnam[0:16] - weight = float(raw[1]) - flow_aux.append([varnam, weight]) - - # Dataset 3 - head_aux = [] - if nfhbx2 > 0: - if model.verbose: - sys.stdout.write('loading fhb dataset 3\n') - msg = 'dataset 3 will not be preserved ' + \ - 'in the created hfb object.\n' - sys.stdout.write(msg) - for idx in range(nfhbx2): - line = f.readline() - raw = line.strip().split() - varnam = raw[0] - if len(varnam) > 16: - varnam = varnam[0:16] - weight = float(raw[1]) - head_aux.append([varnam, weight]) - - # Dataset 4a IFHBUN CNSTM IFHBPT - if model.verbose: - sys.stdout.write('loading fhb dataset 4a\n') - line = f.readline() - raw = line.strip().split() - ifhbun = int(raw[0]) - if ifhbun != iufhb: - msg = 'fhb dataset 4a must be in the fhb file ' - msg += '(unit={}) '.format(iufhb) - msg += 'fhb data is specified in unit={}'.format(ifhbun) - raise ValueError(msg) - bdtimecnstm = float(raw[1]) - ifhbpt = max(ifhbpt, int(raw[2])) - - # Dataset 4b - if model.verbose: - sys.stdout.write('loading fhb dataset 4b\n') - line = f.readline() - raw = line.strip().split() - bdtime = [] - for n in range(nbdtim): - bdtime.append(float(raw[n])) - - # Dataset 5 and 6 - cnstm5 = None - ds5 = None - cnstm6 = None - ds6 = None - if nflw > 0: - if model.verbose: - sys.stdout.write('loading fhb dataset 5a\n') - # Dataset 5a IFHBUN CNSTM IFHBPT - line = f.readline() - raw = line.strip().split() - ifhbun = int(raw[0]) - if ifhbun != iufhb: - msg = 'fhb dataset 5a must be in the fhb file ' - msg += '(unit={}) '.format(iufhb) - msg += 'fhb data is specified in unit={}'.format(ifhbun) - raise ValueError(msg) - cnstm5 = float(raw[1]) - ifhbpt = max(ifhbpt, int(raw[2])) - - if model.verbose: - sys.stdout.write('loading fhb dataset 5b\n') - dtype = ModflowFhb.get_default_dtype(nbdtim=nbdtim, head=False, - structured=model.structured) - ds5 = ModflowFhb.get_empty(ncells=nflw, nbdtim=nbdtim, head=False, - structured=model.structured) - for n in range(nflw): - line = f.readline() - raw = line.strip().split() - ds5[n] = tuple(raw[:len(dtype.names)]) - - if model.structured: - ds5['k'] -= 1 - ds5['i'] -= 1 - ds5['j'] -= 1 - else: - ds5['node'] -= 1 - - # Dataset 6 - if nfhbx1 > 0: - cnstm6 = [] - ds6 = [] - dtype = [] - for name, weight in flow_aux: - dtype.append((name, np.float32)) - for naux in range(nfhbx1): - if model.verbose: - 
sys.stdout.write('loading fhb dataset 6a - aux ' + - '{}\n'.format(naux + 1)) - msg = 'dataset 6a will not be preserved in ' + \ - 'the created hfb object.\n' - sys.stdout.write(msg) - # Dataset 6a IFHBUN CNSTM IFHBPT - line = f.readline() - raw = line.strip().split() - ifhbun = int(raw[0]) - if ifhbun != iufhb: - msg = 'fhb dataset 6a must be in the fhb file ' - msg += '(unit={}) '.format(iufhb) - msg += 'fhb data is specified in ' + \ - 'unit={}'.format(ifhbun) - raise ValueError(msg) - cnstm6.append(float(raw[1])) - ifhbpt = max(ifhbpt, int(raw[2])) - - if model.verbose: - sys.stdout.write('loading fhb dataset 6b - aux ' + - '{}\n'.format(naux + 1)) - msg = 'dataset 6b will not be preserved in ' + \ - 'the created hfb object.\n' - sys.stdout.write(msg) - current = np.recarray(nflw, dtype=dtype) - for n in range(nflw): - line = f.readline() - raw = line.strip().split() - current[n] = tuple(raw[:len(dtype.names)]) - ds6.append(current.copy()) - - # Dataset 7 - cnstm7 = None - ds7 = None - cnstm8 = None - ds8 = None - if nhed > 0: - if model.verbose: - sys.stdout.write('loading fhb dataset 7a\n') - # Dataset 7a IFHBUN CNSTM IFHBPT - line = f.readline() - raw = line.strip().split() - ifhbun = int(raw[0]) - if ifhbun != iufhb: - msg = 'fhb dataset 7a must be in the fhb file ' - msg += '(unit={}) '.format(iufhb) - msg += 'fhb data is specified in unit={}'.format(ifhbun) - raise ValueError(msg) - cnstm7 = float(raw[1]) - ifhbpt = max(ifhbpt, int(raw[2])) - - if model.verbose: - sys.stdout.write('loading fhb dataset 7b\n') - dtype = ModflowFhb.get_default_dtype(nbdtim=nbdtim, head=True, - structured=model.structured) - ds7 = ModflowFhb.get_empty(ncells=nhed, nbdtim=nbdtim, head=True, - structured=model.structured) - for n in range(nhed): - line = f.readline() - raw = line.strip().split() - ds7[n] = tuple(raw[:len(dtype.names)]) - - if model.structured: - ds7['k'] -= 1 - ds7['i'] -= 1 - ds7['j'] -= 1 - else: - ds7['node'] -= 1 - - # Dataset 8 - if nfhbx2 > 0: - cnstm8 = [] - ds8 = [] - dtype = [] - for name, weight in head_aux: - dtype.append((name, np.float32)) - for naux in range(nfhbx1): - if model.verbose: - sys.stdout.write('loading fhb dataset 8a - aux ' + - '{}\n'.format(naux + 1)) - msg = 'dataset 8a will not be preserved in ' + \ - 'the created hfb object.\n' - sys.stdout.write(msg) - # Dataset 6a IFHBUN CNSTM IFHBPT - line = f.readline() - raw = line.strip().split() - ifhbun = int(raw[0]) - if ifhbun != iufhb: - msg = 'fhb dataset 8a must be in the fhb file ' - msg += '(unit={}) '.format(iufhb) - msg += 'fhb data is specified in ' + \ - 'unit={}'.format(ifhbun) - raise ValueError(msg) - cnstm8.append(float(raw[1])) - ifhbpt6 = int(raw[2]) - ifhbpt = max(ifhbpt, ifhbpt6) - - if model.verbose: - sys.stdout.write('loading fhb dataset 8b - aux ' + - '{}\n'.format(naux + 1)) - msg = 'dataset 8b will not be preserved in ' + \ - 'the created hfb object.' 
- sys.stdout.write(msg) - current = np.recarray(nflw, dtype=dtype) - for n in range(nhed): - line = f.readline() - raw = line.strip().split() - current[n] = tuple(raw[:len(dtype.names)]) - ds8.append(current.copy()) - - if openfile: - f.close() - - # determine specified unit number - unitnumber = None - filenames = [None, None] - if ext_unit_dict is not None: - unitnumber, filenames[0] = \ - model.get_ext_dict_attr(ext_unit_dict, - filetype=ModflowFhb.ftype()) - if ipakcb > 0: - iu, filenames[1] = \ - model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb) - model.add_pop_key_list(ipakcb) - - # auxiliary data are not passed to load instantiation - nfhbx1 = 0 - nfhbx2 = 0 - - fhb = ModflowFhb(model, nbdtim=nbdtim, nflw=nflw, nhed=nhed, - ifhbss=ifhbss, ipakcb=ipakcb, - nfhbx1=nfhbx1, nfhbx2=nfhbx2, ifhbpt=ifhbpt, - bdtimecnstm=bdtimecnstm, bdtime=bdtime, - cnstm5=cnstm5, ds5=ds5, cnstm7=cnstm7, ds7=ds7, - unitnumber=unitnumber, filenames=filenames) - - # return fhb object - return fhb - - @staticmethod - def ftype(): - return 'FHB' - - @staticmethod - def defaultunit(): - return 40 +""" +mffhb module. Contains the ModflowFhb class. Note that the user can access +the ModflowFhb class as `flopy.modflow.ModflowFhb`. + +Additional information for this MODFLOW package can be found at the `Online +MODFLOW Guide +`_. + +""" +import sys + +import numpy as np + +from ..pakbase import Package +from ..utils.recarray_utils import create_empty_recarray + + +class ModflowFhb(Package): + """ + MODFLOW Flow and Head Boundary Package Class. + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.modflow.mf.ModflowFhb`) to + which this package will be added. + nbdtim : int + The number of times at which flow and head will be specified for all + selected cells. (default is 1) + nflw : int + Number of cells at which flows will be specified. (default is 0) + nhed: int + Number of cells at which heads will be specified. (default is 0) + ifhbss : int + FHB steady-state option flag. If the simulation includes any + transient-state stress periods, the flag is read but not used; in + this case, specified-flow, specified-head, and auxiliary-variable + values will be interpolated for steady-state stress periods in the + same way that values are interpolated for transient stress periods. + If the simulation includes only steady-state stress periods, the flag + controls how flow, head, and auxiliary-variable values will be + computed for each steady-state solution. (default is 0) + ipakcb : int + A flag that is used to determine if cell-by-cell budget data should be + saved. If ipakcb is non-zero cell-by-cell budget data will be saved. + (default is None). + nfhbx1 : int + Number of auxiliary variables whose values will be computed for each + time step for each specified-flow cell. Auxiliary variables are + currently not supported. (default is 0) + nfhbx2 : int + Number of auxiliary variables whose values will be computed for each + time step for each specified-head cell. Auxiliary variables are + currently not supported. (default is 0) + ifhbpt : int + Flag for printing values of data list. Applies to datasets 4b, 5b, 6b, + 7b, and 8b. If ifhbpt > 0, datasets read at the beginning of the + simulation will be printed. Otherwise, the datasets will not be + printed. (default is 0). + bdtimecnstm : float + A constant multiplier for data list bdtime. 
(default is 1.0)
+    bdtime : float or list of floats
+        Simulation time at which values of specified flow and (or) values of
+        specified head will be read. nbdtim values are required.
+        (default is 0.0)
+    cnstm5 : float
+        A constant multiplier for data list flwrat. (default is 1.0)
+    ds5 : list or numpy array or recarray
+        Each FHB flwrat cell (dataset 5) is defined through definition of
+        layer(int), row(int), column(int), iaux(int), flwrat[nbdtime](float).
+        There should be nflw entries. (default is None)
+        The simplest form is a list of lists with the FHB flow boundaries.
+        This gives the form of::
+
+            ds5 =
+            [
+                [lay, row, col, iaux, flwrat1, flwrat2, ..., flwrat(nbdtime)],
+                [lay, row, col, iaux, flwrat1, flwrat2, ..., flwrat(nbdtime)],
+                [lay, row, col, iaux, flwrat1, flwrat2, ..., flwrat(nbdtime)],
+                [lay, row, col, iaux, flwrat1, flwrat2, ..., flwrat(nbdtime)]
+            ]
+
+    cnstm7 : float
+        A constant multiplier for data list sbhed. (default is 1.0)
+    ds7 : list or numpy array or recarray
+        Each FHB sbhed cell (dataset 7) is defined through definition of
+        layer(int), row(int), column(int), iaux(int), sbhed[nbdtime](float).
+        There should be nhed entries. (default is None)
+        The simplest form is a list of lists with the FHB head boundaries.
+        This gives the form of::
+
+            ds7 =
+            [
+                [lay, row, col, iaux, sbhed1, sbhed2, ..., sbhed(nbdtime)],
+                [lay, row, col, iaux, sbhed1, sbhed2, ..., sbhed(nbdtime)],
+                [lay, row, col, iaux, sbhed1, sbhed2, ..., sbhed(nbdtime)],
+                [lay, row, col, iaux, sbhed1, sbhed2, ..., sbhed(nbdtime)]
+            ]
+
+    extension : string
+        Filename extension (default is 'fhb')
+    unitnumber : int
+        File unit number (default is None).
+    filenames : str or list of str
+        Filenames to use for the package and the output files. If
+        filenames=None the package name will be created using the model name
+        and package extension and the cbc output name will be created using
+        the model name and .cbc extension (for example, modflowtest.cbc),
+        if ipakcb is a number greater than zero. If a single string is passed
+        the package will be set to the string and cbc output names will be
+        created using the model name and .cbc extension, if ipakcb is a
+        number greater than zero. To define the names for all package files
+        (input and output) the length of the list of strings should be 2.
+        Default is None.
+
+    Attributes
+    ----------
+
+    Methods
+    -------
+
+    See Also
+    --------
+
+    Notes
+    -----
+    Parameters are not supported in FloPy.
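To make the ds5 layout above concrete, a minimal sketch, assuming a small structured model; the grid dimensions, cell indices, times, and flow rates are all hypothetical:

    import flopy

    m = flopy.modflow.Modflow()
    dis = flopy.modflow.ModflowDis(m, nlay=1, nrow=3, ncol=3)
    # one specified-flow cell defined at two boundary times:
    # [lay, row, col, iaux, flwrat1, flwrat2], zero-based indices
    ds5 = [[0, 1, 1, 0, 100.0, 50.0]]
    fhb = flopy.modflow.ModflowFhb(m, nbdtim=2, nflw=1,
                                   bdtime=[0.0, 10.0], ds5=ds5)

FloPy converts the list to a recarray internally and verifies that bdtime has nbdtim entries and that ds5 has nflw rows and nbdtim + 4 columns.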
+ + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> fhb = flopy.modflow.ModflowFhb(m) + + """ + + def __init__(self, model, nbdtim=1, nflw=0, nhed=0, ifhbss=0, ipakcb=None, + nfhbx1=0, nfhbx2=0, ifhbpt=0, bdtimecnstm=1.0, bdtime=[0.], + cnstm5=1.0, ds5=None, cnstm7=1.0, ds7=None, extension='fhb', + unitnumber=None, filenames=None): + + # set default unit number of one is not specified + if unitnumber is None: + unitnumber = ModflowFhb.defaultunit() + + # set filenames + if filenames is None: + filenames = [None, None] + elif isinstance(filenames, str): + filenames = [filenames, None] + elif isinstance(filenames, list): + if len(filenames) < 2: + filenames.append(None) + + # update external file information with cbc output, if necessary + if ipakcb is not None: + fname = filenames[1] + model.add_output_file(ipakcb, fname=fname, + package=ModflowFhb.ftype()) + else: + ipakcb = 0 + + # Fill namefile items + name = [ModflowFhb.ftype()] + units = [unitnumber] + extra = [''] + + # set package name + fname = [filenames[0]] + + # Call ancestor's init to set self.parent, extension, name and unit number + Package.__init__(self, model, extension=extension, name=name, + unit_number=units, extra=extra, filenames=fname) + + self.heading = '# {} package for '.format(self.name[0]) + \ + ' {}, '.format(model.version_types[model.version]) + \ + 'generated by Flopy.' + self.url = 'flow_and_head_boundary_packag2.htm' + + self.nbdtim = nbdtim + self.nflw = nflw + self.nhed = nhed + self.ifhbss = ifhbss + self.ipakcb = ipakcb + if nfhbx1 != 0: + nfhbx1 = 0 + self.nfhbx1 = nfhbx1 + if nfhbx2 != 0: + nfhbx2 = 0 + self.nfhbx2 = nfhbx2 + self.ifhbpt = ifhbpt + self.bdtimecnstm = bdtimecnstm + if isinstance(bdtime, float): + bdtime = [bdtime] + self.bdtime = bdtime + self.cnstm5 = cnstm5 + self.cnstm7 = cnstm7 + + # check the type of dataset 5 + if ds5 is not None: + dtype = ModflowFhb.get_default_dtype(nbdtim=nbdtim, head=False, + structured=model.structured) + if isinstance(ds5, (float, int, str)): + msg = 'dataset 5 must be a list of lists or a numpy array' + raise TypeError(msg) + elif isinstance(ds5, list): + ds5 = np.array(ds5) + # convert numpy array to a recarray + if ds5.dtype != dtype: + ds5 = np.core.records.fromarrays(ds5.transpose(), dtype=dtype) + + # assign dataset 5 + self.ds5 = ds5 + + # check the type of dataset 7 + if ds7 is not None: + dtype = ModflowFhb.get_default_dtype(nbdtim=nbdtim, head=True, + structured=model.structured) + if isinstance(ds7, (float, int, str)): + msg = 'dataset 7 must be a list of lists or a numpy array' + raise TypeError(msg) + elif isinstance(ds7, list): + ds7 = np.array(ds7) + # convert numpy array to a recarray + if ds7.dtype != dtype: + ds7 = np.core.records.fromarrays(ds7.transpose(), dtype=dtype) + + # assign dataset 7 + self.ds7 = ds7 + + # perform some simple verification + if len(self.bdtime) != self.nbdtim: + msg = 'bdtime has {} entries '.format(len(self.bdtime)) + \ + 'but requires {} entries.'.format(self.nbdtim) + raise ValueError(msg) + + if self.nflw > 0: + if self.ds5 is None: + msg = 'dataset 5 is not specified but ' + \ + 'nflw > 0 ({})'.format(self.nflw) + raise TypeError(msg) + + if self.ds5.shape[0] != self.nflw: + msg = 'dataset 5 has {} rows '.format(self.ds5.shape[0]) + \ + 'but requires {} rows.'.format(self.nflw) + raise ValueError(msg) + nc = self.nbdtim + if model.structured: + nc += 4 + else: + nc += 2 + if len(self.ds5.dtype.names) != nc: + msg = 'dataset 5 has {} '.format(len(self.ds5.dtype.names)) + \ + 
'columns but requires {} columns.'.format(nc) + raise ValueError(msg) + + if self.nhed > 0: + if self.ds7 is None: + msg = 'dataset 7 is not specified but ' + \ + 'nhed > 0 ({})'.format(self.nhed) + raise TypeError(msg) + if self.ds7.shape[0] != self.nhed: + msg = 'dataset 7 has {} rows '.format(self.ds7.shape[0]) + \ + 'but requires {} rows.'.format(self.nhed) + raise ValueError(msg) + nc = self.nbdtim + if model.structured: + nc += 4 + else: + nc += 2 + if len(self.ds7.dtype.names) != nc: + msg = 'dataset 7 has {} '.format(len(self.ds7.dtype.names)) + \ + 'columns but requires {} columns.'.format(nc) + raise ValueError(msg) + + self.parent.add_package(self) + + @staticmethod + def get_empty(ncells=0, nbdtim=1, structured=True, head=False): + # get an empty recarray that corresponds to dtype + dtype = ModflowFhb.get_default_dtype(nbdtim=nbdtim, + structured=structured, head=head) + return create_empty_recarray(ncells, dtype, default_value=-1.0E+10) + + @staticmethod + def get_default_dtype(nbdtim=1, structured=True, head=False): + if structured: + dtype = [("k", np.int), ("i", np.int), ("j", np.int)] + else: + dtype = [("node", np.int)] + dtype.append(("iaux", np.int)) + for n in range(nbdtim): + if head: + name = ("sbhed{}".format(n + 1)) + else: + name = ("flwrat{}".format(n + 1)) + dtype.append((name, np.float32)) + return np.dtype(dtype) + + def ncells(self): + # Return the maximum number of cells that have a fhb flow or + # head boundary. (developed for MT3DMS SSM package) + return self.nflw + self.nhed + + def write_file(self): + """ + Write the package file. + + Returns + ------- + None + + """ + nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper + f = open(self.fn_path, 'w') + # f.write('{0:s}\n'.format(self.heading)) + + # Data set 1 + f.write('{} '.format(self.nbdtim)) + f.write('{} '.format(self.nflw)) + f.write('{} '.format(self.nhed)) + f.write('{} '.format(self.ifhbss)) + f.write('{} '.format(self.ipakcb)) + f.write('{} '.format(self.nfhbx1)) + f.write('{}\n'.format(self.nfhbx2)) + + # Dataset 2 - flow auxiliary names + + # Dataset 3 - head auxiliary names + + # Dataset 4a IFHBUN CNSTM IFHBPT + f.write('{} '.format(self.unit_number[0])) + f.write('{} '.format(self.bdtimecnstm)) + f.write('{}\n'.format(self.ifhbpt)) + + # Dataset 4b + for n in range(self.nbdtim): + f.write('{} '.format(self.bdtime[n])) + f.write('\n') + + # Dataset 5 and 6 + if self.nflw > 0: + # Dataset 5a IFHBUN CNSTM IFHBPT + f.write('{} '.format(self.unit_number[0])) + f.write('{} '.format(self.cnstm5)) + f.write('{}\n'.format(self.ifhbpt)) + + # Dataset 5b + for n in range(self.nflw): + for name in self.ds5.dtype.names: + v = self.ds5[n][name] + if name in ['k', 'i', 'j', 'node']: + v += 1 + f.write('{} '.format(v)) + f.write('\n') + + # Dataset 6a and 6b - flow auxiliary data + if self.nfhbx1 > 0: + i = 0 + + # Dataset 7 + if self.nhed > 0: + # Dataset 7a IFHBUN CNSTM IFHBPT + f.write('{} '.format(self.unit_number[0])) + f.write('{} '.format(self.cnstm7)) + f.write('{}\n'.format(self.ifhbpt)) + + # Dataset 7b IFHBUN CNSTM IFHBPT + for n in range(self.nhed): + for name in self.ds7.dtype.names: + v = self.ds7[n][name] + if name in ['k', 'i', 'j', 'node']: + v += 1 + f.write('{} '.format(v)) + f.write('\n') + + # Dataset 8a and 8b - head auxiliary data + if self.nfhbx2 > 0: + i = 1 + + f.close() + + @staticmethod + def load(f, model, nper=None, ext_unit_dict=None): + """ + Load an existing package. + + Parameters + ---------- + f : filename or file handle + File to load. 
+ model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to + which this package will be added. + nper : int + The number of stress periods. If nper is None, then nper will be + obtained from the model object. (default is None). + ext_unit_dict : dictionary, optional + If the arrays in the file are specified using EXTERNAL, + or older style array control records, then `f` should be a file + handle. In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. + + Returns + ------- + fhb : ModflowFhb object + ModflowFhb object. + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> fhb = flopy.modflow.ModflowFhb.load('test.fhb', m) + + """ + if model.verbose: + sys.stdout.write('loading fhb package file...\n') + + openfile = not hasattr(f, 'read') + if openfile: + filename = f + f = open(filename, 'r') + + # determine package unit number + iufhb = None + if ext_unit_dict is not None: + iufhb, fname = \ + model.get_ext_dict_attr(ext_unit_dict, + filetype=ModflowFhb.ftype()) + + # Dataset 0 -- header + while True: + line = f.readline() + if line[0] != '#': + break + + # dataset 1 + if model.verbose: + sys.stdout.write('loading fhb dataset 1\n') + raw = line.strip().split() + nbdtim = int(raw[0]) + nflw = int(raw[1]) + nhed = int(raw[2]) + ifhbss = int(raw[3]) + ipakcb = int(raw[4]) + nfhbx1 = int(raw[5]) + nfhbx2 = int(raw[6]) + + ifhbpt = 0 + + # Dataset 2 + flow_aux = [] + if nfhbx1 > 0: + if model.verbose: + sys.stdout.write('loading fhb dataset 2\n') + msg = 'dataset 2 will not be preserved ' + \ + 'in the created hfb object.\n' + sys.stdout.write(msg) + for idx in range(nfhbx1): + line = f.readline() + raw = line.strip().split() + varnam = raw[0] + if len(varnam) > 16: + varnam = varnam[0:16] + weight = float(raw[1]) + flow_aux.append([varnam, weight]) + + # Dataset 3 + head_aux = [] + if nfhbx2 > 0: + if model.verbose: + sys.stdout.write('loading fhb dataset 3\n') + msg = 'dataset 3 will not be preserved ' + \ + 'in the created hfb object.\n' + sys.stdout.write(msg) + for idx in range(nfhbx2): + line = f.readline() + raw = line.strip().split() + varnam = raw[0] + if len(varnam) > 16: + varnam = varnam[0:16] + weight = float(raw[1]) + head_aux.append([varnam, weight]) + + # Dataset 4a IFHBUN CNSTM IFHBPT + if model.verbose: + sys.stdout.write('loading fhb dataset 4a\n') + line = f.readline() + raw = line.strip().split() + ifhbun = int(raw[0]) + if ifhbun != iufhb: + msg = 'fhb dataset 4a must be in the fhb file ' + msg += '(unit={}) '.format(iufhb) + msg += 'fhb data is specified in unit={}'.format(ifhbun) + raise ValueError(msg) + bdtimecnstm = float(raw[1]) + ifhbpt = max(ifhbpt, int(raw[2])) + + # Dataset 4b + if model.verbose: + sys.stdout.write('loading fhb dataset 4b\n') + line = f.readline() + raw = line.strip().split() + bdtime = [] + for n in range(nbdtim): + bdtime.append(float(raw[n])) + + # Dataset 5 and 6 + cnstm5 = None + ds5 = None + cnstm6 = None + ds6 = None + if nflw > 0: + if model.verbose: + sys.stdout.write('loading fhb dataset 5a\n') + # Dataset 5a IFHBUN CNSTM IFHBPT + line = f.readline() + raw = line.strip().split() + ifhbun = int(raw[0]) + if ifhbun != iufhb: + msg = 'fhb dataset 5a must be in the fhb file ' + msg += '(unit={}) '.format(iufhb) + msg += 'fhb data is specified in unit={}'.format(ifhbun) + raise ValueError(msg) + cnstm5 = float(raw[1]) + ifhbpt = max(ifhbpt, int(raw[2])) + + if model.verbose: + sys.stdout.write('loading 
fhb dataset 5b\n') + dtype = ModflowFhb.get_default_dtype(nbdtim=nbdtim, head=False, + structured=model.structured) + ds5 = ModflowFhb.get_empty(ncells=nflw, nbdtim=nbdtim, head=False, + structured=model.structured) + for n in range(nflw): + line = f.readline() + raw = line.strip().split() + ds5[n] = tuple(raw[:len(dtype.names)]) + + if model.structured: + ds5['k'] -= 1 + ds5['i'] -= 1 + ds5['j'] -= 1 + else: + ds5['node'] -= 1 + + # Dataset 6 + if nfhbx1 > 0: + cnstm6 = [] + ds6 = [] + dtype = [] + for name, weight in flow_aux: + dtype.append((name, np.float32)) + for naux in range(nfhbx1): + if model.verbose: + sys.stdout.write('loading fhb dataset 6a - aux ' + + '{}\n'.format(naux + 1)) + msg = 'dataset 6a will not be preserved in ' + \ + 'the created hfb object.\n' + sys.stdout.write(msg) + # Dataset 6a IFHBUN CNSTM IFHBPT + line = f.readline() + raw = line.strip().split() + ifhbun = int(raw[0]) + if ifhbun != iufhb: + msg = 'fhb dataset 6a must be in the fhb file ' + msg += '(unit={}) '.format(iufhb) + msg += 'fhb data is specified in ' + \ + 'unit={}'.format(ifhbun) + raise ValueError(msg) + cnstm6.append(float(raw[1])) + ifhbpt = max(ifhbpt, int(raw[2])) + + if model.verbose: + sys.stdout.write('loading fhb dataset 6b - aux ' + + '{}\n'.format(naux + 1)) + msg = 'dataset 6b will not be preserved in ' + \ + 'the created hfb object.\n' + sys.stdout.write(msg) + current = np.recarray(nflw, dtype=dtype) + for n in range(nflw): + line = f.readline() + raw = line.strip().split() + current[n] = tuple(raw[:len(dtype.names)]) + ds6.append(current.copy()) + + # Dataset 7 + cnstm7 = None + ds7 = None + cnstm8 = None + ds8 = None + if nhed > 0: + if model.verbose: + sys.stdout.write('loading fhb dataset 7a\n') + # Dataset 7a IFHBUN CNSTM IFHBPT + line = f.readline() + raw = line.strip().split() + ifhbun = int(raw[0]) + if ifhbun != iufhb: + msg = 'fhb dataset 7a must be in the fhb file ' + msg += '(unit={}) '.format(iufhb) + msg += 'fhb data is specified in unit={}'.format(ifhbun) + raise ValueError(msg) + cnstm7 = float(raw[1]) + ifhbpt = max(ifhbpt, int(raw[2])) + + if model.verbose: + sys.stdout.write('loading fhb dataset 7b\n') + dtype = ModflowFhb.get_default_dtype(nbdtim=nbdtim, head=True, + structured=model.structured) + ds7 = ModflowFhb.get_empty(ncells=nhed, nbdtim=nbdtim, head=True, + structured=model.structured) + for n in range(nhed): + line = f.readline() + raw = line.strip().split() + ds7[n] = tuple(raw[:len(dtype.names)]) + + if model.structured: + ds7['k'] -= 1 + ds7['i'] -= 1 + ds7['j'] -= 1 + else: + ds7['node'] -= 1 + + # Dataset 8 + if nfhbx2 > 0: + cnstm8 = [] + ds8 = [] + dtype = [] + for name, weight in head_aux: + dtype.append((name, np.float32)) + for naux in range(nfhbx1): + if model.verbose: + sys.stdout.write('loading fhb dataset 8a - aux ' + + '{}\n'.format(naux + 1)) + msg = 'dataset 8a will not be preserved in ' + \ + 'the created hfb object.\n' + sys.stdout.write(msg) + # Dataset 6a IFHBUN CNSTM IFHBPT + line = f.readline() + raw = line.strip().split() + ifhbun = int(raw[0]) + if ifhbun != iufhb: + msg = 'fhb dataset 8a must be in the fhb file ' + msg += '(unit={}) '.format(iufhb) + msg += 'fhb data is specified in ' + \ + 'unit={}'.format(ifhbun) + raise ValueError(msg) + cnstm8.append(float(raw[1])) + ifhbpt6 = int(raw[2]) + ifhbpt = max(ifhbpt, ifhbpt6) + + if model.verbose: + sys.stdout.write('loading fhb dataset 8b - aux ' + + '{}\n'.format(naux + 1)) + msg = 'dataset 8b will not be preserved in ' + \ + 'the created hfb object.' 
+ sys.stdout.write(msg) + current = np.recarray(nflw, dtype=dtype) + for n in range(nhed): + line = f.readline() + raw = line.strip().split() + current[n] = tuple(raw[:len(dtype.names)]) + ds8.append(current.copy()) + + if openfile: + f.close() + + # determine specified unit number + unitnumber = None + filenames = [None, None] + if ext_unit_dict is not None: + unitnumber, filenames[0] = \ + model.get_ext_dict_attr(ext_unit_dict, + filetype=ModflowFhb.ftype()) + if ipakcb > 0: + iu, filenames[1] = \ + model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb) + model.add_pop_key_list(ipakcb) + + # auxiliary data are not passed to load instantiation + nfhbx1 = 0 + nfhbx2 = 0 + + fhb = ModflowFhb(model, nbdtim=nbdtim, nflw=nflw, nhed=nhed, + ifhbss=ifhbss, ipakcb=ipakcb, + nfhbx1=nfhbx1, nfhbx2=nfhbx2, ifhbpt=ifhbpt, + bdtimecnstm=bdtimecnstm, bdtime=bdtime, + cnstm5=cnstm5, ds5=ds5, cnstm7=cnstm7, ds7=ds7, + unitnumber=unitnumber, filenames=filenames) + + # return fhb object + return fhb + + @staticmethod + def ftype(): + return 'FHB' + + @staticmethod + def defaultunit(): + return 40 diff --git a/flopy/modflow/mfflwob.py b/flopy/modflow/mfflwob.py index b2c4417476..d30f83ab83 100755 --- a/flopy/modflow/mfflwob.py +++ b/flopy/modflow/mfflwob.py @@ -1,580 +1,580 @@ -import os -import sys -import numpy as np -from ..pakbase import Package -from ..utils import parsenamefile - - -class ModflowFlwob(Package): - """ - Head-dependent flow boundary Observation package class. Minimal working - example that will be refactored in a future version. - - Parameters - ---------- - nqfb : int - Number of cell groups for the head-dependent flow boundary - observations - nqcfb : int - Greater than or equal to the total number of cells in all cell groups - nqtfb : int - Total number of head-dependent flow boundary observations for all cell - groups - iufbobsv : int - unit number where output is saved - tomultfb : float - Time-offset multiplier for head-dependent flow boundary observations. - The product of tomultfb and toffset must produce a time value in units - consistent with other model input. tomultfb can be dimensionless or - can be used to convert the units of toffset to the time unit used in - the simulation. - nqobfb : int list of length nqfb - The number of times at which flows are observed for the group of cells - nqclfb : int list of length nqfb - Is a flag, and the absolute value of nqclfb is the number of cells in - the group. If nqclfb is less than zero, factor = 1.0 for all cells in - the group. - obsnam : string list of length nqtfb - Observation name - irefsp : int of length nqtfb - The zero-based stress period to which the observation time is - referenced. - The reference point is the beginning of the specified stress period. - toffset : float list of length nqtfb - Is the time from the beginning of the stress period irefsp to the time - of the observation. toffset must be in units such that the product of - toffset and tomultfb are consistent with other model input. For - steady state observations, specify irefsp as the steady state stress - period and toffset less than or equal to perlen of the stress period. - If perlen is zero, set toffset to zero. If the observation falls - within a time step, linearly interpolation is used between values at - the beginning and end of the time step. 
- flwobs : float list of length nqtfb - Observed flow value from the head-dependent flow boundary into the - aquifer (+) or the flow from the aquifer into the boundary (-) - layer : int list of length(nqfb, nqclfb) - The zero-based layer index for the cell included in the cell group. - row : int list of length(nqfb, nqclfb) - The zero-based row index for the cell included in the cell group. - column : int list of length(nqfb, nqclfb) - The zero-based column index of the cell included in the cell group. - factor : float list of length(nqfb, nqclfb) - Is the portion of the simulated gain or loss in the cell that is - included in the total gain or loss for this cell group (fn of eq. 5). - flowtype : string - String that corresponds to the head-dependent flow boundary condition - type (CHD, GHB, DRN, RIV) - extension : list of string - Filename extension. If extension is None, extension is set to - ['chob','obc','gbob','obg','drob','obd', 'rvob','obr'] - (default is None). - no_print : boolean - When True or 1, a list of flow observations will not be - written to the Listing File (default is False) - options : list of strings - Package options (default is None). - unitnumber : list of int - File unit number. If unitnumber is None, unitnumber is set to - [40, 140, 41, 141, 42, 142, 43, 143] (default is None). - filenames : str or list of str - Filenames to use for the package and the output files. If - filenames=None the package name will be created using the model name - and package extension and the flwob output name will be created using - the model name and .out extension (for example, - modflowtest.out), if iufbobsv is a number greater than zero. - If a single string is passed the package will be set to the string - and flwob output name will be created using the model name and .out - extension, if iufbobsv is a number greater than zero. To define the - names for all package files (input and output) the length of the list - of strings should be 2. Default is None. - - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - This represents a minimal working example that will be refactored in a - future version. - - """ - - def __init__(self, model, nqfb=0, nqcfb=0, nqtfb=0, iufbobsv=0, - tomultfb=1.0, nqobfb=None, nqclfb=None, obsnam=None, - irefsp=None, toffset=None, flwobs=None, layer=None, - row=None, column=None, factor=None, flowtype=None, - extension=None, no_print=False, options=None, - filenames=None, unitnumber=None): - - """ - Package constructor - """ - if nqobfb is None: - nqobfb = [] - if nqclfb is None: - nqclfb = [] - if obsnam is None: - obsnam = [] - if irefsp is None: - irefsp = [] - if toffset is None: - toffset = [] - if flwobs is None: - flwobs = [] - if layer is None: - layer = [] - if row is None: - row = [] - if column is None: - column = [] - if factor is None: - factor = [] - if extension is None: - extension = ['chob', 'obc', 'gbob', 'obg', 'drob', 'obd', - 'rvob', 'obr'] - pakunits = {'chob': 40, - 'gbob': 41, - 'drob': 42, - 'rvob': 43} - outunits = {'chob': 140, - 'gbob': 141, - 'drob': 142, - 'rvob': 143} - # if unitnumber is None: - # unitnumber = [40, 140, 41, 141, 42, 142, 43, 143] - - if flowtype.upper().strip() == 'CHD': - name = ['CHOB', 'DATA'] - extension = extension[0:2] - # unitnumber = unitnumber[0:2] - # iufbobsv = unitnumber[1] - self._ftype = 'CHOB' - self.url = 'chob.htm' - self.heading = '# CHOB for MODFLOW, generated by Flopy.' 
- elif flowtype.upper().strip() == 'GHB': - name = ['GBOB', 'DATA'] - extension = extension[2:4] - # unitnumber = unitnumber[2:4] - # iufbobsv = unitnumber[1] - self._ftype = 'GBOB' - self.url = 'gbob.htm' - self.heading = '# GBOB for MODFLOW, generated by Flopy.' - elif flowtype.upper().strip() == 'DRN': - name = ['DROB', 'DATA'] - extension = extension[4:6] - # unitnumber = unitnumber[4:6] - # iufbobsv = unitnumber[1] - self._ftype = 'DROB' - self.url = 'drob.htm' - self.heading = '# DROB for MODFLOW, generated by Flopy.' - elif flowtype.upper().strip() == 'RIV': - name = ['RVOB', 'DATA'] - extension = extension[6:8] - # unitnumber = unitnumber[6:8] - # iufbobsv = unitnumber[1] - self._ftype = 'RVOB' - self.url = 'rvob.htm' - self.heading = '# RVOB for MODFLOW, generated by Flopy.' - else: - msg = 'ModflowFlwob: flowtype must be CHD, GHB, DRN, or RIV' - raise KeyError(msg) - - if unitnumber is None: - unitnumber = [pakunits[name[0].lower()], - outunits[name[0].lower()]] - elif isinstance(unitnumber, int): - unitnumber = [unitnumber] - if len(unitnumber) == 1: - if unitnumber[0] in outunits.keys(): - unitnumber = [pakunits[name[0].lower()], - unitnumber[0]] - else: - unitnumber = [unitnumber[0], - outunits[name[0].lower()]] - iufbobsv = unitnumber[1] - - # set filenames - if filenames is None: - filenames = [None, None] - elif isinstance(filenames, str): - filenames = [filenames, None] - elif isinstance(filenames, list): - if len(filenames) < 2: - filenames.append(None) - - # call base package constructor - Package.__init__(self, model, extension=extension, name=name, - unit_number=unitnumber, - allowDuplicates=True, filenames=filenames) - - self.nqfb = nqfb - self.nqcfb = nqcfb - self.nqtfb = nqtfb - self.iufbobsv = iufbobsv - self.tomultfb = tomultfb - self.nqobfb = nqobfb - self.nqclfb = nqclfb - self.obsnam = obsnam - self.irefsp = irefsp - self.toffset = toffset - self.flwobs = flwobs - self.layer = layer - self.row = row - self.column = column - self.factor = factor - - # -create empty arrays of the correct size - self.layer = np.zeros((self.nqfb, max(np.abs(self.nqclfb))), - dtype='int32') - self.row = np.zeros((self.nqfb, max(np.abs(self.nqclfb))), - dtype='int32') - self.column = np.zeros((self.nqfb, max(np.abs(self.nqclfb))), - dtype='int32') - self.factor = np.zeros((self.nqfb, max(np.abs(self.nqclfb))), - dtype='float32') - self.nqobfb = np.zeros((self.nqfb), dtype='int32') - self.nqclfb = np.zeros((self.nqfb), dtype='int32') - self.irefsp = np.zeros((self.nqtfb), dtype='int32') - self.toffset = np.zeros((self.nqtfb), dtype='float32') - self.flwobs = np.zeros((self.nqtfb), dtype='float32') - - # -assign values to arrays - - self.nqobfb[:] = nqobfb - self.nqclfb[:] = nqclfb - self.obsnam[:] = obsnam - self.irefsp[:] = irefsp - self.toffset[:] = toffset - self.flwobs[:] = flwobs - for i in range(self.nqfb): - self.layer[i, :len(layer[i])] = layer[i] - self.row[i, :len(row[i])] = row[i] - self.column[i, :len(column[i])] = column[i] - self.factor[i, :len(factor[i])] = factor[i] - - # add more checks here - - self.no_print = no_print - self.np = 0 - if options is None: - options = [] - if self.no_print: - options.append('NOPRINT') - self.options = options - - # add checks for input compliance (obsnam length, etc.) 
- self.parent.add_package(self) - - def ftype(self): - return self._ftype - - def write_file(self): - """ - Write the package file - - Returns - ------- - None - - """ - # open file for writing - f_fbob = open(self.fn_path, 'w') - - # write header - f_fbob.write('{}\n'.format(self.heading)) - - # write sections 1 and 2 : NOTE- what about NOPRINT? - line = '{:10d}'.format(self.nqfb) - line += '{:10d}'.format(self.nqcfb) - line += '{:10d}'.format(self.nqtfb) - line += '{:10d}'.format(self.iufbobsv) - if self.no_print or 'NOPRINT' in self.options: - line += '{: >10}'.format('NOPRINT') - line += '\n' - f_fbob.write(line) - f_fbob.write('{:10e}\n'.format(self.tomultfb)) - - # write sections 3-5 looping through observations groups - c = 0 - for i in range(self.nqfb): - # while (i < self.nqfb): - # write section 3 - f_fbob.write('{:10d}{:10d}\n'.format(self.nqobfb[i], - self.nqclfb[i])) - - # Loop through observation times for the groups - for j in range(self.nqobfb[i]): - # write section 4 - line = '{:12}'.format(self.obsnam[c]) - line += '{:8d}'.format(self.irefsp[c] + 1) - line += '{:16.10g}'.format(self.toffset[c]) - line += ' {:10.4g}\n'.format(self.flwobs[c]) - f_fbob.write(line) - c += 1 # index variable - - # write section 5 - NOTE- need to adjust factor for multiple - # observations in the same cell - for j in range(abs(self.nqclfb[i])): - # set factor to 1.0 for all cells in group - if self.nqclfb[i] < 0: - self.factor[i, :] = 1.0 - line = '{:10d}'.format(self.layer[i, j] + 1) - line += '{:10d}'.format(self.row[i, j] + 1) - line += '{:10d}'.format(self.column[i, j] + 1) - line += ' '.format(self.factor[i, j]) - # note is 10f good enough here? - line += '{:10f}\n'.format(self.factor[i, j]) - f_fbob.write(line) - - f_fbob.close() - - # - # swm: BEGIN hack for writing standard file - sfname = self.fn_path - sfname += '_ins' - - # write header - f_ins = open(sfname, 'w') - f_ins.write('jif @\n') - f_ins.write('StandardFile 0 1 {}\n'.format(self.nqtfb)) - for i in range(0, self.nqtfb): - f_ins.write('{}\n'.format(self.obsnam[i])) - - f_ins.close() - # swm: END hack for writing standard file - - return - - @staticmethod - def load(f, model, ext_unit_dict=None, check=True): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - check : boolean - Check package data for common errors. (default True) - - Returns - ------- - flwob : ModflowFlwob package object - ModflowFlwob package object. 
- - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> hobs = flopy.modflow.ModflowFlwob.load('test.drob', m) - - """ - - if model.verbose: - sys.stdout.write('loading flwob package file...\n') - - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - - # dataset 0 -- header - while True: - line = f.readline() - if line[0] != '#': - break - - # read dataset 1 -- NQFB NQCFB NQTFB IUFBOBSV Options - t = line.strip().split() - nqfb = int(t[0]) - nqcfb = int(t[1]) - nqtfb = int(t[2]) - iufbobsv = int(t[3]) - options = [] - if len(t) > 4: - options = t[4:] - - # read dataset 2 -- TOMULTFB - line = f.readline() - t = line.strip().split() - tomultfb = float(t[0]) - - nqobfb = np.zeros(nqfb, dtype=np.int32) - nqclfb = np.zeros(nqfb, dtype=np.int32) - obsnam = [] - irefsp = [] - toffset = [] - flwobs = [] - - layer = [] - row = [] - column = [] - factor = [] - - # read datasets 3, 4, and 5 for each of nqfb groups - # of cells - nobs = 0 - while True: - - # read dataset 3 -- NQOBFB NQCLFB - line = f.readline() - t = line.strip().split() - nqobfb[nobs] = int(t[0]) - nqclfb[nobs] = int(t[1]) - - # read dataset 4 -- OBSNAM IREFSP TOFFSET FLWOBS - ntimes = 0 - while True: - line = f.readline() - t = line.strip().split() - obsnam.append(t[0]) - irefsp.append(int(t[1])) - toffset.append(float(t[2])) - flwobs.append(float(t[3])) - ntimes += 1 - if ntimes == nqobfb[nobs]: - break - - # read dataset 5 -- Layer Row Column Factor - k = np.zeros(abs(nqclfb[nobs]), np.int32) - i = np.zeros(abs(nqclfb[nobs]), np.int32) - j = np.zeros(abs(nqclfb[nobs]), np.int32) - fac = np.zeros(abs(nqclfb[nobs]), np.float32) - - ncells = 0 - while True: - line = f.readline() - t = line.strip().split() - k[ncells] = int(t[0]) - i[ncells] = int(t[1]) - j[ncells] = int(t[2]) - fac[ncells] = float(t[3]) - - ncells += 1 - if ncells == abs(nqclfb[nobs]): - layer.append(k) - row.append(i) - column.append(j) - factor.append(fac) - break - - nobs += 1 - if nobs == nqfb: - break - - irefsp = np.array(irefsp) - 1 - layer = np.array(layer) - 1 - row = np.array(row) - 1 - column = np.array(column) - 1 - factor = np.array(factor) - - if openfile: - f.close() - - # get ext_unit_dict if none passed - if ext_unit_dict is None: - namefile = os.path.join(model.model_ws, model.namefile) - ext_unit_dict = parsenamefile(namefile, model.mfnam_packages) - - flowtype, ftype = _get_ftype_from_filename(f.name, ext_unit_dict) - - # set package unit number - unitnumber = None - filenames = [None, None] - if ext_unit_dict is not None: - unitnumber, filenames[0] = \ - model.get_ext_dict_attr(ext_unit_dict, - filetype=ftype.upper()) - if iufbobsv > 0: - iu, filenames[1] = \ - model.get_ext_dict_attr(ext_unit_dict, unit=iufbobsv) - model.add_pop_key_list(iufbobsv) - - # create ModflowFlwob object instance - flwob = ModflowFlwob(model, iufbobsv=iufbobsv, tomultfb=tomultfb, - nqfb=nqfb, nqcfb=nqcfb, - nqtfb=nqtfb, nqobfb=nqobfb, nqclfb=nqclfb, - obsnam=obsnam, irefsp=irefsp, toffset=toffset, - flwobs=flwobs, layer=layer, row=row, - column=column, factor=factor, options=options, - flowtype=flowtype, unitnumber=unitnumber, - filenames=filenames) - - return flwob - - -def _get_ftype_from_filename(fn, ext_unit_dict=None): - """ - Returns the boundary flowtype and filetype for a given ModflowFlwob - package filename. - - Parameters - ---------- - fn : str - The filename to be parsed. 
- ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - - Returns - ------- - flowtype : str - Corresponds to the type of the head-dependent boundary package for - which observations are desired (e.g. "CHD", "GHB", "DRN", or "RIV"). - ftype : str - Corresponds to the observation file type (e.g. "CHOB", "GBOB", - "DROB", or "RVOB"). - """ - - ftype = None - - # determine filetype from filename using ext_unit_dict - if ext_unit_dict is not None: - for key, value in ext_unit_dict.items(): - if value.filename == fn: - ftype = value.filetype - break - - # else, try to infer filetype from filename extension - else: - ext = fn.split('.')[-1].lower() - if 'ch' in ext.lower(): - ftype = 'CHOB' - elif 'gb' in ext.lower(): - ftype = 'GBOB' - elif 'dr' in ext.lower(): - ftype = 'DROB' - elif 'rv' in ext.lower(): - ftype = 'RVOB' - - msg = 'ModflowFlwob: filetype cannot be inferred ' \ - 'from file name {}'.format(fn) - if ftype is None: - raise AssertionError(msg) - - flowtype_dict = {'CHOB': 'CHD', - 'GOBO': 'GHB', - 'DROB': 'DRN', - 'RVOB': 'RIV'} - flowtype = flowtype_dict[ftype] - - return flowtype, ftype +import os +import sys +import numpy as np +from ..pakbase import Package +from ..utils import parsenamefile + + +class ModflowFlwob(Package): + """ + Head-dependent flow boundary Observation package class. Minimal working + example that will be refactored in a future version. + + Parameters + ---------- + nqfb : int + Number of cell groups for the head-dependent flow boundary + observations + nqcfb : int + Greater than or equal to the total number of cells in all cell groups + nqtfb : int + Total number of head-dependent flow boundary observations for all cell + groups + iufbobsv : int + unit number where output is saved + tomultfb : float + Time-offset multiplier for head-dependent flow boundary observations. + The product of tomultfb and toffset must produce a time value in units + consistent with other model input. tomultfb can be dimensionless or + can be used to convert the units of toffset to the time unit used in + the simulation. + nqobfb : int list of length nqfb + The number of times at which flows are observed for the group of cells + nqclfb : int list of length nqfb + Is a flag, and the absolute value of nqclfb is the number of cells in + the group. If nqclfb is less than zero, factor = 1.0 for all cells in + the group. + obsnam : string list of length nqtfb + Observation name + irefsp : int of length nqtfb + The zero-based stress period to which the observation time is + referenced. + The reference point is the beginning of the specified stress period. + toffset : float list of length nqtfb + Is the time from the beginning of the stress period irefsp to the time + of the observation. toffset must be in units such that the product of + toffset and tomultfb are consistent with other model input. For + steady state observations, specify irefsp as the steady state stress + period and toffset less than or equal to perlen of the stress period. + If perlen is zero, set toffset to zero. If the observation falls + within a time step, linearly interpolation is used between values at + the beginning and end of the time step. 
+ flwobs : float list of length nqtfb + Observed flow value from the head-dependent flow boundary into the + aquifer (+) or the flow from the aquifer into the boundary (-) + layer : int list of length(nqfb, nqclfb) + The zero-based layer index for the cell included in the cell group. + row : int list of length(nqfb, nqclfb) + The zero-based row index for the cell included in the cell group. + column : int list of length(nqfb, nqclfb) + The zero-based column index of the cell included in the cell group. + factor : float list of length(nqfb, nqclfb) + Is the portion of the simulated gain or loss in the cell that is + included in the total gain or loss for this cell group (fn of eq. 5). + flowtype : string + String that corresponds to the head-dependent flow boundary condition + type (CHD, GHB, DRN, RIV) + extension : list of string + Filename extension. If extension is None, extension is set to + ['chob','obc','gbob','obg','drob','obd', 'rvob','obr'] + (default is None). + no_print : boolean + When True or 1, a list of flow observations will not be + written to the Listing File (default is False) + options : list of strings + Package options (default is None). + unitnumber : list of int + File unit number. If unitnumber is None, unitnumber is set to + [40, 140, 41, 141, 42, 142, 43, 143] (default is None). + filenames : str or list of str + Filenames to use for the package and the output files. If + filenames=None the package name will be created using the model name + and package extension and the flwob output name will be created using + the model name and .out extension (for example, + modflowtest.out), if iufbobsv is a number greater than zero. + If a single string is passed the package will be set to the string + and flwob output name will be created using the model name and .out + extension, if iufbobsv is a number greater than zero. To define the + names for all package files (input and output) the length of the list + of strings should be 2. Default is None. + + + Attributes + ---------- + + Methods + ------- + + See Also + -------- + + Notes + ----- + This represents a minimal working example that will be refactored in a + future version. + + """ + + def __init__(self, model, nqfb=0, nqcfb=0, nqtfb=0, iufbobsv=0, + tomultfb=1.0, nqobfb=None, nqclfb=None, obsnam=None, + irefsp=None, toffset=None, flwobs=None, layer=None, + row=None, column=None, factor=None, flowtype=None, + extension=None, no_print=False, options=None, + filenames=None, unitnumber=None): + + """ + Package constructor + """ + if nqobfb is None: + nqobfb = [] + if nqclfb is None: + nqclfb = [] + if obsnam is None: + obsnam = [] + if irefsp is None: + irefsp = [] + if toffset is None: + toffset = [] + if flwobs is None: + flwobs = [] + if layer is None: + layer = [] + if row is None: + row = [] + if column is None: + column = [] + if factor is None: + factor = [] + if extension is None: + extension = ['chob', 'obc', 'gbob', 'obg', 'drob', 'obd', + 'rvob', 'obr'] + pakunits = {'chob': 40, + 'gbob': 41, + 'drob': 42, + 'rvob': 43} + outunits = {'chob': 140, + 'gbob': 141, + 'drob': 142, + 'rvob': 143} + # if unitnumber is None: + # unitnumber = [40, 140, 41, 141, 42, 142, 43, 143] + + if flowtype.upper().strip() == 'CHD': + name = ['CHOB', 'DATA'] + extension = extension[0:2] + # unitnumber = unitnumber[0:2] + # iufbobsv = unitnumber[1] + self._ftype = 'CHOB' + self.url = 'chob.htm' + self.heading = '# CHOB for MODFLOW, generated by Flopy.' 
+ elif flowtype.upper().strip() == 'GHB': + name = ['GBOB', 'DATA'] + extension = extension[2:4] + # unitnumber = unitnumber[2:4] + # iufbobsv = unitnumber[1] + self._ftype = 'GBOB' + self.url = 'gbob.htm' + self.heading = '# GBOB for MODFLOW, generated by Flopy.' + elif flowtype.upper().strip() == 'DRN': + name = ['DROB', 'DATA'] + extension = extension[4:6] + # unitnumber = unitnumber[4:6] + # iufbobsv = unitnumber[1] + self._ftype = 'DROB' + self.url = 'drob.htm' + self.heading = '# DROB for MODFLOW, generated by Flopy.' + elif flowtype.upper().strip() == 'RIV': + name = ['RVOB', 'DATA'] + extension = extension[6:8] + # unitnumber = unitnumber[6:8] + # iufbobsv = unitnumber[1] + self._ftype = 'RVOB' + self.url = 'rvob.htm' + self.heading = '# RVOB for MODFLOW, generated by Flopy.' + else: + msg = 'ModflowFlwob: flowtype must be CHD, GHB, DRN, or RIV' + raise KeyError(msg) + + if unitnumber is None: + unitnumber = [pakunits[name[0].lower()], + outunits[name[0].lower()]] + elif isinstance(unitnumber, int): + unitnumber = [unitnumber] + if len(unitnumber) == 1: + if unitnumber[0] in outunits.keys(): + unitnumber = [pakunits[name[0].lower()], + unitnumber[0]] + else: + unitnumber = [unitnumber[0], + outunits[name[0].lower()]] + iufbobsv = unitnumber[1] + + # set filenames + if filenames is None: + filenames = [None, None] + elif isinstance(filenames, str): + filenames = [filenames, None] + elif isinstance(filenames, list): + if len(filenames) < 2: + filenames.append(None) + + # call base package constructor + Package.__init__(self, model, extension=extension, name=name, + unit_number=unitnumber, + allowDuplicates=True, filenames=filenames) + + self.nqfb = nqfb + self.nqcfb = nqcfb + self.nqtfb = nqtfb + self.iufbobsv = iufbobsv + self.tomultfb = tomultfb + self.nqobfb = nqobfb + self.nqclfb = nqclfb + self.obsnam = obsnam + self.irefsp = irefsp + self.toffset = toffset + self.flwobs = flwobs + self.layer = layer + self.row = row + self.column = column + self.factor = factor + + # -create empty arrays of the correct size + self.layer = np.zeros((self.nqfb, max(np.abs(self.nqclfb))), + dtype='int32') + self.row = np.zeros((self.nqfb, max(np.abs(self.nqclfb))), + dtype='int32') + self.column = np.zeros((self.nqfb, max(np.abs(self.nqclfb))), + dtype='int32') + self.factor = np.zeros((self.nqfb, max(np.abs(self.nqclfb))), + dtype='float32') + self.nqobfb = np.zeros((self.nqfb), dtype='int32') + self.nqclfb = np.zeros((self.nqfb), dtype='int32') + self.irefsp = np.zeros((self.nqtfb), dtype='int32') + self.toffset = np.zeros((self.nqtfb), dtype='float32') + self.flwobs = np.zeros((self.nqtfb), dtype='float32') + + # -assign values to arrays + + self.nqobfb[:] = nqobfb + self.nqclfb[:] = nqclfb + self.obsnam[:] = obsnam + self.irefsp[:] = irefsp + self.toffset[:] = toffset + self.flwobs[:] = flwobs + for i in range(self.nqfb): + self.layer[i, :len(layer[i])] = layer[i] + self.row[i, :len(row[i])] = row[i] + self.column[i, :len(column[i])] = column[i] + self.factor[i, :len(factor[i])] = factor[i] + + # add more checks here + + self.no_print = no_print + self.np = 0 + if options is None: + options = [] + if self.no_print: + options.append('NOPRINT') + self.options = options + + # add checks for input compliance (obsnam length, etc.) 
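With the group arrays above in place, a minimal sketch of building a single drain-observation group; the model, indices, times, and observed flows are hypothetical, and indices are zero-based as the parameter descriptions state:

    import flopy

    m = flopy.modflow.Modflow()
    dis = flopy.modflow.ModflowDis(m, nlay=1, nrow=4, ncol=4)
    # one group (nqfb=1) of two cells (nqclfb=[2]) observed at two times
    # (nqobfb=[2], nqtfb=2); factor splits the group flow between cells
    fob = flopy.modflow.ModflowFlwob(
        m, flowtype='DRN', nqfb=1, nqcfb=2, nqtfb=2,
        nqobfb=[2], nqclfb=[2],
        obsnam=['drob_1', 'drob_2'], irefsp=[0, 0],
        toffset=[0.0, 100.0], flwobs=[-25.0, -30.0],
        layer=[[0, 0]], row=[[1, 2]], column=[[1, 1]],
        factor=[[0.5, 0.5]])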
+ self.parent.add_package(self) + + def ftype(self): + return self._ftype + + def write_file(self): + """ + Write the package file + + Returns + ------- + None + + """ + # open file for writing + f_fbob = open(self.fn_path, 'w') + + # write header + f_fbob.write('{}\n'.format(self.heading)) + + # write sections 1 and 2 : NOTE- what about NOPRINT? + line = '{:10d}'.format(self.nqfb) + line += '{:10d}'.format(self.nqcfb) + line += '{:10d}'.format(self.nqtfb) + line += '{:10d}'.format(self.iufbobsv) + if self.no_print or 'NOPRINT' in self.options: + line += '{: >10}'.format('NOPRINT') + line += '\n' + f_fbob.write(line) + f_fbob.write('{:10e}\n'.format(self.tomultfb)) + + # write sections 3-5 looping through observations groups + c = 0 + for i in range(self.nqfb): + # while (i < self.nqfb): + # write section 3 + f_fbob.write('{:10d}{:10d}\n'.format(self.nqobfb[i], + self.nqclfb[i])) + + # Loop through observation times for the groups + for j in range(self.nqobfb[i]): + # write section 4 + line = '{:12}'.format(self.obsnam[c]) + line += '{:8d}'.format(self.irefsp[c] + 1) + line += '{:16.10g}'.format(self.toffset[c]) + line += ' {:10.4g}\n'.format(self.flwobs[c]) + f_fbob.write(line) + c += 1 # index variable + + # write section 5 - NOTE- need to adjust factor for multiple + # observations in the same cell + for j in range(abs(self.nqclfb[i])): + # set factor to 1.0 for all cells in group + if self.nqclfb[i] < 0: + self.factor[i, :] = 1.0 + line = '{:10d}'.format(self.layer[i, j] + 1) + line += '{:10d}'.format(self.row[i, j] + 1) + line += '{:10d}'.format(self.column[i, j] + 1) + line += ' '.format(self.factor[i, j]) + # note is 10f good enough here? + line += '{:10f}\n'.format(self.factor[i, j]) + f_fbob.write(line) + + f_fbob.close() + + # + # swm: BEGIN hack for writing standard file + sfname = self.fn_path + sfname += '_ins' + + # write header + f_ins = open(sfname, 'w') + f_ins.write('jif @\n') + f_ins.write('StandardFile 0 1 {}\n'.format(self.nqtfb)) + for i in range(0, self.nqtfb): + f_ins.write('{}\n'.format(self.obsnam[i])) + + f_ins.close() + # swm: END hack for writing standard file + + return + + @staticmethod + def load(f, model, ext_unit_dict=None, check=True): + """ + Load an existing package. + + Parameters + ---------- + f : filename or file handle + File to load. + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to + which this package will be added. + ext_unit_dict : dictionary, optional + If the arrays in the file are specified using EXTERNAL, + or older style array control records, then `f` should be a file + handle. In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. + check : boolean + Check package data for common errors. (default True) + + Returns + ------- + flwob : ModflowFlwob package object + ModflowFlwob package object. 
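As orientation for the doctest that follows, a sketch of the input layout that load parses; all numbers are hypothetical, and LAYER, ROW, and COLUMN are one-based in the file (load shifts them to zero-based):

    # Hypothetical DROB input (datasets 1-5 for one cell group):
    #
    #   1 2 2 142            dataset 1: NQFB NQCFB NQTFB IUFBOBSV
    #   1.0                  dataset 2: TOMULTFB
    #   2 2                  dataset 3: NQOBFB NQCLFB
    #   drob_1 1 0.0 -25.0   dataset 4: OBSNAM IREFSP TOFFSET FLWOBS
    #   drob_2 1 100.0 -30.0
    #   1 2 2 0.5            dataset 5: LAYER ROW COLUMN FACTOR
    #   1 3 2 0.5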
+ + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> hobs = flopy.modflow.ModflowFlwob.load('test.drob', m) + + """ + + if model.verbose: + sys.stdout.write('loading flwob package file...\n') + + openfile = not hasattr(f, 'read') + if openfile: + filename = f + f = open(filename, 'r') + + # dataset 0 -- header + while True: + line = f.readline() + if line[0] != '#': + break + + # read dataset 1 -- NQFB NQCFB NQTFB IUFBOBSV Options + t = line.strip().split() + nqfb = int(t[0]) + nqcfb = int(t[1]) + nqtfb = int(t[2]) + iufbobsv = int(t[3]) + options = [] + if len(t) > 4: + options = t[4:] + + # read dataset 2 -- TOMULTFB + line = f.readline() + t = line.strip().split() + tomultfb = float(t[0]) + + nqobfb = np.zeros(nqfb, dtype=np.int32) + nqclfb = np.zeros(nqfb, dtype=np.int32) + obsnam = [] + irefsp = [] + toffset = [] + flwobs = [] + + layer = [] + row = [] + column = [] + factor = [] + + # read datasets 3, 4, and 5 for each of nqfb groups + # of cells + nobs = 0 + while True: + + # read dataset 3 -- NQOBFB NQCLFB + line = f.readline() + t = line.strip().split() + nqobfb[nobs] = int(t[0]) + nqclfb[nobs] = int(t[1]) + + # read dataset 4 -- OBSNAM IREFSP TOFFSET FLWOBS + ntimes = 0 + while True: + line = f.readline() + t = line.strip().split() + obsnam.append(t[0]) + irefsp.append(int(t[1])) + toffset.append(float(t[2])) + flwobs.append(float(t[3])) + ntimes += 1 + if ntimes == nqobfb[nobs]: + break + + # read dataset 5 -- Layer Row Column Factor + k = np.zeros(abs(nqclfb[nobs]), np.int32) + i = np.zeros(abs(nqclfb[nobs]), np.int32) + j = np.zeros(abs(nqclfb[nobs]), np.int32) + fac = np.zeros(abs(nqclfb[nobs]), np.float32) + + ncells = 0 + while True: + line = f.readline() + t = line.strip().split() + k[ncells] = int(t[0]) + i[ncells] = int(t[1]) + j[ncells] = int(t[2]) + fac[ncells] = float(t[3]) + + ncells += 1 + if ncells == abs(nqclfb[nobs]): + layer.append(k) + row.append(i) + column.append(j) + factor.append(fac) + break + + nobs += 1 + if nobs == nqfb: + break + + irefsp = np.array(irefsp) - 1 + layer = np.array(layer) - 1 + row = np.array(row) - 1 + column = np.array(column) - 1 + factor = np.array(factor) + + if openfile: + f.close() + + # get ext_unit_dict if none passed + if ext_unit_dict is None: + namefile = os.path.join(model.model_ws, model.namefile) + ext_unit_dict = parsenamefile(namefile, model.mfnam_packages) + + flowtype, ftype = _get_ftype_from_filename(f.name, ext_unit_dict) + + # set package unit number + unitnumber = None + filenames = [None, None] + if ext_unit_dict is not None: + unitnumber, filenames[0] = \ + model.get_ext_dict_attr(ext_unit_dict, + filetype=ftype.upper()) + if iufbobsv > 0: + iu, filenames[1] = \ + model.get_ext_dict_attr(ext_unit_dict, unit=iufbobsv) + model.add_pop_key_list(iufbobsv) + + # create ModflowFlwob object instance + flwob = ModflowFlwob(model, iufbobsv=iufbobsv, tomultfb=tomultfb, + nqfb=nqfb, nqcfb=nqcfb, + nqtfb=nqtfb, nqobfb=nqobfb, nqclfb=nqclfb, + obsnam=obsnam, irefsp=irefsp, toffset=toffset, + flwobs=flwobs, layer=layer, row=row, + column=column, factor=factor, options=options, + flowtype=flowtype, unitnumber=unitnumber, + filenames=filenames) + + return flwob + + +def _get_ftype_from_filename(fn, ext_unit_dict=None): + """ + Returns the boundary flowtype and filetype for a given ModflowFlwob + package filename. + + Parameters + ---------- + fn : str + The filename to be parsed. 
+    ext_unit_dict : dictionary, optional
+        If the arrays in the file are specified using EXTERNAL,
+        or older style array control records, then `f` should be a file
+        handle. In this case ext_unit_dict is required, which can be
+        constructed using the function
+        :class:`flopy.utils.mfreadnam.parsenamefile`.
+
+    Returns
+    -------
+    flowtype : str
+        Corresponds to the type of the head-dependent boundary package for
+        which observations are desired (e.g. "CHD", "GHB", "DRN", or "RIV").
+    ftype : str
+        Corresponds to the observation file type (e.g. "CHOB", "GBOB",
+        "DROB", or "RVOB").
+    """
+
+    ftype = None
+
+    # determine filetype from filename using ext_unit_dict
+    if ext_unit_dict is not None:
+        for key, value in ext_unit_dict.items():
+            if value.filename == fn:
+                ftype = value.filetype
+                break
+
+    # else, try to infer filetype from the filename extension
+    else:
+        # ext is already lowercase here
+        ext = fn.split('.')[-1].lower()
+        if 'ch' in ext:
+            ftype = 'CHOB'
+        elif 'gb' in ext:
+            ftype = 'GBOB'
+        elif 'dr' in ext:
+            ftype = 'DROB'
+        elif 'rv' in ext:
+            ftype = 'RVOB'
+
+    if ftype is None:
+        msg = 'ModflowFlwob: filetype cannot be inferred ' \
+              'from file name {}'.format(fn)
+        raise AssertionError(msg)
+
+    # map the observation filetype to the boundary package it observes
+    flowtype_dict = {'CHOB': 'CHD',
+                     'GBOB': 'GHB',
+                     'DROB': 'DRN',
+                     'RVOB': 'RIV'}
+    flowtype = flowtype_dict[ftype]
+
+    return flowtype, ftype
diff --git a/flopy/modflow/mfgage.py b/flopy/modflow/mfgage.py
index 314e498604..4fb211cf92 100644
--- a/flopy/modflow/mfgage.py
+++ b/flopy/modflow/mfgage.py
@@ -1,377 +1,377 @@
-"""
-mfgage module. Contains the ModflowGage class. Note that the user can access
-the ModflowGage class as `flopy.modflow.ModflowGage`.
-
-Additional information for this MODFLOW package can be found at the `Online
-MODFLOW Guide
-`_.
-
-"""
-import os
-import sys
-
-import numpy as np
-
-from ..pakbase import Package
-from ..utils import read_fixed_var, write_fixed_var
-from ..utils.recarray_utils import create_empty_recarray
-
-
-class ModflowGage(Package):
-    """
-    MODFLOW Gage Package Class.
-
-    Parameters
-    ----------
-    model : model object
-        The model object (of type :class:`flopy.modflow.mf.Modflow`) to which
-        this package will be added.
-    numgage : int
-        The total number of gages included in the gage file (default is 0).
-    gage_data : list or numpy array
-        data for dataset 2a and 2b in the gage package. If a list is provided
-        then the list includes 2 to 3 entries (LAKE UNIT [OUTTYPE]) for each
-        LAK Package entry and 4 entries (GAGESEG GAGERCH UNIT OUTTYPE) for
-        each SFR Package entry. If a numpy array it passed each gage location
-        must have 4 entries, where LAK Package gages can have any value for the
-        second column. The numpy array can be created using the get_empty()
-        method available in ModflowGage. Default is None
-    files : list of strings
-        Names of gage output files. A file name must be provided for each gage.
-        If files are not provided and filenames=None then a gage name will be
-        created using the model name and the gage number (for example,
-        modflowtest.gage1.go). Default is None.
-    extension : string
-        Filename extension (default is 'gage')
-    unitnumber : int
-        File unit number (default is None).
-    filenames : str or list of str
-        Filenames to use for the package and the output files. If
-        filenames=None the package name will be created using the model name
-        and package extension and gage output names will be created using the
-        model name and the gage number (for example, modflowtest.gage1.go).
- If a single string is passed the package will be set to the string - and gage output names will be created using the model name and the - gage number. To define the names for all gage files (input and output) - the length of the list of strings should be numgage + 1. - Default is None. - - Methods - ------- - - See Also - -------- - - Notes - ----- - Parameters are not supported in FloPy. - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> gages = [[-1, -26, 1], [-2, -27, 1]] - >>> files = ['gage1.go', 'gage2.go'] - >>> gage = flopy.modflow.ModflowGage(m, numgage=2, - >>> gage_data=gages, files=files) - - """ - - def __init__(self, model, numgage=0, gage_data=None, files=None, - extension='gage', unitnumber=None, - filenames=None, **kwargs): - """ - Package constructor. - - """ - # set default unit number of one is not specified - if unitnumber is None: - unitnumber = ModflowGage.defaultunit() - - # set filenames - if filenames is None: - filenames = [None for x in range(numgage + 1)] - elif isinstance(filenames, str): - filenames = [filenames] + [None for x in range(numgage)] - elif isinstance(filenames, list): - if len(filenames) < numgage + 1: - for idx in range(len(filenames), numgage + 2): - filenames.append(None) - - # process gage output files - dtype = ModflowGage.get_default_dtype() - if numgage > 0: - # check the provided file entries - if filenames[1] is None: - if files is None: - files = [] - for idx in range(numgage): - files.append( - '{}.gage{}.go'.format(model.name, idx + 1)) - if isinstance(files, np.ndarray): - files = files.flatten().tolist() - elif isinstance(files, str): - files = [files] - elif isinstance(files, int) or isinstance(files, float): - files = ['{}.go'.format(files)] - if len(files) < numgage: - err = 'a filename needs to be provided ' + \ - 'for {} gages '.format(numgage) + \ - '- {} filenames were provided'.format(len(files)) - raise Exception(err) - else: - if len(filenames) < numgage + 1: - err = "filenames must have a " + \ - "length of {} ".format(numgage + 1) + \ - "the length provided is {}".format(len(filenames)) - raise Exception(err) - else: - files = [] - for n in range(numgage): - files.append(filenames[n + 1]) - - # convert gage_data to a recarray, if necessary - if isinstance(gage_data, np.ndarray): - if not gage_data.dtype == dtype: - gage_data = np.core.records.fromarrays( - gage_data.transpose(), - dtype=dtype) - elif isinstance(gage_data, list): - d = ModflowGage.get_empty(ncells=numgage) - for n in range(len(gage_data)): - t = gage_data[n] - gageloc = int(t[0]) - if gageloc < 0: - gagerch = 0 - iu = int(t[1]) - outtype = 0 - if iu < 0: - outtype = int(t[2]) - else: - gagerch = int(t[1]) - iu = int(t[2]) - outtype = int(t[3]) - - d['gageloc'][n] = gageloc - d['gagerch'][n] = gagerch - d['unit'][n] = iu - d['outtype'][n] = outtype - gage_data = d - else: - err = 'gage_data must be a numpy record array, numpy array' + \ - 'or a list' - raise Exception(err) - - # add gage output files to model - for n in range(numgage): - iu = abs(gage_data['unit'][n]) - fname = files[n] - model.add_output_file(iu, fname=fname, binflag=False, - package=ModflowGage.ftype()) - - # Fill namefile items - name = [ModflowGage.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - self.heading = 
'# {} package for '.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' - self.url = 'gage.htm' - - self.numgage = numgage - self.files = files - - self.dtype = self.get_default_dtype() - - self.gage_data = gage_data - - self.parent.add_package(self) - - return - - @staticmethod - def get_default_dtype(): - dtype = np.dtype([("gageloc", np.int), ("gagerch", np.int), - ("unit", np.int), ("outtype", np.int)]) - return dtype - - @staticmethod - def get_empty(ncells=0, aux_names=None, structured=True): - # get an empty recarray that corresponds to dtype - dtype = ModflowGage.get_default_dtype() - return create_empty_recarray(ncells, dtype, default_value=-1.0E+10) - - def ncells(self): - # Return 0 for the gage package - # (developed for MT3DMS SSM package) - return 0 - - def write_file(self): - """ - Write the package file. - - Returns - ------- - None - - """ - f = open(self.fn_path, 'w') - - # # dataset 0 - # vn = self.parent.version_types[self.parent.version] - # self.heading = '# {} package for '.format(self.name[0]) + \ - # '{}, generated by Flopy.'.format(vn) - # f.write('{0}\n'.format(self.heading)) - - # dataset 1 - f.write(write_fixed_var([self.numgage], free=True)) - - # dataset 2 - for n in range(self.numgage): - gageloc = self.gage_data['gageloc'][n] - gagerch = self.gage_data['gagerch'][n] - iu = self.gage_data['unit'][n] - outtype = self.gage_data['outtype'][n] - t = [gageloc] - if gageloc < 0: - t.append(iu) - if iu < 0: - t.append(outtype) - else: - t.append(gagerch) - t.append(iu) - t.append(outtype) - f.write(write_fixed_var(t, free=True)) - - # close the gage file - f.close() - - @staticmethod - def load(f, model, nper=None, ext_unit_dict=None): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. - nper : int - The number of stress periods. If nper is None, then nper will be - obtained from the model object. (default is None). - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - - Returns - ------- - str : ModflowStr object - ModflowStr object. 
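The recarray route that the gage_data docstring mentions (via get_empty()) can be sketched as follows. This is illustrative only and assumes flopy/numpy versions contemporary with this patch (the dtype above still uses np.int); field names come from get_default_dtype():

import flopy

m = flopy.modflow.Modflow()
# one lake gage (negative gageloc) and one SFR gage (segment 2, reach 3)
gage_data = flopy.modflow.ModflowGage.get_empty(ncells=2)
gage_data['gageloc'] = [-1, 2]
gage_data['gagerch'] = [0, 3]    # reach is only meaningful for SFR gages
gage_data['unit'] = [-26, 27]    # negative unit -> OUTTYPE applies to lakes
gage_data['outtype'] = [1, 0]
gage = flopy.modflow.ModflowGage(m, numgage=2, gage_data=gage_data,
                                 files=['lak1.go', 'sfr1.go'])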
- - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> gage = flopy.modflow.ModflowGage.load('test.gage', m) - - """ - - if model.verbose: - sys.stdout.write('loading gage package file...\n') - - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r', errors='replace') - - # dataset 0 -- header - while True: - line = f.readline().rstrip() - if line[0] != '#': - break - - # read dataset 1 - if model.verbose: - print(" reading gage dataset 1") - t = read_fixed_var(line, free=True) - numgage = int(t[0]) - - if numgage == 0: - gage_data = None - files = None - else: - # read dataset 2 - if model.verbose: - print(" reading gage dataset 2") - - gage_data = ModflowGage.get_empty(ncells=numgage) - files = [] - - for n in range(numgage): - line = f.readline().rstrip() - t = read_fixed_var(line, free=True) - gageloc = int(t[0]) - if gageloc < 0: - gagerch = 0 - iu = int(t[1]) - outtype = 0 - if iu < 0: - outtype = int(t[2]) - else: - gagerch = int(t[1]) - iu = int(t[2]) - outtype = int(t[3]) - gage_data['gageloc'][n] = gageloc - gage_data['gagerch'][n] = gagerch - gage_data['unit'][n] = iu - gage_data['outtype'][n] = outtype - - for key, value in ext_unit_dict.items(): - if key == abs(iu): - model.add_pop_key_list(abs(iu)) - relpth = os.path.relpath(value.filename, - model.model_ws) - files.append(relpth) - break - - if openfile: - f.close() - - # determine specified unit number - unitnumber = None - filenames = [] - if ext_unit_dict is not None: - for key, value in ext_unit_dict.items(): - if value.filetype == ModflowGage.ftype(): - unitnumber = key - filenames.append(os.path.basename(value.filename)) - for file in files: - filenames.append(os.path.basename(file)) - - gagepak = ModflowGage(model, numgage=numgage, - gage_data=gage_data, filenames=filenames, - unitnumber=unitnumber) - return gagepak - - @staticmethod - def ftype(): - return 'GAGE' - - @staticmethod - def defaultunit(): - return 120 +""" +mfgage module. Contains the ModflowGage class. Note that the user can access +the ModflowGage class as `flopy.modflow.ModflowGage`. + +Additional information for this MODFLOW package can be found at the `Online +MODFLOW Guide +`_. + +""" +import os +import sys + +import numpy as np + +from ..pakbase import Package +from ..utils import read_fixed_var, write_fixed_var +from ..utils.recarray_utils import create_empty_recarray + + +class ModflowGage(Package): + """ + MODFLOW Gage Package Class. + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to which + this package will be added. + numgage : int + The total number of gages included in the gage file (default is 0). + gage_data : list or numpy array + data for dataset 2a and 2b in the gage package. If a list is provided + then the list includes 2 to 3 entries (LAKE UNIT [OUTTYPE]) for each + LAK Package entry and 4 entries (GAGESEG GAGERCH UNIT OUTTYPE) for + each SFR Package entry. If a numpy array it passed each gage location + must have 4 entries, where LAK Package gages can have any value for the + second column. The numpy array can be created using the get_empty() + method available in ModflowGage. Default is None + files : list of strings + Names of gage output files. A file name must be provided for each gage. + If files are not provided and filenames=None then a gage name will be + created using the model name and the gage number (for example, + modflowtest.gage1.go). Default is None. 
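The branching on negative GAGELOC and UNIT values, performed identically by the constructor and by load(), can be distilled into a small helper. parse_gage_record is hypothetical (not a flopy function) and mirrors the logic shown in this file:

def parse_gage_record(tokens):
    # LAK gage: LAKE UNIT [OUTTYPE]; SFR gage: GAGESEG GAGERCH UNIT OUTTYPE
    gageloc = int(tokens[0])
    if gageloc < 0:
        gagerch, iu = 0, int(tokens[1])
        outtype = int(tokens[2]) if iu < 0 else 0
    else:
        gagerch, iu = int(tokens[1]), int(tokens[2])
        outtype = int(tokens[3])
    return gageloc, gagerch, iu, outtype

print(parse_gage_record(['-1', '-26', '1']))     # (-1, 0, -26, 1)
print(parse_gage_record(['2', '3', '27', '0']))  # (2, 3, 27, 0)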
+ extension : string + Filename extension (default is 'gage') + unitnumber : int + File unit number (default is None). + filenames : str or list of str + Filenames to use for the package and the output files. If + filenames=None the package name will be created using the model name + and package extension and gage output names will be created using the + model name and the gage number (for example, modflowtest.gage1.go). + If a single string is passed the package will be set to the string + and gage output names will be created using the model name and the + gage number. To define the names for all gage files (input and output) + the length of the list of strings should be numgage + 1. + Default is None. + + Methods + ------- + + See Also + -------- + + Notes + ----- + Parameters are not supported in FloPy. + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> gages = [[-1, -26, 1], [-2, -27, 1]] + >>> files = ['gage1.go', 'gage2.go'] + >>> gage = flopy.modflow.ModflowGage(m, numgage=2, + >>> gage_data=gages, files=files) + + """ + + def __init__(self, model, numgage=0, gage_data=None, files=None, + extension='gage', unitnumber=None, + filenames=None, **kwargs): + """ + Package constructor. + + """ + # set default unit number of one is not specified + if unitnumber is None: + unitnumber = ModflowGage.defaultunit() + + # set filenames + if filenames is None: + filenames = [None for x in range(numgage + 1)] + elif isinstance(filenames, str): + filenames = [filenames] + [None for x in range(numgage)] + elif isinstance(filenames, list): + if len(filenames) < numgage + 1: + for idx in range(len(filenames), numgage + 2): + filenames.append(None) + + # process gage output files + dtype = ModflowGage.get_default_dtype() + if numgage > 0: + # check the provided file entries + if filenames[1] is None: + if files is None: + files = [] + for idx in range(numgage): + files.append( + '{}.gage{}.go'.format(model.name, idx + 1)) + if isinstance(files, np.ndarray): + files = files.flatten().tolist() + elif isinstance(files, str): + files = [files] + elif isinstance(files, int) or isinstance(files, float): + files = ['{}.go'.format(files)] + if len(files) < numgage: + err = 'a filename needs to be provided ' + \ + 'for {} gages '.format(numgage) + \ + '- {} filenames were provided'.format(len(files)) + raise Exception(err) + else: + if len(filenames) < numgage + 1: + err = "filenames must have a " + \ + "length of {} ".format(numgage + 1) + \ + "the length provided is {}".format(len(filenames)) + raise Exception(err) + else: + files = [] + for n in range(numgage): + files.append(filenames[n + 1]) + + # convert gage_data to a recarray, if necessary + if isinstance(gage_data, np.ndarray): + if not gage_data.dtype == dtype: + gage_data = np.core.records.fromarrays( + gage_data.transpose(), + dtype=dtype) + elif isinstance(gage_data, list): + d = ModflowGage.get_empty(ncells=numgage) + for n in range(len(gage_data)): + t = gage_data[n] + gageloc = int(t[0]) + if gageloc < 0: + gagerch = 0 + iu = int(t[1]) + outtype = 0 + if iu < 0: + outtype = int(t[2]) + else: + gagerch = int(t[1]) + iu = int(t[2]) + outtype = int(t[3]) + + d['gageloc'][n] = gageloc + d['gagerch'][n] = gagerch + d['unit'][n] = iu + d['outtype'][n] = outtype + gage_data = d + else: + err = 'gage_data must be a numpy record array, numpy array' + \ + 'or a list' + raise Exception(err) + + # add gage output files to model + for n in range(numgage): + iu = abs(gage_data['unit'][n]) + fname = files[n] + 
model.add_output_file(iu, fname=fname, binflag=False, + package=ModflowGage.ftype()) + + # Fill namefile items + name = [ModflowGage.ftype()] + units = [unitnumber] + extra = [''] + + # set package name + fname = [filenames[0]] + + # Call ancestor's init to set self.parent, extension, name and unit number + Package.__init__(self, model, extension=extension, name=name, + unit_number=units, extra=extra, filenames=fname) + + self.heading = '# {} package for '.format(self.name[0]) + \ + ' {}, '.format(model.version_types[model.version]) + \ + 'generated by Flopy.' + self.url = 'gage.htm' + + self.numgage = numgage + self.files = files + + self.dtype = self.get_default_dtype() + + self.gage_data = gage_data + + self.parent.add_package(self) + + return + + @staticmethod + def get_default_dtype(): + dtype = np.dtype([("gageloc", np.int), ("gagerch", np.int), + ("unit", np.int), ("outtype", np.int)]) + return dtype + + @staticmethod + def get_empty(ncells=0, aux_names=None, structured=True): + # get an empty recarray that corresponds to dtype + dtype = ModflowGage.get_default_dtype() + return create_empty_recarray(ncells, dtype, default_value=-1.0E+10) + + def ncells(self): + # Return 0 for the gage package + # (developed for MT3DMS SSM package) + return 0 + + def write_file(self): + """ + Write the package file. + + Returns + ------- + None + + """ + f = open(self.fn_path, 'w') + + # # dataset 0 + # vn = self.parent.version_types[self.parent.version] + # self.heading = '# {} package for '.format(self.name[0]) + \ + # '{}, generated by Flopy.'.format(vn) + # f.write('{0}\n'.format(self.heading)) + + # dataset 1 + f.write(write_fixed_var([self.numgage], free=True)) + + # dataset 2 + for n in range(self.numgage): + gageloc = self.gage_data['gageloc'][n] + gagerch = self.gage_data['gagerch'][n] + iu = self.gage_data['unit'][n] + outtype = self.gage_data['outtype'][n] + t = [gageloc] + if gageloc < 0: + t.append(iu) + if iu < 0: + t.append(outtype) + else: + t.append(gagerch) + t.append(iu) + t.append(outtype) + f.write(write_fixed_var(t, free=True)) + + # close the gage file + f.close() + + @staticmethod + def load(f, model, nper=None, ext_unit_dict=None): + """ + Load an existing package. + + Parameters + ---------- + f : filename or file handle + File to load. + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to + which this package will be added. + nper : int + The number of stress periods. If nper is None, then nper will be + obtained from the model object. (default is None). + ext_unit_dict : dictionary, optional + If the arrays in the file are specified using EXTERNAL, + or older style array control records, then `f` should be a file + handle. In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. + + Returns + ------- + str : ModflowStr object + ModflowStr object. 
+ + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> gage = flopy.modflow.ModflowGage.load('test.gage', m) + + """ + + if model.verbose: + sys.stdout.write('loading gage package file...\n') + + openfile = not hasattr(f, 'read') + if openfile: + filename = f + f = open(filename, 'r', errors='replace') + + # dataset 0 -- header + while True: + line = f.readline().rstrip() + if line[0] != '#': + break + + # read dataset 1 + if model.verbose: + print(" reading gage dataset 1") + t = read_fixed_var(line, free=True) + numgage = int(t[0]) + + if numgage == 0: + gage_data = None + files = None + else: + # read dataset 2 + if model.verbose: + print(" reading gage dataset 2") + + gage_data = ModflowGage.get_empty(ncells=numgage) + files = [] + + for n in range(numgage): + line = f.readline().rstrip() + t = read_fixed_var(line, free=True) + gageloc = int(t[0]) + if gageloc < 0: + gagerch = 0 + iu = int(t[1]) + outtype = 0 + if iu < 0: + outtype = int(t[2]) + else: + gagerch = int(t[1]) + iu = int(t[2]) + outtype = int(t[3]) + gage_data['gageloc'][n] = gageloc + gage_data['gagerch'][n] = gagerch + gage_data['unit'][n] = iu + gage_data['outtype'][n] = outtype + + for key, value in ext_unit_dict.items(): + if key == abs(iu): + model.add_pop_key_list(abs(iu)) + relpth = os.path.relpath(value.filename, + model.model_ws) + files.append(relpth) + break + + if openfile: + f.close() + + # determine specified unit number + unitnumber = None + filenames = [] + if ext_unit_dict is not None: + for key, value in ext_unit_dict.items(): + if value.filetype == ModflowGage.ftype(): + unitnumber = key + filenames.append(os.path.basename(value.filename)) + for file in files: + filenames.append(os.path.basename(file)) + + gagepak = ModflowGage(model, numgage=numgage, + gage_data=gage_data, filenames=filenames, + unitnumber=unitnumber) + return gagepak + + @staticmethod + def ftype(): + return 'GAGE' + + @staticmethod + def defaultunit(): + return 120 diff --git a/flopy/modflow/mfghb.py b/flopy/modflow/mfghb.py index 6bb55bbe19..8d76a307b8 100644 --- a/flopy/modflow/mfghb.py +++ b/flopy/modflow/mfghb.py @@ -1,283 +1,283 @@ -""" -mfghb module. Contains the ModflowGhb class. Note that the user can access -the ModflowGhb class as `flopy.modflow.ModflowGhb`. - -Additional information for this MODFLOW package can be found at the `Online -MODFLOW Guide -`_. - -""" -import sys -import numpy as np -from ..pakbase import Package -from ..utils import MfList -from ..utils.recarray_utils import create_empty_recarray - - -class ModflowGhb(Package): - """ - MODFLOW General-Head Boundary Package Class. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to which - this package will be added. - ipakcb : int - A flag that is used to determine if cell-by-cell budget data should be - saved. If ipakcb is non-zero cell-by-cell budget data will be saved. - (default is 0). - stress_period_data : list of boundaries, recarray of boundaries or, - dictionary of boundaries. - - Each ghb cell is defined through definition of - layer(int), row(int), column(int), stage(float), conductance(float) - The simplest form is a dictionary with a lists of boundaries for each - stress period, where each list of boundaries itself is a list of - boundaries. Indices of the dictionary are the numbers of the stress - period. 
This gives the form of:: - - stress_period_data = - {0: [ - [lay, row, col, stage, cond], - [lay, row, col, stage, cond], - [lay, row, col, stage, cond], - ], - 1: [ - [lay, row, col, stage, cond], - [lay, row, col, stage, cond], - [lay, row, col, stage, cond], - ], ... - kper: - [ - [lay, row, col, stage, cond], - [lay, row, col, stage, cond], - [lay, row, col, stage, cond], - ] - } - - Note that if no values are specified for a certain stress period, then - the list of boundaries for the previous stress period for which values - were defined is used. Full details of all options to specify - stress_period_data can be found in the flopy3boundaries Notebook in - the basic subdirectory of the examples directory - dtype : dtype definition - if data type is different from default - options : list of strings - Package options. (default is None). - extension : string - Filename extension (default is 'ghb') - unitnumber : int - File unit number (default is None). - filenames : str or list of str - Filenames to use for the package and the output files. If - filenames=None the package name will be created using the model name - and package extension and the cbc output name will be created using - the model name and .cbc extension (for example, modflowtest.cbc), - if ipakcbc is a number greater than zero. If a single string is passed - the package will be set to the string and cbc output names will be - created using the model name and .cbc extension, if ipakcbc is a - number greater than zero. To define the names for all package files - (input and output) the length of the list of strings should be 2. - Default is None. - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - Parameters are not supported in FloPy. - - Examples - -------- - - >>> import flopy - >>> ml = flopy.modflow.Modflow() - >>> lrcsc = {0:[2, 3, 4, 10., 100.]} #this ghb will be applied to all - >>> #stress periods - >>> ghb = flopy.modflow.ModflowGhb(ml, stress_period_data=lrcsc) - - """ - - def __init__(self, model, ipakcb=None, stress_period_data=None, dtype=None, - no_print=False, options=None, extension='ghb', - unitnumber=None, filenames=None): - """ - Package constructor. - - """ - - # set default unit number of one is not specified - if unitnumber is None: - unitnumber = ModflowGhb.defaultunit() - - # set filenames - if filenames is None: - filenames = [None, None] - elif isinstance(filenames, str): - filenames = [filenames, None] - elif isinstance(filenames, list): - if len(filenames) < 2: - filenames.append(None) - - # update external file information with cbc output, if necessary - if ipakcb is not None: - fname = filenames[1] - model.add_output_file(ipakcb, fname=fname, - package=ModflowGhb.ftype()) - else: - ipakcb = 0 - - # Fill namefile items - name = [ModflowGhb.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - self.heading = '# {} package for '.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' 
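To make the stress_period_data dictionary form documented above concrete, a short sketch with hypothetical cells (period 0 data carries forward to period 1, as the docstring notes):

import flopy

m = flopy.modflow.Modflow()
# each record is [layer, row, column, stage, conductance]
spd = {0: [[2, 3, 4, 10.0, 100.0]],
       2: [[2, 3, 4, 9.5, 100.0],
           [2, 3, 5, 9.5, 50.0]]}
ghb = flopy.modflow.ModflowGhb(m, stress_period_data=spd)
print(ghb.ncells())  # 2 -> most ghb cells active in any stress period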
- self.url = 'ghb.htm' - - self.ipakcb = ipakcb - self.no_print = no_print - self.np = 0 - if options is None: - options = [] - if self.no_print: - options.append('NOPRINT') - self.options = options - self.parent.add_package(self) - if dtype is not None: - self.dtype = dtype - else: - self.dtype = self.get_default_dtype( - structured=self.parent.structured) - self.stress_period_data = MfList(self, stress_period_data) - - def ncells(self): - """ - Returns the maximum number of cells that have a ghb cell - (developed for MT3DMS SSM package) - """ - return self.stress_period_data.mxact - - def write_file(self, check=True): - """ - Write the package file. - - Parameters - ---------- - check : boolean - Check package data for common errors. (default True) - - Returns - ------- - None - - """ - if check: # allows turning off package checks when writing files at model level - self.check(f='{}.chk'.format(self.name[0]), - verbose=self.parent.verbose, level=1) - f_ghb = open(self.fn_path, 'w') - f_ghb.write('{}\n'.format(self.heading)) - f_ghb.write( - '{:10d}{:10d}'.format(self.stress_period_data.mxact, self.ipakcb)) - for option in self.options: - f_ghb.write(' {}'.format(option)) - f_ghb.write('\n') - self.stress_period_data.write_transient(f_ghb) - f_ghb.close() - - def add_record(self, kper, index, values): - try: - self.stress_period_data.add_record(kper, index, values) - except Exception as e: - raise Exception("mfghb error adding record to list: " + str(e)) - - @staticmethod - def get_empty(ncells=0, aux_names=None, structured=True): - # get an empty recarray that corresponds to dtype - dtype = ModflowGhb.get_default_dtype(structured=structured) - if aux_names is not None: - dtype = Package.add_to_dtype(dtype, aux_names, np.float32) - return create_empty_recarray(ncells, dtype, default_value=-1.0E+10) - - @staticmethod - def get_default_dtype(structured=True): - if structured: - dtype = np.dtype([("k", np.int), ("i", np.int), - ("j", np.int), ("bhead", np.float32), - ("cond", np.float32)]) - else: - dtype = np.dtype([("node", np.int), ("bhead", np.float32), - ("cond", np.float32)]) - return dtype - - @staticmethod - def get_sfac_columns(): - return ['cond'] - - @staticmethod - def load(f, model, nper=None, ext_unit_dict=None, check=True): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. - nper : int - The number of stress periods. If nper is None, then nper will be - obtained from the model object. (default is None). - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - check : boolean - Check package data for common errors. (default True) - - Returns - ------- - ghb : ModflowGhb object - ModflowGhb object. - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> ghb = flopy.modflow.ModflowGhb.load('test.ghb', m) - - """ - - if model.verbose: - sys.stdout.write('loading ghb package file...\n') - - return Package.load(f, model, ModflowGhb, nper=nper, check=check, - ext_unit_dict=ext_unit_dict) - - @staticmethod - def ftype(): - return 'GHB' - - @staticmethod - def defaultunit(): - return 23 +""" +mfghb module. 
Contains the ModflowGhb class. Note that the user can access +the ModflowGhb class as `flopy.modflow.ModflowGhb`. + +Additional information for this MODFLOW package can be found at the `Online +MODFLOW Guide +`_. + +""" +import sys +import numpy as np +from ..pakbase import Package +from ..utils import MfList +from ..utils.recarray_utils import create_empty_recarray + + +class ModflowGhb(Package): + """ + MODFLOW General-Head Boundary Package Class. + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to which + this package will be added. + ipakcb : int + A flag that is used to determine if cell-by-cell budget data should be + saved. If ipakcb is non-zero cell-by-cell budget data will be saved. + (default is 0). + stress_period_data : list of boundaries, recarray of boundaries or, + dictionary of boundaries. + + Each ghb cell is defined through definition of + layer(int), row(int), column(int), stage(float), conductance(float) + The simplest form is a dictionary with a lists of boundaries for each + stress period, where each list of boundaries itself is a list of + boundaries. Indices of the dictionary are the numbers of the stress + period. This gives the form of:: + + stress_period_data = + {0: [ + [lay, row, col, stage, cond], + [lay, row, col, stage, cond], + [lay, row, col, stage, cond], + ], + 1: [ + [lay, row, col, stage, cond], + [lay, row, col, stage, cond], + [lay, row, col, stage, cond], + ], ... + kper: + [ + [lay, row, col, stage, cond], + [lay, row, col, stage, cond], + [lay, row, col, stage, cond], + ] + } + + Note that if no values are specified for a certain stress period, then + the list of boundaries for the previous stress period for which values + were defined is used. Full details of all options to specify + stress_period_data can be found in the flopy3boundaries Notebook in + the basic subdirectory of the examples directory + dtype : dtype definition + if data type is different from default + options : list of strings + Package options. (default is None). + extension : string + Filename extension (default is 'ghb') + unitnumber : int + File unit number (default is None). + filenames : str or list of str + Filenames to use for the package and the output files. If + filenames=None the package name will be created using the model name + and package extension and the cbc output name will be created using + the model name and .cbc extension (for example, modflowtest.cbc), + if ipakcbc is a number greater than zero. If a single string is passed + the package will be set to the string and cbc output names will be + created using the model name and .cbc extension, if ipakcbc is a + number greater than zero. To define the names for all package files + (input and output) the length of the list of strings should be 2. + Default is None. + + Attributes + ---------- + + Methods + ------- + + See Also + -------- + + Notes + ----- + Parameters are not supported in FloPy. + + Examples + -------- + + >>> import flopy + >>> ml = flopy.modflow.Modflow() + >>> lrcsc = {0:[2, 3, 4, 10., 100.]} #this ghb will be applied to all + >>> #stress periods + >>> ghb = flopy.modflow.ModflowGhb(ml, stress_period_data=lrcsc) + + """ + + def __init__(self, model, ipakcb=None, stress_period_data=None, dtype=None, + no_print=False, options=None, extension='ghb', + unitnumber=None, filenames=None): + """ + Package constructor. 
+ + """ + + # set default unit number of one is not specified + if unitnumber is None: + unitnumber = ModflowGhb.defaultunit() + + # set filenames + if filenames is None: + filenames = [None, None] + elif isinstance(filenames, str): + filenames = [filenames, None] + elif isinstance(filenames, list): + if len(filenames) < 2: + filenames.append(None) + + # update external file information with cbc output, if necessary + if ipakcb is not None: + fname = filenames[1] + model.add_output_file(ipakcb, fname=fname, + package=ModflowGhb.ftype()) + else: + ipakcb = 0 + + # Fill namefile items + name = [ModflowGhb.ftype()] + units = [unitnumber] + extra = [''] + + # set package name + fname = [filenames[0]] + + # Call ancestor's init to set self.parent, extension, name and unit number + Package.__init__(self, model, extension=extension, name=name, + unit_number=units, extra=extra, filenames=fname) + + self.heading = '# {} package for '.format(self.name[0]) + \ + ' {}, '.format(model.version_types[model.version]) + \ + 'generated by Flopy.' + self.url = 'ghb.htm' + + self.ipakcb = ipakcb + self.no_print = no_print + self.np = 0 + if options is None: + options = [] + if self.no_print: + options.append('NOPRINT') + self.options = options + self.parent.add_package(self) + if dtype is not None: + self.dtype = dtype + else: + self.dtype = self.get_default_dtype( + structured=self.parent.structured) + self.stress_period_data = MfList(self, stress_period_data) + + def ncells(self): + """ + Returns the maximum number of cells that have a ghb cell + (developed for MT3DMS SSM package) + """ + return self.stress_period_data.mxact + + def write_file(self, check=True): + """ + Write the package file. + + Parameters + ---------- + check : boolean + Check package data for common errors. (default True) + + Returns + ------- + None + + """ + if check: # allows turning off package checks when writing files at model level + self.check(f='{}.chk'.format(self.name[0]), + verbose=self.parent.verbose, level=1) + f_ghb = open(self.fn_path, 'w') + f_ghb.write('{}\n'.format(self.heading)) + f_ghb.write( + '{:10d}{:10d}'.format(self.stress_period_data.mxact, self.ipakcb)) + for option in self.options: + f_ghb.write(' {}'.format(option)) + f_ghb.write('\n') + self.stress_period_data.write_transient(f_ghb) + f_ghb.close() + + def add_record(self, kper, index, values): + try: + self.stress_period_data.add_record(kper, index, values) + except Exception as e: + raise Exception("mfghb error adding record to list: " + str(e)) + + @staticmethod + def get_empty(ncells=0, aux_names=None, structured=True): + # get an empty recarray that corresponds to dtype + dtype = ModflowGhb.get_default_dtype(structured=structured) + if aux_names is not None: + dtype = Package.add_to_dtype(dtype, aux_names, np.float32) + return create_empty_recarray(ncells, dtype, default_value=-1.0E+10) + + @staticmethod + def get_default_dtype(structured=True): + if structured: + dtype = np.dtype([("k", np.int), ("i", np.int), + ("j", np.int), ("bhead", np.float32), + ("cond", np.float32)]) + else: + dtype = np.dtype([("node", np.int), ("bhead", np.float32), + ("cond", np.float32)]) + return dtype + + @staticmethod + def get_sfac_columns(): + return ['cond'] + + @staticmethod + def load(f, model, nper=None, ext_unit_dict=None, check=True): + """ + Load an existing package. + + Parameters + ---------- + f : filename or file handle + File to load. 
+ model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to + which this package will be added. + nper : int + The number of stress periods. If nper is None, then nper will be + obtained from the model object. (default is None). + ext_unit_dict : dictionary, optional + If the arrays in the file are specified using EXTERNAL, + or older style array control records, then `f` should be a file + handle. In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. + check : boolean + Check package data for common errors. (default True) + + Returns + ------- + ghb : ModflowGhb object + ModflowGhb object. + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> ghb = flopy.modflow.ModflowGhb.load('test.ghb', m) + + """ + + if model.verbose: + sys.stdout.write('loading ghb package file...\n') + + return Package.load(f, model, ModflowGhb, nper=nper, check=check, + ext_unit_dict=ext_unit_dict) + + @staticmethod + def ftype(): + return 'GHB' + + @staticmethod + def defaultunit(): + return 23 diff --git a/flopy/modflow/mfgmg.py b/flopy/modflow/mfgmg.py index 1a0ce0bd9b..07e742c4ee 100644 --- a/flopy/modflow/mfgmg.py +++ b/flopy/modflow/mfgmg.py @@ -1,393 +1,393 @@ -""" -mfgmg module. Contains the ModflowGmg class. Note that the user can access -the ModflowGmg class as `flopy.modflow.ModflowGmg`. - -Additional information for this MODFLOW package can be found at the `Online -MODFLOW Guide -`_. - -""" -import sys -from ..pakbase import Package - - -class ModflowGmg(Package): - """ - MODFLOW GMG Package Class. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to which - this package will be added. - mxiter : int - maximum number of outer iterations. (default is 50) - iiter : int - maximum number of inner iterations. (default is 30) - iadamp : int - is a flag that controls adaptive damping. The possible values - of iadamp are. - - If iadamp = 0, then the value assigned to DAMP is used as a constant - damping parameter. - - If iadamp = 1, the value of damp is used for the first nonlinear - iteration. The damping parameter is adaptively varied on the basis - of the head change, using Cooley's method as described in Mehl - and Hill (2001), for subsequent iterations. - - If iadamp = 2, the relative reduced residual damping method documented - in Mehl and Hill (2001) and modified by Banta (2006) is used. - - When iadamp is specified as 2 and the value specified for DAMP is less - than 0.5, the closure criterion for the inner iterations (drclose) is - assigned simply as rclose. When damp is between 0.5 and 1.0, inclusive, - or when iadamp is specified as 0 or 1, drclose is calculated according - to equation 20 on p. 9 of Wilson and Naff (2004). - hclose : float - is the head change criterion for convergence. (default is 1e-5). - rclose : float - is the residual criterion for convergence. (default is 1e-5) - relax : float - is a relaxation parameter for the ILU preconditioned conjugate - gradient method. The relax parameter can be used to improve the - spectral condition number of the ILU preconditioned system. The value - of relax should be approximately one. However, the relaxation parameter - can cause the factorization to break down. If this happens, then the - gmg solver will report an assembly error and a value smaller than one - for relax should be tried. This item is read only if isc = 4. 
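A short configuration sketch pulling these solver knobs together; the values are illustrative, and the keyword names come from the constructor signature shown further below:

import flopy

m = flopy.modflow.Modflow()
# tighter closure than the defaults, ILU(0) smoothing (ism=0), and full
# coarsening (isc=0); relax would only be read if isc were 4
gmg = flopy.modflow.ModflowGmg(m, mxiter=100, iiter=50,
                               hclose=1e-6, rclose=1e-6,
                               ism=0, isc=0)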
- ioutgmg : int - is a flag that controls the output of the gmg solver. The - possible values of ioutgmg are. - - If ioutgmg = 0, then only the solver inputs are printed. - - If ioutgmg = 1, then for each linear solve, the number of pcg - iterations, the value of the damping parameter, the l2norm of - the residual, and the maxnorm of the head change and its location - (column, row, layer) are printed. At the end of a time/stress period, - the total number of gmg calls, pcg iterations, and a running total - of pcg iterations for all time/stress periods are printed. - - If ioutgmg = 2, then the convergence history of the pcg iteration is - printed, showing the l2norm of the residual and the convergence factor - for each iteration. - - ioutgmg = 3 is the same as ioutgmg = 1 except output is sent to the - terminal instead of the modflow list output file. - - ioutgmg = 4 is the same as ioutgmg = 2 except output is sent to the - terminal instead of the modflow list output file. - - (default is 0) - iunitmhc : int - is a flag and a unit number, which controls output of maximum - head change values. If iunitmhc = 0, maximum head change values - are not written to an output file. If iunitmhc > 0, maximum head - change values are written to unit iunitmhc. Unit iunitmhc should - be listed in the Name file with 'DATA' as the file type. If - iunitmhc < 0 or is not present, iunitmhc defaults to 0. - (default is 0) - ism : int - is a flag that controls the type of smoother used in the multigrid - preconditioner. If ism = 0, then ilu(0) smoothing is implemented in - the multigrid preconditioner; this smoothing requires an additional - ector on each multigrid level to store the pivots in the ilu - factorization. If ism = 1, then symmetric gaussseidel (sgs) smoothing - is implemented in the multigrid preconditioner. No additional storage - is required if ism = 1; users may want to use this option if available - memory is exceeded or nearly exceeded when using ism = 0. Using sgs - smoothing is not as robust as ilu smoothing; additional iterations are - likely to be required in reducing the residuals. In extreme cases, the - solver may fail to converge as the residuals cannot be reduced - sufficiently. (default is 0) - isc : int - is a flag that controls semicoarsening in the multigrid - preconditioner. If isc = 0, then the rows, columns and layers are - all coarsened. If isc = 1, then the rows and columns are coarsened, - but the layers are not. If isc = 2, then the columns and layers are - coarsened, but the rows are not. If isc = 3, then the rows and layers - are coarsened, but the columns are not. If isc = 4, then there is no - coarsening. Typically, the value of isc should be 0 or 1. In the case - that there are large vertical variations in the hydraulic - conductivities, then a value of 1 should be used. If no coarsening is - implemented (isc = 4), then the gmg solver is comparable to the pcg2 - ilu(0) solver described in Hill (1990) and uses the least amount of - memory. (default is 0) - damp : float - is the value of the damping parameter. For linear problems, a value - of 1.0 should be used. For nonlinear problems, a value less than 1.0 - but greater than 0.0 may be necessary to achieve convergence. A typical - value for nonlinear problems is 0.5. Damping also helps control the - convergence criterion of the linear solve to alleviate excessive pcg - iterations. (default 1.) 
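And for the adaptive-damping variant described next, a minimal sketch with illustrative values:

import flopy

m = flopy.modflow.Modflow()
# iadamp=2 selects relative reduced-residual damping; dup and dlow bound
# the adaptive damping value, and chglimit caps the head change allowed
# between outer iterations
gmg = flopy.modflow.ModflowGmg(m, iadamp=2, damp=0.5,
                               dup=0.75, dlow=0.01, chglimit=1.0)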
- dup : float - is the maximum damping value that should be applied at any iteration - when the solver is not oscillating; it is dimensionless. An appropriate - value for dup will be problem-dependent. For moderately nonlinear - problems, reasonable values for dup would be in the range 0.5 to 1.0. - For a highly nonlinear problem, a reasonable value for dup could be as - small as 0.1. When the solver is oscillating, a damping value as large - as 2.0 x DUP may be applied. (default is 0.75) - dlow : float - is the minimum damping value to be generated by the adaptive-damping - procedure; it is dimensionless. An appropriate value for dlow will be - problem-dependent and will be smaller than the value specified for dup. - For a highly nonlinear problem, an appropriate value for dlow might be - as small as 0.001. Note that the value specified for the variable, - chglimit, could result in application of a damping value smaller than - dlow. (default is 0.01) - chglimit : float - is the maximum allowed head change at any cell between outer - iterations; it has units of length. The effect of chglimit is to - determine a damping value that, when applied to all elements of the - head-change vector, will produce an absolute maximum head change equal - to chglimit. (default is 1.0) - extension : list string - Filename extension (default is 'gmg') - unitnumber : int - File unit number (default is None). - filenames : str or list of str - Filenames to use for the package and the output files. If - filenames=None the package name will be created using the model name - and package extension and the gmg output name will be created using - the model name and .cbc extension (for example, modflowtest.gmg.out), - if iunitmhc is a number greater than zero. If a single string is passed - the package will be set to the string and gmg output names will be - created using the model name and .gmg.out extension, if iunitmhc is a - number greater than zero. To define the names for all package files - (input and output) the length of the list of strings should be 2. - Default is None. - - Returns - ------- - None - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> gmg = flopy.modflow.ModflowGmg(m) - - - """ - - def __init__(self, model, mxiter=50, iiter=30, iadamp=0, - hclose=1e-5, rclose=1e-5, relax=1.0, ioutgmg=0, - iunitmhc=None, ism=0, isc=0, damp=1.0, dup=0.75, - dlow=0.01, chglimit=1.0, extension='gmg', - unitnumber=None, filenames=None): - """ - Package constructor. 
- - """ - - # set default unit number of one is not specified - if unitnumber is None: - unitnumber = ModflowGmg.defaultunit() - - # set filenames - if filenames is None: - filenames = [None, None] - elif isinstance(filenames, str): - filenames = [filenames, None] - elif isinstance(filenames, list): - if len(filenames) < 2: - filenames.append(None) - - # update external file information with gmg output, if necessary - if iunitmhc is not None: - fname = filenames[1] - model.add_output_file(iunitmhc, fname=fname, extension='gmg.out', - binflag=False, - package=ModflowGmg.ftype()) - else: - iunitmhc = 0 - - # Fill namefile items - name = [ModflowGmg.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - # check if a valid model version has been specified - if model.version == 'mfusg': - err = 'Error: cannot use {} package with model version {}'.format( - self.name, model.version) - raise Exception(err) - - self.heading = '# {} package for '.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' - self.url = 'gmg.htm' - - self.mxiter = mxiter - self.iiter = iiter - self.iadamp = iadamp - self.hclose = hclose - self.rclose = rclose - self.relax = relax - self.ism = ism - self.isc = isc - self.dup = dup - self.dlow = dlow - self.chglimit = chglimit - self.damp = damp - self.ioutgmg = ioutgmg - self.iunitmhc = iunitmhc - self.parent.add_package(self) - - def write_file(self): - """ - Write the package file. - - Returns - ------- - None - - """ - f_gmg = open(self.fn_path, 'w') - f_gmg.write('%s\n' % self.heading) - # dataset 0 - f_gmg.write('{} {} {} {}\n' \ - .format(self.rclose, self.iiter, self.hclose, self.mxiter)) - # dataset 1 - f_gmg.write('{} {} {} {}\n' \ - .format(self.damp, self.iadamp, self.ioutgmg, - self.iunitmhc)) - # dataset 2 - f_gmg.write('{} {} '.format(self.ism, self.isc)) - if self.iadamp == 2: - f_gmg.write('{} {} {}' \ - .format(self.dup, self.dlow, self.chglimit)) - f_gmg.write('\n') - # dataset 3 - f_gmg.write('{}\n'.format(self.relax)) - f_gmg.close() - - @staticmethod - def load(f, model, ext_unit_dict=None): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. 
- - Returns - ------- - gmg : ModflowGmg object - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> gmg = flopy.modflow.ModflowGmg.load('test.gmg', m) - - """ - - if model.verbose: - sys.stdout.write('loading gmg package file...\n') - - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - - # dataset 0 -- header - while True: - line = f.readline() - if line[0] != '#': - break - # dataset 0 - t = line.strip().split() - rclose = float(t[0]) - iiter = int(t[1]) - hclose = float(t[2]) - mxiter = int(t[3]) - # dataset 1 - line = f.readline() - t = line.strip().split() - damp = float(t[0]) - iadamp = int(t[1]) - ioutgmg = int(t[2]) - try: - iunitmhc = int(t[3]) - except: - iunitmhc = 0 - # dataset 2 - line = f.readline() - t = line.strip().split() - ism = int(t[0]) - isc = int(t[1]) - dup, dlow, chglimit = 0.75, 0.01, 1.0 - if iadamp == 2: - dup = float(t[2]) - dlow = float(t[3]) - chglimit = float(t[4]) - # dataset 3 - line = f.readline() - t = line.strip().split() - relax = float(t[0]) - - if openfile: - f.close() - - # determine specified unit number - unitnumber = None - filenames = [None, None] - if ext_unit_dict is not None: - unitnumber, filenames[0] = \ - model.get_ext_dict_attr(ext_unit_dict, - filetype=ModflowGmg.ftype()) - if iunitmhc > 0: - iu, filenames[1] = \ - model.get_ext_dict_attr(ext_unit_dict, unit=iunitmhc) - model.add_pop_key_list(iunitmhc) - - # create the gmg object - gmg = ModflowGmg(model, mxiter=mxiter, iiter=iiter, iadamp=iadamp, - hclose=hclose, rclose=rclose, relax=relax, - ioutgmg=ioutgmg, iunitmhc=iunitmhc, - ism=ism, isc=isc, damp=damp, - dup=dup, dlow=dlow, chglimit=chglimit, - unitnumber=unitnumber) - return gmg - - @staticmethod - def ftype(): - return 'GMG' - - @staticmethod - def defaultunit(): - return 27 +""" +mfgmg module. Contains the ModflowGmg class. Note that the user can access +the ModflowGmg class as `flopy.modflow.ModflowGmg`. + +Additional information for this MODFLOW package can be found at the `Online +MODFLOW Guide +`_. + +""" +import sys +from ..pakbase import Package + + +class ModflowGmg(Package): + """ + MODFLOW GMG Package Class. + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to which + this package will be added. + mxiter : int + maximum number of outer iterations. (default is 50) + iiter : int + maximum number of inner iterations. (default is 30) + iadamp : int + is a flag that controls adaptive damping. The possible values + of iadamp are. + + If iadamp = 0, then the value assigned to DAMP is used as a constant + damping parameter. + + If iadamp = 1, the value of damp is used for the first nonlinear + iteration. The damping parameter is adaptively varied on the basis + of the head change, using Cooley's method as described in Mehl + and Hill (2001), for subsequent iterations. + + If iadamp = 2, the relative reduced residual damping method documented + in Mehl and Hill (2001) and modified by Banta (2006) is used. + + When iadamp is specified as 2 and the value specified for DAMP is less + than 0.5, the closure criterion for the inner iterations (drclose) is + assigned simply as rclose. When damp is between 0.5 and 1.0, inclusive, + or when iadamp is specified as 0 or 1, drclose is calculated according + to equation 20 on p. 9 of Wilson and Naff (2004). + hclose : float + is the head change criterion for convergence. (default is 1e-5). + rclose : float + is the residual criterion for convergence. 
(default is 1e-5) + relax : float + is a relaxation parameter for the ILU preconditioned conjugate + gradient method. The relax parameter can be used to improve the + spectral condition number of the ILU preconditioned system. The value + of relax should be approximately one. However, the relaxation parameter + can cause the factorization to break down. If this happens, then the + gmg solver will report an assembly error and a value smaller than one + for relax should be tried. This item is read only if isc = 4. + ioutgmg : int + is a flag that controls the output of the gmg solver. The + possible values of ioutgmg are. + + If ioutgmg = 0, then only the solver inputs are printed. + + If ioutgmg = 1, then for each linear solve, the number of pcg + iterations, the value of the damping parameter, the l2norm of + the residual, and the maxnorm of the head change and its location + (column, row, layer) are printed. At the end of a time/stress period, + the total number of gmg calls, pcg iterations, and a running total + of pcg iterations for all time/stress periods are printed. + + If ioutgmg = 2, then the convergence history of the pcg iteration is + printed, showing the l2norm of the residual and the convergence factor + for each iteration. + + ioutgmg = 3 is the same as ioutgmg = 1 except output is sent to the + terminal instead of the modflow list output file. + + ioutgmg = 4 is the same as ioutgmg = 2 except output is sent to the + terminal instead of the modflow list output file. + + (default is 0) + iunitmhc : int + is a flag and a unit number, which controls output of maximum + head change values. If iunitmhc = 0, maximum head change values + are not written to an output file. If iunitmhc > 0, maximum head + change values are written to unit iunitmhc. Unit iunitmhc should + be listed in the Name file with 'DATA' as the file type. If + iunitmhc < 0 or is not present, iunitmhc defaults to 0. + (default is 0) + ism : int + is a flag that controls the type of smoother used in the multigrid + preconditioner. If ism = 0, then ilu(0) smoothing is implemented in + the multigrid preconditioner; this smoothing requires an additional + ector on each multigrid level to store the pivots in the ilu + factorization. If ism = 1, then symmetric gaussseidel (sgs) smoothing + is implemented in the multigrid preconditioner. No additional storage + is required if ism = 1; users may want to use this option if available + memory is exceeded or nearly exceeded when using ism = 0. Using sgs + smoothing is not as robust as ilu smoothing; additional iterations are + likely to be required in reducing the residuals. In extreme cases, the + solver may fail to converge as the residuals cannot be reduced + sufficiently. (default is 0) + isc : int + is a flag that controls semicoarsening in the multigrid + preconditioner. If isc = 0, then the rows, columns and layers are + all coarsened. If isc = 1, then the rows and columns are coarsened, + but the layers are not. If isc = 2, then the columns and layers are + coarsened, but the rows are not. If isc = 3, then the rows and layers + are coarsened, but the columns are not. If isc = 4, then there is no + coarsening. Typically, the value of isc should be 0 or 1. In the case + that there are large vertical variations in the hydraulic + conductivities, then a value of 1 should be used. If no coarsening is + implemented (isc = 4), then the gmg solver is comparable to the pcg2 + ilu(0) solver described in Hill (1990) and uses the least amount of + memory. 
(default is 0) + damp : float + is the value of the damping parameter. For linear problems, a value + of 1.0 should be used. For nonlinear problems, a value less than 1.0 + but greater than 0.0 may be necessary to achieve convergence. A typical + value for nonlinear problems is 0.5. Damping also helps control the + convergence criterion of the linear solve to alleviate excessive pcg + iterations. (default 1.) + dup : float + is the maximum damping value that should be applied at any iteration + when the solver is not oscillating; it is dimensionless. An appropriate + value for dup will be problem-dependent. For moderately nonlinear + problems, reasonable values for dup would be in the range 0.5 to 1.0. + For a highly nonlinear problem, a reasonable value for dup could be as + small as 0.1. When the solver is oscillating, a damping value as large + as 2.0 x DUP may be applied. (default is 0.75) + dlow : float + is the minimum damping value to be generated by the adaptive-damping + procedure; it is dimensionless. An appropriate value for dlow will be + problem-dependent and will be smaller than the value specified for dup. + For a highly nonlinear problem, an appropriate value for dlow might be + as small as 0.001. Note that the value specified for the variable, + chglimit, could result in application of a damping value smaller than + dlow. (default is 0.01) + chglimit : float + is the maximum allowed head change at any cell between outer + iterations; it has units of length. The effect of chglimit is to + determine a damping value that, when applied to all elements of the + head-change vector, will produce an absolute maximum head change equal + to chglimit. (default is 1.0) + extension : list string + Filename extension (default is 'gmg') + unitnumber : int + File unit number (default is None). + filenames : str or list of str + Filenames to use for the package and the output files. If + filenames=None the package name will be created using the model name + and package extension and the gmg output name will be created using + the model name and .cbc extension (for example, modflowtest.gmg.out), + if iunitmhc is a number greater than zero. If a single string is passed + the package will be set to the string and gmg output names will be + created using the model name and .gmg.out extension, if iunitmhc is a + number greater than zero. To define the names for all package files + (input and output) the length of the list of strings should be 2. + Default is None. + + Returns + ------- + None + + Attributes + ---------- + + Methods + ------- + + See Also + -------- + + Notes + ----- + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> gmg = flopy.modflow.ModflowGmg(m) + + + """ + + def __init__(self, model, mxiter=50, iiter=30, iadamp=0, + hclose=1e-5, rclose=1e-5, relax=1.0, ioutgmg=0, + iunitmhc=None, ism=0, isc=0, damp=1.0, dup=0.75, + dlow=0.01, chglimit=1.0, extension='gmg', + unitnumber=None, filenames=None): + """ + Package constructor. 
+
+        """
+
+        # set default unit number if one is not specified
+        if unitnumber is None:
+            unitnumber = ModflowGmg.defaultunit()
+
+        # set filenames
+        if filenames is None:
+            filenames = [None, None]
+        elif isinstance(filenames, str):
+            filenames = [filenames, None]
+        elif isinstance(filenames, list):
+            if len(filenames) < 2:
+                filenames.append(None)
+
+        # update external file information with gmg output, if necessary
+        if iunitmhc is not None:
+            fname = filenames[1]
+            model.add_output_file(iunitmhc, fname=fname, extension='gmg.out',
+                                  binflag=False,
+                                  package=ModflowGmg.ftype())
+        else:
+            iunitmhc = 0
+
+        # Fill namefile items
+        name = [ModflowGmg.ftype()]
+        units = [unitnumber]
+        extra = ['']
+
+        # set package name
+        fname = [filenames[0]]
+
+        # Call ancestor's init to set self.parent, extension, name and
+        # unit number
+        Package.__init__(self, model, extension=extension, name=name,
+                         unit_number=units, extra=extra, filenames=fname)
+
+        # check if a valid model version has been specified
+        if model.version == 'mfusg':
+            err = 'Error: cannot use {} package with model version {}'.format(
+                self.name, model.version)
+            raise Exception(err)
+
+        self.heading = '# {} package for '.format(self.name[0]) + \
+                       ' {}, '.format(model.version_types[model.version]) + \
+                       'generated by Flopy.'
+        self.url = 'gmg.htm'
+
+        self.mxiter = mxiter
+        self.iiter = iiter
+        self.iadamp = iadamp
+        self.hclose = hclose
+        self.rclose = rclose
+        self.relax = relax
+        self.ism = ism
+        self.isc = isc
+        self.dup = dup
+        self.dlow = dlow
+        self.chglimit = chglimit
+        self.damp = damp
+        self.ioutgmg = ioutgmg
+        self.iunitmhc = iunitmhc
+        self.parent.add_package(self)
+
+    def write_file(self):
+        """
+        Write the package file.
+
+        Returns
+        -------
+        None
+
+        """
+        f_gmg = open(self.fn_path, 'w')
+        f_gmg.write('{}\n'.format(self.heading))
+        # dataset 0
+        f_gmg.write('{} {} {} {}\n' \
+                    .format(self.rclose, self.iiter, self.hclose, self.mxiter))
+        # dataset 1
+        f_gmg.write('{} {} {} {}\n' \
+                    .format(self.damp, self.iadamp, self.ioutgmg,
+                            self.iunitmhc))
+        # dataset 2
+        f_gmg.write('{} {} '.format(self.ism, self.isc))
+        if self.iadamp == 2:
+            f_gmg.write('{} {} {}' \
+                        .format(self.dup, self.dlow, self.chglimit))
+        f_gmg.write('\n')
+        # dataset 3
+        f_gmg.write('{}\n'.format(self.relax))
+        f_gmg.close()
+
+    @staticmethod
+    def load(f, model, ext_unit_dict=None):
+        """
+        Load an existing package.
+
+        Parameters
+        ----------
+        f : filename or file handle
+            File to load.
+        model : model object
+            The model object (of type :class:`flopy.modflow.mf.Modflow`) to
+            which this package will be added.
+        ext_unit_dict : dictionary, optional
+            If the arrays in the file are specified using EXTERNAL,
+            or older style array control records, then `f` should be a file
+            handle. In this case ext_unit_dict is required, which can be
+            constructed using the function
+            :class:`flopy.utils.mfreadnam.parsenamefile`.
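For orientation, a minimal sketch of the write/load round trip this method supports. The model and file names below are hypothetical, and the isc=1 / ism=1 settings simply follow the docstring's guidance for strong vertical conductivity contrasts and lower memory use:

    import flopy

    m = flopy.modflow.Modflow(modelname='gmgtest', model_ws='.')
    # semicoarsen rows/columns only (isc=1); use the lower-memory
    # sgs smoother (ism=1) instead of ilu(0)
    gmg = flopy.modflow.ModflowGmg(m, hclose=1e-5, rclose=1e-5,
                                   isc=1, ism=1)
    gmg.write_file()  # writes gmgtest.gmg in the model workspace

    # re-read the file into a fresh model object
    m2 = flopy.modflow.Modflow(modelname='gmgtest', model_ws='.')
    gmg2 = flopy.modflow.ModflowGmg.load('gmgtest.gmg', m2)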
+ + Returns + ------- + gmg : ModflowGmg object + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> gmg = flopy.modflow.ModflowGmg.load('test.gmg', m) + + """ + + if model.verbose: + sys.stdout.write('loading gmg package file...\n') + + openfile = not hasattr(f, 'read') + if openfile: + filename = f + f = open(filename, 'r') + + # dataset 0 -- header + while True: + line = f.readline() + if line[0] != '#': + break + # dataset 0 + t = line.strip().split() + rclose = float(t[0]) + iiter = int(t[1]) + hclose = float(t[2]) + mxiter = int(t[3]) + # dataset 1 + line = f.readline() + t = line.strip().split() + damp = float(t[0]) + iadamp = int(t[1]) + ioutgmg = int(t[2]) + try: + iunitmhc = int(t[3]) + except: + iunitmhc = 0 + # dataset 2 + line = f.readline() + t = line.strip().split() + ism = int(t[0]) + isc = int(t[1]) + dup, dlow, chglimit = 0.75, 0.01, 1.0 + if iadamp == 2: + dup = float(t[2]) + dlow = float(t[3]) + chglimit = float(t[4]) + # dataset 3 + line = f.readline() + t = line.strip().split() + relax = float(t[0]) + + if openfile: + f.close() + + # determine specified unit number + unitnumber = None + filenames = [None, None] + if ext_unit_dict is not None: + unitnumber, filenames[0] = \ + model.get_ext_dict_attr(ext_unit_dict, + filetype=ModflowGmg.ftype()) + if iunitmhc > 0: + iu, filenames[1] = \ + model.get_ext_dict_attr(ext_unit_dict, unit=iunitmhc) + model.add_pop_key_list(iunitmhc) + + # create the gmg object + gmg = ModflowGmg(model, mxiter=mxiter, iiter=iiter, iadamp=iadamp, + hclose=hclose, rclose=rclose, relax=relax, + ioutgmg=ioutgmg, iunitmhc=iunitmhc, + ism=ism, isc=isc, damp=damp, + dup=dup, dlow=dlow, chglimit=chglimit, + unitnumber=unitnumber) + return gmg + + @staticmethod + def ftype(): + return 'GMG' + + @staticmethod + def defaultunit(): + return 27 diff --git a/flopy/modflow/mfhfb.py b/flopy/modflow/mfhfb.py index 3c694fb865..a8f25f5e96 100644 --- a/flopy/modflow/mfhfb.py +++ b/flopy/modflow/mfhfb.py @@ -1,392 +1,392 @@ -""" -mfhfb module. Contains the ModflowHfb class. Note that the user can access -the ModflowHfb class as `flopy.modflow.ModflowHfb`. - -Additional information for this MODFLOW package can be found at the `Online -MODFLOW Guide -`_. - -""" -import sys -import numpy as np -from ..pakbase import Package -from .mfparbc import ModflowParBc as mfparbc -from numpy.lib.recfunctions import stack_arrays -from ..utils.flopy_io import line_parse -from ..utils.recarray_utils import create_empty_recarray - - -class ModflowHfb(Package): - """ - MODFLOW HFB6 - Horizontal Flow Barrier Package - - Parameters - ---------- - model : model object - The model object (of type: class:`flopy.modflow.mf.Modflow`) to - which this package will be added. - nphfb : int - Number of horizontal-flow barrier parameters. Note that for an HFB - parameter to have an effect in the simulation, it must be defined - and made active using NACTHFB to have an effect in the simulation - (default is 0). - mxfb : int - Maximum number of horizontal-flow barrier barriers that will be - defined using parameters (default is 0). - nhfbnp: int - Number of horizontal-flow barriers not defined by parameters. This - is calculated automatically by FloPy based on the information in - layer_row_column_data (default is 0). - hfb_data : list of records - - In its most general form, this is a list of horizontal-flow - barrier records. A barrier is conceptualized as being located on - the boundary between two adjacent finite difference cells in the - same layer. 
The innermost list is the layer, row1, column1, row2, - column2, and hydrologic characteristics for a single hfb between - the cells. The hydraulic characteristic is the barrier hydraulic - conductivity divided by the width of the horizontal-flow barrier. - (default is None). - This gives the form of:: - - hfb_data = [ - [lay, row1, col1, row2, col2, hydchr], - [lay, row1, col1, row2, col2, hydchr], - [lay, row1, col1, row2, col2, hydchr], - ]. - - nacthfb : int - The number of active horizontal-flow barrier parameters - (default is 0). - no_print : boolean - When True or 1, a list of horizontal flow barriers will not be - written to the Listing File (default is False) - options : list of strings - Package options (default is None). - extension : string - Filename extension (default is 'hfb'). - unitnumber : int - File unit number (default is None). - filenames : str or list of str - Filenames to use for the package. If filenames=None the package name - will be created using the model name and package extension. If a - single string is passed the package will be set to the string. - Default is None. - - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - Parameters are supported in Flopy only when reading in existing models. - Parameter values are converted to native values in Flopy and the - connection to "parameters" is thus nonexistent. - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> hfb_data = [[0, 10, 4, 10, 5, 0.01],[1, 10, 4, 10, 5, 0.01]] - >>> hfb = flopy.modflow.ModflowHfb(m, hfb_data=hfb_data) - - """ - - def __init__(self, model, nphfb=0, mxfb=0, nhfbnp=0, - hfb_data=None, nacthfb=0, no_print=False, - options=None, extension='hfb', unitnumber=None, - filenames=None): - - # set default unit number of one is not specified - if unitnumber is None: - unitnumber = ModflowHfb.defaultunit() - - # set filenames - if filenames is None: - filenames = [None] - elif isinstance(filenames, str): - filenames = [filenames] - - # Fill namefile items - name = [ModflowHfb.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - self.heading = '# {} package for '.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' - self.url = 'hfb6.htm' - - self.nphfb = nphfb - self.mxfb = mxfb - - self.nacthfb = nacthfb - - self.no_print = no_print - self.np = 0 - if options is None: - options = [] - if self.no_print: - options.append('NOPRINT') - self.options = options - - aux_names = [] - it = 0 - while it < len(options): - if 'aux' in options[it].lower(): - aux_names.append(options[it + 1].lower()) - it += 1 - it += 1 - - if hfb_data is None: - raise Exception('Failed to specify hfb_data.') - - self.nhfbnp = len(hfb_data) - self.hfb_data = ModflowHfb.get_empty(self.nhfbnp) - for ibnd, t in enumerate(hfb_data): - self.hfb_data[ibnd] = tuple(t) - - self.parent.add_package(self) - - def ncells(self): - """ - Returns the maximum number of cell pairs that have horizontal - flow barriers (developed for MT3DMS SSM package) - - """ - return self.nhfbnp - - def write_file(self): - """ - Write the package file. 
- - Returns - ------- - None - - """ - f_hfb = open(self.fn_path, 'w') - f_hfb.write('{}\n'.format(self.heading)) - f_hfb.write( - '{:10d}{:10d}{:10d}'.format(self.nphfb, self.mxfb, self.nhfbnp)) - for option in self.options: - f_hfb.write(' {}'.format(option)) - f_hfb.write('\n') - for a in self.hfb_data: - f_hfb.write( - '{:10d}{:10d}{:10d}{:10d}{:10d}{:13.6g}\n'.format(a[0] + 1, - a[1] + 1, - a[2] + 1, - a[3] + 1, - a[4] + 1, - a[5])) - f_hfb.write('{:10d}'.format(self.nacthfb)) - f_hfb.close() - - @staticmethod - def get_empty(ncells=0, aux_names=None, structured=True): - """ - Get an empty recarray that corresponds to hfb dtype and has - been extended to include aux variables and associated - aux names. - - """ - dtype = ModflowHfb.get_default_dtype(structured=structured) - if aux_names is not None: - dtype = Package.add_to_dtype(dtype, aux_names, np.float32) - return create_empty_recarray(ncells, dtype, default_value=-1.0E+10) - - @staticmethod - def get_default_dtype(structured=True): - """ - Get the default dtype for hfb data - - """ - if structured: - dtype = np.dtype([("k", np.int), - ("irow1", np.int), ("icol1", np.int), - ("irow2", np.int), ("icol2", np.int), - ("hydchr", np.float32)]) - else: - assert not structured, 'is there an unstructured HFB???' - return dtype - - @staticmethod - def get_sfac_columns(): - return ['hydchr'] - - @staticmethod - def load(f, model, ext_unit_dict=None): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type: class:`flopy.modflow.mf.Modflow`) - to which this package will be added. - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. 
- - Returns - ------- - hfb : ModflowHfb object - ModflowHfb object (of type :class:`flopy.modflow.mfbas.ModflowHfb`) - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> hfb = flopy.modflow.ModflowHfb.load('test.hfb', m) - - """ - - if model.verbose: - sys.stdout.write('loading hfb6 package file...\n') - - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - - # dataset 0 -- header - while True: - line = f.readline() - if line[0] != '#': - break - # dataset 1 - t = line_parse(line) - nphfb = int(t[0]) - mxfb = int(t[1]) - nhfbnp = int(t[2]) - # check for no-print suppressor - options = [] - aux_names = [] - if len(t) > 2: - it = 2 - while it < len(t): - toption = t[it] - # print it, t[it] - if toption.lower() == 'noprint': - options.append(toption) - elif 'aux' in toption.lower(): - options.append(' '.join(t[it:it + 2])) - aux_names.append(t[it + 1].lower()) - it += 1 - it += 1 - # data set 2 and 3 - if nphfb > 0: - dt = ModflowHfb.get_empty(1).dtype - pak_parms = mfparbc.load(f, nphfb, dt, model, - ext_unit_dict=ext_unit_dict, - verbose=model.verbose) - # data set 4 - bnd_output = None - if nhfbnp > 0: - specified = ModflowHfb.get_empty(nhfbnp) - for ibnd in range(nhfbnp): - line = f.readline() - if "open/close" in line.lower(): - raise NotImplementedError( - "load() method does not support \'open/close\'") - t = line.strip().split() - specified[ibnd] = tuple(t[:len(specified.dtype.names)]) - - # convert indices to zero-based - specified['k'] -= 1 - specified['irow1'] -= 1 - specified['icol1'] -= 1 - specified['irow2'] -= 1 - specified['icol2'] -= 1 - - bnd_output = np.recarray.copy(specified) - - if nphfb > 0: - partype = ['hydchr'] - line = f.readline() - t = line.strip().split() - nacthfb = int(t[0]) - for iparm in range(nacthfb): - line = f.readline() - t = line.strip().split() - pname = t[0].lower() - iname = 'static' - par_dict, current_dict = pak_parms.get(pname) - data_dict = current_dict[iname] - par_current = ModflowHfb.get_empty(par_dict['nlst']) - - # - if model.mfpar.pval is None: - parval = np.float(par_dict['parval']) - else: - try: - parval = np.float(model.mfpar.pval.pval_dict[pname]) - except: - parval = np.float(par_dict['parval']) - - # fill current parameter data (par_current) - for ibnd, t in enumerate(data_dict): - t = tuple(t) - par_current[ibnd] = tuple(t[:len(par_current.dtype.names)]) - - # convert indices to zero-based - par_current['k'] -= 1 - par_current['irow1'] -= 1 - par_current['icol1'] -= 1 - par_current['irow2'] -= 1 - par_current['icol2'] -= 1 - - for ptype in partype: - par_current[ptype] *= parval - - if bnd_output is None: - bnd_output = np.recarray.copy(par_current) - else: - bnd_output = stack_arrays((bnd_output, par_current), - asrecarray=True, usemask=False) - - if openfile: - f.close() - - # set package unit number - unitnumber = None - filenames = [None] - if ext_unit_dict is not None: - unitnumber, filenames[0] = \ - model.get_ext_dict_attr(ext_unit_dict, - filetype=ModflowHfb.ftype()) - - hfb = ModflowHfb(model, nphfb=0, mxfb=0, nhfbnp=len(bnd_output), - hfb_data=bnd_output, - nacthfb=0, options=options, unitnumber=unitnumber, - filenames=filenames) - return hfb - - @staticmethod - def ftype(): - return 'HFB6' - - @staticmethod - def defaultunit(): - return 29 +""" +mfhfb module. Contains the ModflowHfb class. Note that the user can access +the ModflowHfb class as `flopy.modflow.ModflowHfb`. 
+
+Additional information for this MODFLOW package can be found at the `Online
+MODFLOW Guide
+`_.
+
+"""
+import sys
+import numpy as np
+from ..pakbase import Package
+from .mfparbc import ModflowParBc as mfparbc
+from numpy.lib.recfunctions import stack_arrays
+from ..utils.flopy_io import line_parse
+from ..utils.recarray_utils import create_empty_recarray
+
+
+class ModflowHfb(Package):
+    """
+    MODFLOW HFB6 - Horizontal Flow Barrier Package
+
+    Parameters
+    ----------
+    model : model object
+        The model object (of type :class:`flopy.modflow.mf.Modflow`) to
+        which this package will be added.
+    nphfb : int
+        Number of horizontal-flow barrier parameters. Note that for an HFB
+        parameter to have an effect in the simulation, it must be defined
+        and made active using NACTHFB (default is 0).
+    mxfb : int
+        Maximum number of horizontal-flow barriers that will be
+        defined using parameters (default is 0).
+    nhfbnp : int
+        Number of horizontal-flow barriers not defined by parameters. This
+        is calculated automatically by FloPy based on the information in
+        layer_row_column_data (default is 0).
+    hfb_data : list of records
+
+        In its most general form, this is a list of horizontal-flow
+        barrier records. A barrier is conceptualized as being located on
+        the boundary between two adjacent finite difference cells in the
+        same layer. The innermost list is the layer, row1, column1, row2,
+        column2, and hydraulic characteristic for a single hfb between
+        the cells. The hydraulic characteristic is the barrier hydraulic
+        conductivity divided by the width of the horizontal-flow barrier.
+        (default is None).
+        This gives the form of::
+
+            hfb_data = [
+                       [lay, row1, col1, row2, col2, hydchr],
+                       [lay, row1, col1, row2, col2, hydchr],
+                       [lay, row1, col1, row2, col2, hydchr],
+                       ]
+
+    nacthfb : int
+        The number of active horizontal-flow barrier parameters
+        (default is 0).
+    no_print : boolean
+        When True or 1, a list of horizontal flow barriers will not be
+        written to the Listing File (default is False)
+    options : list of strings
+        Package options (default is None).
+    extension : string
+        Filename extension (default is 'hfb').
+    unitnumber : int
+        File unit number (default is None).
+    filenames : str or list of str
+        Filenames to use for the package. If filenames=None the package name
+        will be created using the model name and package extension. If a
+        single string is passed the package will be set to the string.
+        Default is None.
+
+
+    Attributes
+    ----------
+
+    Methods
+    -------
+
+    See Also
+    --------
+
+    Notes
+    -----
+    Parameters are supported in Flopy only when reading in existing models.
+    Parameter values are converted to native values in Flopy and the
+    connection to "parameters" is thus nonexistent.
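To make the note on parameters concrete: however the barriers are supplied (typed in directly, or converted from parameter data by load()), they are stored as native values in a structured recarray. A minimal sketch using the zero-based record layout documented above:

    import flopy

    m = flopy.modflow.Modflow()
    # one record per barrier face: [lay, row1, col1, row2, col2, hydchr],
    # where hydchr = barrier hydraulic conductivity / barrier width
    hfb = flopy.modflow.ModflowHfb(m, hfb_data=[[0, 10, 4, 10, 5, 0.01]])
    print(hfb.hfb_data.dtype.names)
    # ('k', 'irow1', 'icol1', 'irow2', 'icol2', 'hydchr')
    print(hfb.ncells())  # 1 barrier cell pair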
+
+    Examples
+    --------
+
+    >>> import flopy
+    >>> m = flopy.modflow.Modflow()
+    >>> hfb_data = [[0, 10, 4, 10, 5, 0.01], [1, 10, 4, 10, 5, 0.01]]
+    >>> hfb = flopy.modflow.ModflowHfb(m, hfb_data=hfb_data)
+
+    """
+
+    def __init__(self, model, nphfb=0, mxfb=0, nhfbnp=0,
+                 hfb_data=None, nacthfb=0, no_print=False,
+                 options=None, extension='hfb', unitnumber=None,
+                 filenames=None):
+
+        # set default unit number if one is not specified
+        if unitnumber is None:
+            unitnumber = ModflowHfb.defaultunit()
+
+        # set filenames
+        if filenames is None:
+            filenames = [None]
+        elif isinstance(filenames, str):
+            filenames = [filenames]
+
+        # Fill namefile items
+        name = [ModflowHfb.ftype()]
+        units = [unitnumber]
+        extra = ['']
+
+        # set package name
+        fname = [filenames[0]]
+
+        # Call ancestor's init to set self.parent, extension, name and
+        # unit number
+        Package.__init__(self, model, extension=extension, name=name,
+                         unit_number=units, extra=extra, filenames=fname)
+
+        self.heading = '# {} package for '.format(self.name[0]) + \
+                       ' {}, '.format(model.version_types[model.version]) + \
+                       'generated by Flopy.'
+        self.url = 'hfb6.htm'
+
+        self.nphfb = nphfb
+        self.mxfb = mxfb
+
+        self.nacthfb = nacthfb
+
+        self.no_print = no_print
+        self.np = 0
+        if options is None:
+            options = []
+        if self.no_print:
+            options.append('NOPRINT')
+        self.options = options
+
+        aux_names = []
+        it = 0
+        while it < len(options):
+            if 'aux' in options[it].lower():
+                aux_names.append(options[it + 1].lower())
+                it += 1
+            it += 1
+
+        if hfb_data is None:
+            raise Exception('Failed to specify hfb_data.')
+
+        self.nhfbnp = len(hfb_data)
+        self.hfb_data = ModflowHfb.get_empty(self.nhfbnp)
+        for ibnd, t in enumerate(hfb_data):
+            self.hfb_data[ibnd] = tuple(t)
+
+        self.parent.add_package(self)
+
+    def ncells(self):
+        """
+        Returns the maximum number of cell pairs that have horizontal
+        flow barriers (developed for MT3DMS SSM package)
+
+        """
+        return self.nhfbnp
+
+    def write_file(self):
+        """
+        Write the package file.
+ model : model object + The model object (of type: class:`flopy.modflow.mf.Modflow`) + to which this package will be added. + ext_unit_dict : dictionary, optional + If the arrays in the file are specified using EXTERNAL, + or older style array control records, then `f` should be a file + handle. In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. + + Returns + ------- + hfb : ModflowHfb object + ModflowHfb object (of type :class:`flopy.modflow.mfbas.ModflowHfb`) + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> hfb = flopy.modflow.ModflowHfb.load('test.hfb', m) + + """ + + if model.verbose: + sys.stdout.write('loading hfb6 package file...\n') + + openfile = not hasattr(f, 'read') + if openfile: + filename = f + f = open(filename, 'r') + + # dataset 0 -- header + while True: + line = f.readline() + if line[0] != '#': + break + # dataset 1 + t = line_parse(line) + nphfb = int(t[0]) + mxfb = int(t[1]) + nhfbnp = int(t[2]) + # check for no-print suppressor + options = [] + aux_names = [] + if len(t) > 2: + it = 2 + while it < len(t): + toption = t[it] + # print it, t[it] + if toption.lower() == 'noprint': + options.append(toption) + elif 'aux' in toption.lower(): + options.append(' '.join(t[it:it + 2])) + aux_names.append(t[it + 1].lower()) + it += 1 + it += 1 + # data set 2 and 3 + if nphfb > 0: + dt = ModflowHfb.get_empty(1).dtype + pak_parms = mfparbc.load(f, nphfb, dt, model, + ext_unit_dict=ext_unit_dict, + verbose=model.verbose) + # data set 4 + bnd_output = None + if nhfbnp > 0: + specified = ModflowHfb.get_empty(nhfbnp) + for ibnd in range(nhfbnp): + line = f.readline() + if "open/close" in line.lower(): + raise NotImplementedError( + "load() method does not support \'open/close\'") + t = line.strip().split() + specified[ibnd] = tuple(t[:len(specified.dtype.names)]) + + # convert indices to zero-based + specified['k'] -= 1 + specified['irow1'] -= 1 + specified['icol1'] -= 1 + specified['irow2'] -= 1 + specified['icol2'] -= 1 + + bnd_output = np.recarray.copy(specified) + + if nphfb > 0: + partype = ['hydchr'] + line = f.readline() + t = line.strip().split() + nacthfb = int(t[0]) + for iparm in range(nacthfb): + line = f.readline() + t = line.strip().split() + pname = t[0].lower() + iname = 'static' + par_dict, current_dict = pak_parms.get(pname) + data_dict = current_dict[iname] + par_current = ModflowHfb.get_empty(par_dict['nlst']) + + # + if model.mfpar.pval is None: + parval = np.float(par_dict['parval']) + else: + try: + parval = np.float(model.mfpar.pval.pval_dict[pname]) + except: + parval = np.float(par_dict['parval']) + + # fill current parameter data (par_current) + for ibnd, t in enumerate(data_dict): + t = tuple(t) + par_current[ibnd] = tuple(t[:len(par_current.dtype.names)]) + + # convert indices to zero-based + par_current['k'] -= 1 + par_current['irow1'] -= 1 + par_current['icol1'] -= 1 + par_current['irow2'] -= 1 + par_current['icol2'] -= 1 + + for ptype in partype: + par_current[ptype] *= parval + + if bnd_output is None: + bnd_output = np.recarray.copy(par_current) + else: + bnd_output = stack_arrays((bnd_output, par_current), + asrecarray=True, usemask=False) + + if openfile: + f.close() + + # set package unit number + unitnumber = None + filenames = [None] + if ext_unit_dict is not None: + unitnumber, filenames[0] = \ + model.get_ext_dict_attr(ext_unit_dict, + filetype=ModflowHfb.ftype()) + + hfb = ModflowHfb(model, nphfb=0, mxfb=0, 
nhfbnp=len(bnd_output), + hfb_data=bnd_output, + nacthfb=0, options=options, unitnumber=unitnumber, + filenames=filenames) + return hfb + + @staticmethod + def ftype(): + return 'HFB6' + + @staticmethod + def defaultunit(): + return 29 diff --git a/flopy/modflow/mfhob.py b/flopy/modflow/mfhob.py index 8d3034efae..f7e0ce23d9 100755 --- a/flopy/modflow/mfhob.py +++ b/flopy/modflow/mfhob.py @@ -1,678 +1,678 @@ -import sys -import collections -import numpy as np -from ..pakbase import Package -from ..utils.recarray_utils import create_empty_recarray - - -class ModflowHob(Package): - """ - Head Observation package class - - Parameters - ---------- - iuhobsv : int - unit number where output is saved. If iuhobsv is None, a unit number - will be assigned (default is None). - hobdry : float - Value of the simulated equivalent written to the observation output - file when the observation is omitted because a cell is dry - (default is 0). - tomulth : float - Time step multiplier for head observations. The product of tomulth and - toffset must produce a time value in units consistent with other model - input. tomulth can be dimensionless or can be used to convert the units - of toffset to the time unit used in the simulation (default is 1). - obs_data : HeadObservation or list of HeadObservation instances - A single HeadObservation instance or a list of HeadObservation - instances containing all of the data for each observation. If obs_data - is None a default HeadObservation with an observation in layer, row, - column (0, 0, 0) and a head value of 0 at totim 0 will be created - (default is None). - hobname : str - Name of head observation output file. If iuhobsv is greater than 0, - and hobname is None, the model basename with a '.hob.out' extension - will be used (default is None). - extension : string - Filename extension (default is hob) - no_print : boolean - When True or 1, a list of head observations will not be - written to the Listing File (default is False) - options : list of strings - Package options (default is None). - unitnumber : int - File unit number (default is None) - filenames : str or list of str - Filenames to use for the package and the output files. If filenames - is None the package name will be created using the model name and - package extension and the hob output name will be created using the - model name and .hob.out extension (for example, modflowtest.hob.out), - if iuhobsv is a number greater than zero. If a single string is passed - the package will be set to the string and hob output name will be - created using the model name and .hob.out extension, if iuhobsv is a - number greater than zero. To define the names for all package files - (input and output) the length of the list of strings should be 2. - Default is None. - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - - Examples - -------- - - >>> import flopy - >>> model = flopy.modflow.Modflow() - >>> dis = flopy.modflow.ModflowDis(model, nlay=1, nrow=11, ncol=11, nper=2, - ... perlen=[1,1]) - >>> tsd = [[1.,54.4], [2., 55.2]] - >>> obsdata = flopy.modflow.HeadObservation(model, layer=0, row=5, - ... column=5, time_series_data=tsd) - >>> hob = flopy.modflow.ModflowHob(model, iuhobsv=51, hobdry=-9999., - ... 
obs_data=obsdata) - - - """ - - def __init__(self, model, iuhobsv=None, hobdry=0, tomulth=1.0, - obs_data=None, hobname=None, extension='hob', - no_print=False, options=None, - unitnumber=None, filenames=None): - """ - Package constructor - """ - # set default unit number of one is not specified - if unitnumber is None: - unitnumber = ModflowHob.defaultunit() - - # set filenames - if filenames is None: - filenames = [None, None] - elif isinstance(filenames, str): - filenames = [filenames, None] - elif isinstance(filenames, list): - if len(filenames) < 2: - filenames.append(None) - - # set filenames[1] to hobname if filenames[1] is not None - if filenames[1] is None: - if hobname is not None: - filenames[1] = hobname - - if iuhobsv is not None: - fname = filenames[1] - model.add_output_file(iuhobsv, fname=fname, - extension='hob.out', binflag=False, - package=ModflowHob.ftype()) - else: - iuhobsv = 0 - - # Fill namefile items - name = [ModflowHob.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, - # extension, name and unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - self.url = 'hob.htm' - self.heading = '# {} package for '.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' - - self.iuhobsv = iuhobsv - self.hobdry = hobdry - self.tomulth = tomulth - - # create default - if obs_data is None: - obs_data = HeadObservation(model) - - # make sure obs_data is a list - if isinstance(obs_data, HeadObservation): - obs_data = [obs_data] - - # set self.obs_data - self.obs_data = obs_data - - self.no_print = no_print - self.np = 0 - if options is None: - options = [] - if self.no_print: - options.append('NOPRINT') - self.options = options - - # add checks for input compliance (obsnam length, etc.) 
- self.parent.add_package(self) - - def _set_dimensions(self): - """ - Set the length of the obs_data list - - Returns - ------- - None - - """ - # make sure each entry of obs_data list is a HeadObservation instance - # and calculate nh, mobs, and maxm - msg = '' - self.nh = 0 - self.mobs = 0 - self.maxm = 0 - for idx, obs in enumerate(self.obs_data): - if not isinstance(obs, HeadObservation): - msg += 'ModflowHob: obs_data entry {} '.format(idx) + \ - 'is not a HeadObservation instance.\n' - continue - self.nh += obs.nobs - if obs.multilayer: - self.mobs += obs.nobs - self.maxm = max(self.maxm, obs.maxm) - if msg != '': - raise ValueError(msg) - return - - def write_file(self): - """ - Write the package file - - Returns - ------- - None - - """ - # determine the dimensions of HOB data - self._set_dimensions() - - # open file for writing - f = open(self.fn_path, 'w') - - # write dataset 0 - f.write('{}\n'.format(self.heading)) - - # write dataset 1 - f.write('{:10d}'.format(self.nh)) - f.write('{:10d}'.format(self.mobs)) - f.write('{:10d}'.format(self.maxm)) - f.write('{:10d}'.format(self.iuhobsv)) - f.write('{:10.4g}'.format(self.hobdry)) - if self.no_print or 'NOPRINT' in self.options: - f.write('{: >10}'.format('NOPRINT')) - f.write('\n') - - # write dataset 2 - f.write('{:10.4g}\n'.format(self.tomulth)) - - # write datasets 3-6 - for idx, obs in enumerate(self.obs_data): - # dataset 3 - obsname = obs.obsname - if isinstance(obsname, bytes): - obsname = obsname.decode('utf-8') - line = '{:12s} '.format(obsname) - layer = obs.layer - if layer >= 0: - layer += 1 - line += '{:10d} '.format(layer) - line += '{:10d} '.format(obs.row + 1) - line += '{:10d} '.format(obs.column + 1) - irefsp = obs.irefsp - if irefsp >= 0: - irefsp += 1 - line += '{:10d} '.format(irefsp) - if obs.nobs == 1: - toffset = obs.time_series_data[0]['toffset'] - hobs = obs.time_series_data[0]['hobs'] - else: - toffset = 0. - hobs = 0. - line += '{:20} '.format(toffset) - line += '{:10.4f} '.format(obs.roff) - line += '{:10.4f} '.format(obs.coff) - line += '{:10.4f} '.format(hobs) - line += ' # DATASET 3 - Observation {}'.format(idx + 1) - f.write('{}\n'.format(line)) - - # dataset 4 - if len(obs.mlay.keys()) > 1: - line = '' - for key, value in iter(obs.mlay.items()): - line += '{:5d}{:10.4f}'.format(key + 1, value) - line += ' # DATASET 4 - Observation {}'.format(idx + 1) - f.write('{}\n'.format(line)) - - # dataset 5 - if irefsp < 0: - line = '{:10d}'.format(obs.itt) - line += 103 * ' ' - line += ' # DATASET 5 - Observation {}'.format(idx + 1) - f.write('{}\n'.format(line)) - - # dataset 6: - if obs.nobs > 1: - for jdx, t in enumerate(obs.time_series_data): - obsname = t['obsname'] - if isinstance(obsname, bytes): - obsname = obsname.decode('utf-8') - line = '{:12s} '.format(obsname) - line += '{:10d} '.format(t['irefsp'] + 1) - line += '{:20} '.format(t['toffset']) - line += '{:10.4f} '.format(t['hobs']) - line += 55 * ' ' - line += ' # DATASET 6 - ' + \ - 'Observation {}.{}'.format(idx + 1, jdx + 1) - f.write('{}\n'.format(line)) - - # close the hob package file - f.close() - - return - - @staticmethod - def load(f, model, ext_unit_dict=None, check=True): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. 
- ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - check : boolean - Check package data for common errors. (default True) - - Returns - ------- - hob : ModflowHob package object - ModflowHob package object. - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> hobs = flopy.modflow.ModflowHob.load('test.hob', m) - - """ - - if model.verbose: - sys.stdout.write('loading hob package file...\n') - - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - - # dataset 0 -- header - while True: - line = f.readline() - if line[0] != '#': - break - - # read dataset 1 - t = line.strip().split() - nh = int(t[0]) - iuhobsv = None - hobdry = 0 - if len(t) > 3: - iuhobsv = int(t[3]) - hobdry = float(t[4]) - - # read dataset 2 - line = f.readline() - t = line.strip().split() - tomulth = float(t[0]) - - # read observation data - obs_data = [] - - # read datasets 3-6 - nobs = 0 - while True: - # read dataset 3 - line = f.readline() - t = line.strip().split() - obsnam = t[0] - layer = int(t[1]) - row = int(t[2]) - 1 - col = int(t[3]) - 1 - irefsp0 = int(t[4]) - toffset = float(t[5]) - roff = float(t[6]) - coff = float(t[7]) - hob = float(t[8]) - - # read dataset 4 if multilayer obs - if layer > 0: - layer -= 1 - mlay = {layer: 1.} - else: - line = f.readline() - t = line.strip().split() - mlay = collections.OrderedDict() - if len(t) >= abs(layer) * 2: - for j in range(0, abs(layer) * 2, 2): - k = int(t[j]) - 1 - # catch case where the same layer is specified - # more than once. In this case add previous - # value to the current value - keys = list(mlay.keys()) - v = 0. - if k in keys: - v = mlay[k] - mlay[k] = float(t[j + 1]) + v - else: - for j in range(abs(layer)): - k = int(t[0]) - 1 - keys = list(mlay.keys()) - v = 0. - if k in keys: - v = mlay[k] - mlay[k] = float(t[1]) + v - - if j != abs(layer) - 1: - line = f.readline() - t = line.strip().split() - # reset layer - layer = -len(list(mlay.keys())) - - # read datasets 5 & 6. 
Index loop variable - if irefsp0 > 0: - itt = 1 - irefsp0 -= 1 - totim = model.dis.get_totim_from_kper_toffset( - irefsp0, - toffset * tomulth) - names = [obsnam] - tsd = [totim, hob] - nobs += 1 - else: - names = [] - tsd = [] - # read data set 5 - line = f.readline() - t = line.strip().split() - itt = int(t[0]) - # dataset 6 - for j in range(abs(irefsp0)): - line = f.readline() - t = line.strip().split() - names.append(t[0]) - irefsp = int(t[1]) - 1 - toffset = float(t[2]) - totim = model.dis.get_totim_from_kper_toffset( - irefsp, - toffset * tomulth) - hob = float(t[3]) - tsd.append([totim, hob]) - nobs += 1 - - obs_data.append(HeadObservation(model, tomulth=tomulth, - layer=layer, row=row, column=col, - roff=roff, coff=coff, - obsname=obsnam, - mlay=mlay, itt=itt, - time_series_data=tsd, - names=names)) - if nobs == nh: - break - - if openfile: - f.close() - - # set package unit number - unitnumber = None - filenames = [None, None] - if ext_unit_dict is not None: - unitnumber, filenames[0] = \ - model.get_ext_dict_attr(ext_unit_dict, - filetype=ModflowHob.ftype()) - if iuhobsv is not None: - if iuhobsv > 0: - iu, filenames[1] = \ - model.get_ext_dict_attr(ext_unit_dict, unit=iuhobsv) - model.add_pop_key_list(iuhobsv) - - # create hob object instance - hob = ModflowHob(model, iuhobsv=iuhobsv, hobdry=hobdry, - tomulth=tomulth, obs_data=obs_data, - unitnumber=unitnumber, filenames=filenames) - - return hob - - @staticmethod - def ftype(): - return 'HOB' - - @staticmethod - def defaultunit(): - return 39 - - -class HeadObservation(object): - """ - Create single HeadObservation instance from a time series array. A list of - HeadObservation instances are passed to the ModflowHob package. - - Parameters - ---------- - tomulth : float - Time-offset multiplier for head observations. Default is 1. - obsname : string - Observation name. Default is 'HOBS' - layer : int - The zero-based layer index of the cell in which the head observation - is located. If layer is less than zero, hydraulic heads from multiple - layers are combined to calculate a simulated value. The number of - layers equals the absolute value of layer, or abs(layer). Default is 0. - row : int - The zero-based row index for the observation. Default is 0. - column : int - The zero-based column index of the observation. Default is 0. - irefsp : int - The zero-based stress period to which the observation time is - referenced. - roff : float - Fractional offset from center of cell in Y direction (between rows). - Default is 0. - coff : float - Fractional offset from center of cell in X direction (between columns). - Default is 0. - itt : int - Flag that identifies whether head or head changes are used as - observations. itt = 1 specified for heads and itt = 2 specified - if initial value is head and subsequent changes in head. Only - specified if irefsp is < 0. Default is 1. - mlay : dictionary of length (abs(irefsp)) - Key represents zero-based layer numbers for multilayer observations and - value represents the fractional value for each layer of multilayer - observations. If mlay is None, a default mlay of {0: 1.} will be - used (default is None). - time_series_data : list or numpy array - Two-dimensional list or numpy array containing the simulation time of - the observation and the observed head [[totim, hob]]. If - time_series_dataDefault is None, a default observation of 0. at - totim 0. will be created (default is None). - names : list - List of specified observation names. 
If names is None, observation - names will be automatically generated from obsname and the order - of the timeseries data (default is None). - - Returns - ------- - obs : HeadObservation - HeadObservation object. - - Examples - -------- - - >>> import flopy - >>> model = flopy.modflow.Modflow() - >>> dis = flopy.modflow.ModflowDis(model, nlay=1, nrow=11, ncol=11, nper=2, - ... perlen=[1,1]) - >>> tsd = [[1.,54.4], [2., 55.2]] - >>> obsdata = flopy.modflow.HeadObservation(model, layer=0, row=5, - ... column=5, time_series_data=tsd) - - """ - - def __init__(self, model, tomulth=1., obsname='HOBS', - layer=0, row=0, column=0, irefsp=None, - roff=0., coff=0., itt=1, mlay=None, - time_series_data=None, names=None): - """ - Object constructor - """ - - if mlay is None: - mlay = {0: 1.} - if time_series_data is None: - time_series_data = [[0., 0.]] - if irefsp is None: - if len(time_series_data) == 1: - irefsp = 1 - else: - irefsp = -1 * len(time_series_data) - - # set class attributes - self.obsname = obsname - self.layer = layer - self.row = row - self.column = column - self.irefsp = irefsp - self.roff = roff - self.coff = coff - self.itt = itt - self.mlay = mlay - self.maxm = 0 - - # check if multilayer observation - self.multilayer = False - if len(self.mlay.keys()) > 1: - self.maxm = len(self.mlay.keys()) - self.multilayer = True - tot = 0. - for key, value in self.mlay.items(): - tot += value - if not (np.isclose(tot, 1.0, rtol=0)): - msg = ('sum of dataset 4 proportions must equal 1.0 - ' + \ - 'sum of dataset 4 proportions = {tot} for ' + \ - 'observation name {obsname}.').format( - tot=tot, - obsname=self.obsname) - raise ValueError(msg) - - # convert passed time_series_data to a numpy array - if isinstance(time_series_data, list): - time_series_data = np.array(time_series_data, dtype=np.float) - - # if a single observation is passed as a list reshape to a - # two-dimensional numpy array - if len(time_series_data.shape) == 1: - time_series_data = np.reshape(time_series_data, (1, 2)) - - # find indices of time series data that are valid - tmax = model.dis.get_final_totim() - keep_idx = time_series_data[:, 0] <= tmax - time_series_data = time_series_data[keep_idx, :] - - # set the number of observations in this time series - shape = time_series_data.shape - self.nobs = shape[0] - - # construct names if not passed - if names is None: - if self.nobs == 1: - names = [obsname] - else: - names = [] - for idx in range(self.nobs): - names.append('{}.{}'.format(obsname, idx + 1)) - # make sure the length of names is greater than or equal to nobs - else: - if isinstance(names, str): - names = [names] - elif not isinstance(names, list): - msg = 'HeadObservation names must be a ' + \ - 'string or a list of strings' - raise ValueError(msg) - if len(names) < self.nobs: - msg = 'a name must be specified for every valid ' + \ - 'observation - {} '.format(len(names)) + \ - 'names were passed but at least ' + \ - '{} names are required.'.format(self.nobs) - raise ValueError(msg) - - # create time_series_data - self.time_series_data = self._get_empty(ncells=shape[0]) - for idx in range(self.nobs): - t = time_series_data[idx, 0] - kstp, kper, toffset = model.dis.get_kstp_kper_toffset(t) - self.time_series_data[idx]['totim'] = t - self.time_series_data[idx]['irefsp'] = kper - self.time_series_data[idx]['toffset'] = toffset / tomulth - self.time_series_data[idx]['hobs'] = time_series_data[idx, 1] - self.time_series_data[idx]['obsname'] = names[idx] - - if self.nobs > 1: - self.irefsp = -self.nobs - else: - 
self.irefsp = self.time_series_data[0]['irefsp'] - - def _get_empty(self, ncells=0): - """ - Get an empty time_series_data recarray for a HeadObservation - - Parameters - ---------- - ncells : int - number of time entries in a HeadObservation - - Returns - ------- - d : np.recarray - - """ - # get an empty recarray that corresponds to dtype - dtype = self._get_dtype() - d = create_empty_recarray(ncells, dtype, default_value=-1.0E+10) - d['obsname'] = '' - return d - - def _get_dtype(self): - """ - Get the dtype for HeadObservation time_series_data - - - Returns - ------- - dtype : np.dtype - - """ - # get the default HOB dtype - dtype = np.dtype([("totim", np.float32), ("irefsp", np.int), - ("toffset", np.float32), - ("hobs", np.float32), ("obsname", '|S12')]) - return dtype +import sys +import collections +import numpy as np +from ..pakbase import Package +from ..utils.recarray_utils import create_empty_recarray + + +class ModflowHob(Package): + """ + Head Observation package class + + Parameters + ---------- + iuhobsv : int + unit number where output is saved. If iuhobsv is None, a unit number + will be assigned (default is None). + hobdry : float + Value of the simulated equivalent written to the observation output + file when the observation is omitted because a cell is dry + (default is 0). + tomulth : float + Time step multiplier for head observations. The product of tomulth and + toffset must produce a time value in units consistent with other model + input. tomulth can be dimensionless or can be used to convert the units + of toffset to the time unit used in the simulation (default is 1). + obs_data : HeadObservation or list of HeadObservation instances + A single HeadObservation instance or a list of HeadObservation + instances containing all of the data for each observation. If obs_data + is None a default HeadObservation with an observation in layer, row, + column (0, 0, 0) and a head value of 0 at totim 0 will be created + (default is None). + hobname : str + Name of head observation output file. If iuhobsv is greater than 0, + and hobname is None, the model basename with a '.hob.out' extension + will be used (default is None). + extension : string + Filename extension (default is hob) + no_print : boolean + When True or 1, a list of head observations will not be + written to the Listing File (default is False) + options : list of strings + Package options (default is None). + unitnumber : int + File unit number (default is None) + filenames : str or list of str + Filenames to use for the package and the output files. If filenames + is None the package name will be created using the model name and + package extension and the hob output name will be created using the + model name and .hob.out extension (for example, modflowtest.hob.out), + if iuhobsv is a number greater than zero. If a single string is passed + the package will be set to the string and hob output name will be + created using the model name and .hob.out extension, if iuhobsv is a + number greater than zero. To define the names for all package files + (input and output) the length of the list of strings should be 2. + Default is None. + + Attributes + ---------- + + Methods + ------- + + See Also + -------- + + Notes + + Examples + -------- + + >>> import flopy + >>> model = flopy.modflow.Modflow() + >>> dis = flopy.modflow.ModflowDis(model, nlay=1, nrow=11, ncol=11, nper=2, + ... perlen=[1,1]) + >>> tsd = [[1.,54.4], [2., 55.2]] + >>> obsdata = flopy.modflow.HeadObservation(model, layer=0, row=5, + ... 
column=5, time_series_data=tsd)
+    >>> hob = flopy.modflow.ModflowHob(model, iuhobsv=51, hobdry=-9999.,
+    ...                                obs_data=obsdata)
+
+
+    """
+
+    def __init__(self, model, iuhobsv=None, hobdry=0, tomulth=1.0,
+                 obs_data=None, hobname=None, extension='hob',
+                 no_print=False, options=None,
+                 unitnumber=None, filenames=None):
+        """
+        Package constructor
+        """
+        # set default unit number if one is not specified
+        if unitnumber is None:
+            unitnumber = ModflowHob.defaultunit()
+
+        # set filenames
+        if filenames is None:
+            filenames = [None, None]
+        elif isinstance(filenames, str):
+            filenames = [filenames, None]
+        elif isinstance(filenames, list):
+            if len(filenames) < 2:
+                filenames.append(None)
+
+        # set filenames[1] to hobname if filenames[1] is not already set
+        if filenames[1] is None:
+            if hobname is not None:
+                filenames[1] = hobname
+
+        if iuhobsv is not None:
+            fname = filenames[1]
+            model.add_output_file(iuhobsv, fname=fname,
+                                  extension='hob.out', binflag=False,
+                                  package=ModflowHob.ftype())
+        else:
+            iuhobsv = 0
+
+        # Fill namefile items
+        name = [ModflowHob.ftype()]
+        units = [unitnumber]
+        extra = ['']
+
+        # set package name
+        fname = [filenames[0]]
+
+        # Call ancestor's init to set self.parent,
+        # extension, name and unit number
+        Package.__init__(self, model, extension=extension, name=name,
+                         unit_number=units, extra=extra, filenames=fname)
+
+        self.url = 'hob.htm'
+        self.heading = '# {} package for '.format(self.name[0]) + \
+                       ' {}, '.format(model.version_types[model.version]) + \
+                       'generated by Flopy.'
+
+        self.iuhobsv = iuhobsv
+        self.hobdry = hobdry
+        self.tomulth = tomulth
+
+        # create default
+        if obs_data is None:
+            obs_data = HeadObservation(model)
+
+        # make sure obs_data is a list
+        if isinstance(obs_data, HeadObservation):
+            obs_data = [obs_data]
+
+        # set self.obs_data
+        self.obs_data = obs_data
+
+        self.no_print = no_print
+        self.np = 0
+        if options is None:
+            options = []
+        if self.no_print:
+            options.append('NOPRINT')
+        self.options = options
+
+        # add checks for input compliance (obsnam length, etc.)
+ self.parent.add_package(self) + + def _set_dimensions(self): + """ + Set the length of the obs_data list + + Returns + ------- + None + + """ + # make sure each entry of obs_data list is a HeadObservation instance + # and calculate nh, mobs, and maxm + msg = '' + self.nh = 0 + self.mobs = 0 + self.maxm = 0 + for idx, obs in enumerate(self.obs_data): + if not isinstance(obs, HeadObservation): + msg += 'ModflowHob: obs_data entry {} '.format(idx) + \ + 'is not a HeadObservation instance.\n' + continue + self.nh += obs.nobs + if obs.multilayer: + self.mobs += obs.nobs + self.maxm = max(self.maxm, obs.maxm) + if msg != '': + raise ValueError(msg) + return + + def write_file(self): + """ + Write the package file + + Returns + ------- + None + + """ + # determine the dimensions of HOB data + self._set_dimensions() + + # open file for writing + f = open(self.fn_path, 'w') + + # write dataset 0 + f.write('{}\n'.format(self.heading)) + + # write dataset 1 + f.write('{:10d}'.format(self.nh)) + f.write('{:10d}'.format(self.mobs)) + f.write('{:10d}'.format(self.maxm)) + f.write('{:10d}'.format(self.iuhobsv)) + f.write('{:10.4g}'.format(self.hobdry)) + if self.no_print or 'NOPRINT' in self.options: + f.write('{: >10}'.format('NOPRINT')) + f.write('\n') + + # write dataset 2 + f.write('{:10.4g}\n'.format(self.tomulth)) + + # write datasets 3-6 + for idx, obs in enumerate(self.obs_data): + # dataset 3 + obsname = obs.obsname + if isinstance(obsname, bytes): + obsname = obsname.decode('utf-8') + line = '{:12s} '.format(obsname) + layer = obs.layer + if layer >= 0: + layer += 1 + line += '{:10d} '.format(layer) + line += '{:10d} '.format(obs.row + 1) + line += '{:10d} '.format(obs.column + 1) + irefsp = obs.irefsp + if irefsp >= 0: + irefsp += 1 + line += '{:10d} '.format(irefsp) + if obs.nobs == 1: + toffset = obs.time_series_data[0]['toffset'] + hobs = obs.time_series_data[0]['hobs'] + else: + toffset = 0. + hobs = 0. + line += '{:20} '.format(toffset) + line += '{:10.4f} '.format(obs.roff) + line += '{:10.4f} '.format(obs.coff) + line += '{:10.4f} '.format(hobs) + line += ' # DATASET 3 - Observation {}'.format(idx + 1) + f.write('{}\n'.format(line)) + + # dataset 4 + if len(obs.mlay.keys()) > 1: + line = '' + for key, value in iter(obs.mlay.items()): + line += '{:5d}{:10.4f}'.format(key + 1, value) + line += ' # DATASET 4 - Observation {}'.format(idx + 1) + f.write('{}\n'.format(line)) + + # dataset 5 + if irefsp < 0: + line = '{:10d}'.format(obs.itt) + line += 103 * ' ' + line += ' # DATASET 5 - Observation {}'.format(idx + 1) + f.write('{}\n'.format(line)) + + # dataset 6: + if obs.nobs > 1: + for jdx, t in enumerate(obs.time_series_data): + obsname = t['obsname'] + if isinstance(obsname, bytes): + obsname = obsname.decode('utf-8') + line = '{:12s} '.format(obsname) + line += '{:10d} '.format(t['irefsp'] + 1) + line += '{:20} '.format(t['toffset']) + line += '{:10.4f} '.format(t['hobs']) + line += 55 * ' ' + line += ' # DATASET 6 - ' + \ + 'Observation {}.{}'.format(idx + 1, jdx + 1) + f.write('{}\n'.format(line)) + + # close the hob package file + f.close() + + return + + @staticmethod + def load(f, model, ext_unit_dict=None, check=True): + """ + Load an existing package. + + Parameters + ---------- + f : filename or file handle + File to load. + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to + which this package will be added. 
+ ext_unit_dict : dictionary, optional + If the arrays in the file are specified using EXTERNAL, + or older style array control records, then `f` should be a file + handle. In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. + check : boolean + Check package data for common errors. (default True) + + Returns + ------- + hob : ModflowHob package object + ModflowHob package object. + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> hobs = flopy.modflow.ModflowHob.load('test.hob', m) + + """ + + if model.verbose: + sys.stdout.write('loading hob package file...\n') + + openfile = not hasattr(f, 'read') + if openfile: + filename = f + f = open(filename, 'r') + + # dataset 0 -- header + while True: + line = f.readline() + if line[0] != '#': + break + + # read dataset 1 + t = line.strip().split() + nh = int(t[0]) + iuhobsv = None + hobdry = 0 + if len(t) > 3: + iuhobsv = int(t[3]) + hobdry = float(t[4]) + + # read dataset 2 + line = f.readline() + t = line.strip().split() + tomulth = float(t[0]) + + # read observation data + obs_data = [] + + # read datasets 3-6 + nobs = 0 + while True: + # read dataset 3 + line = f.readline() + t = line.strip().split() + obsnam = t[0] + layer = int(t[1]) + row = int(t[2]) - 1 + col = int(t[3]) - 1 + irefsp0 = int(t[4]) + toffset = float(t[5]) + roff = float(t[6]) + coff = float(t[7]) + hob = float(t[8]) + + # read dataset 4 if multilayer obs + if layer > 0: + layer -= 1 + mlay = {layer: 1.} + else: + line = f.readline() + t = line.strip().split() + mlay = collections.OrderedDict() + if len(t) >= abs(layer) * 2: + for j in range(0, abs(layer) * 2, 2): + k = int(t[j]) - 1 + # catch case where the same layer is specified + # more than once. In this case add previous + # value to the current value + keys = list(mlay.keys()) + v = 0. + if k in keys: + v = mlay[k] + mlay[k] = float(t[j + 1]) + v + else: + for j in range(abs(layer)): + k = int(t[0]) - 1 + keys = list(mlay.keys()) + v = 0. + if k in keys: + v = mlay[k] + mlay[k] = float(t[1]) + v + + if j != abs(layer) - 1: + line = f.readline() + t = line.strip().split() + # reset layer + layer = -len(list(mlay.keys())) + + # read datasets 5 & 6. 
abs(irefsp0) gives the dataset 6 record count when irefsp0 < 0
+            if irefsp0 > 0:
+                itt = 1
+                irefsp0 -= 1
+                totim = model.dis.get_totim_from_kper_toffset(
+                    irefsp0,
+                    toffset * tomulth)
+                names = [obsnam]
+                tsd = [totim, hob]
+                nobs += 1
+            else:
+                names = []
+                tsd = []
+                # read dataset 5
+                line = f.readline()
+                t = line.strip().split()
+                itt = int(t[0])
+                # dataset 6
+                for j in range(abs(irefsp0)):
+                    line = f.readline()
+                    t = line.strip().split()
+                    names.append(t[0])
+                    irefsp = int(t[1]) - 1
+                    toffset = float(t[2])
+                    totim = model.dis.get_totim_from_kper_toffset(
+                        irefsp,
+                        toffset * tomulth)
+                    hob = float(t[3])
+                    tsd.append([totim, hob])
+                    nobs += 1
+
+            obs_data.append(HeadObservation(model, tomulth=tomulth,
+                                            layer=layer, row=row, column=col,
+                                            roff=roff, coff=coff,
+                                            obsname=obsnam,
+                                            mlay=mlay, itt=itt,
+                                            time_series_data=tsd,
+                                            names=names))
+            if nobs == nh:
+                break
+
+        if openfile:
+            f.close()
+
+        # set package unit number
+        unitnumber = None
+        filenames = [None, None]
+        if ext_unit_dict is not None:
+            unitnumber, filenames[0] = \
+                model.get_ext_dict_attr(ext_unit_dict,
+                                        filetype=ModflowHob.ftype())
+            if iuhobsv is not None:
+                if iuhobsv > 0:
+                    iu, filenames[1] = \
+                        model.get_ext_dict_attr(ext_unit_dict, unit=iuhobsv)
+                    model.add_pop_key_list(iuhobsv)
+
+        # create hob object instance
+        hob = ModflowHob(model, iuhobsv=iuhobsv, hobdry=hobdry,
+                         tomulth=tomulth, obs_data=obs_data,
+                         unitnumber=unitnumber, filenames=filenames)
+
+        return hob
+
+    @staticmethod
+    def ftype():
+        return 'HOB'
+
+    @staticmethod
+    def defaultunit():
+        return 39
+
+
+class HeadObservation(object):
+    """
+    Create a single HeadObservation instance from a time series array. A
+    list of HeadObservation instances is passed to the ModflowHob package.
+
+    Parameters
+    ----------
+    tomulth : float
+        Time-offset multiplier for head observations. Default is 1.
+    obsname : string
+        Observation name. Default is 'HOBS'.
+    layer : int
+        The zero-based layer index of the cell in which the head observation
+        is located. If layer is less than zero, hydraulic heads from multiple
+        layers are combined to calculate a simulated value. The number of
+        layers equals the absolute value of layer, or abs(layer). Default
+        is 0.
+    row : int
+        The zero-based row index for the observation. Default is 0.
+    column : int
+        The zero-based column index of the observation. Default is 0.
+    irefsp : int
+        The zero-based stress period to which the observation time is
+        referenced.
+    roff : float
+        Fractional offset from center of cell in Y direction (between rows).
+        Default is 0.
+    coff : float
+        Fractional offset from center of cell in X direction (between
+        columns). Default is 0.
+    itt : int
+        Flag that identifies whether heads or changes in head are used as
+        observations. itt = 1 is specified for heads; itt = 2 is specified
+        if the initial value is a head and subsequent values are changes in
+        head. Only specified if irefsp is < 0. Default is 1.
+    mlay : dictionary of length (abs(layer))
+        Key represents zero-based layer numbers for multilayer observations
+        and value represents the fractional value for each layer of
+        multilayer observations. If mlay is None, a default mlay of {0: 1.}
+        will be used (default is None).
+    time_series_data : list or numpy array
+        Two-dimensional list or numpy array containing the simulation time
+        of the observation and the observed head [[totim, hob]]. If
+        time_series_data is None, a default observation of 0. at
+        totim 0. will be created (default is None).
+    names : list
+        List of specified observation names.
If names is None, observation + names will be automatically generated from obsname and the order + of the timeseries data (default is None). + + Returns + ------- + obs : HeadObservation + HeadObservation object. + + Examples + -------- + + >>> import flopy + >>> model = flopy.modflow.Modflow() + >>> dis = flopy.modflow.ModflowDis(model, nlay=1, nrow=11, ncol=11, nper=2, + ... perlen=[1,1]) + >>> tsd = [[1.,54.4], [2., 55.2]] + >>> obsdata = flopy.modflow.HeadObservation(model, layer=0, row=5, + ... column=5, time_series_data=tsd) + + """ + + def __init__(self, model, tomulth=1., obsname='HOBS', + layer=0, row=0, column=0, irefsp=None, + roff=0., coff=0., itt=1, mlay=None, + time_series_data=None, names=None): + """ + Object constructor + """ + + if mlay is None: + mlay = {0: 1.} + if time_series_data is None: + time_series_data = [[0., 0.]] + if irefsp is None: + if len(time_series_data) == 1: + irefsp = 1 + else: + irefsp = -1 * len(time_series_data) + + # set class attributes + self.obsname = obsname + self.layer = layer + self.row = row + self.column = column + self.irefsp = irefsp + self.roff = roff + self.coff = coff + self.itt = itt + self.mlay = mlay + self.maxm = 0 + + # check if multilayer observation + self.multilayer = False + if len(self.mlay.keys()) > 1: + self.maxm = len(self.mlay.keys()) + self.multilayer = True + tot = 0. + for key, value in self.mlay.items(): + tot += value + if not (np.isclose(tot, 1.0, rtol=0)): + msg = ('sum of dataset 4 proportions must equal 1.0 - ' + \ + 'sum of dataset 4 proportions = {tot} for ' + \ + 'observation name {obsname}.').format( + tot=tot, + obsname=self.obsname) + raise ValueError(msg) + + # convert passed time_series_data to a numpy array + if isinstance(time_series_data, list): + time_series_data = np.array(time_series_data, dtype=np.float) + + # if a single observation is passed as a list reshape to a + # two-dimensional numpy array + if len(time_series_data.shape) == 1: + time_series_data = np.reshape(time_series_data, (1, 2)) + + # find indices of time series data that are valid + tmax = model.dis.get_final_totim() + keep_idx = time_series_data[:, 0] <= tmax + time_series_data = time_series_data[keep_idx, :] + + # set the number of observations in this time series + shape = time_series_data.shape + self.nobs = shape[0] + + # construct names if not passed + if names is None: + if self.nobs == 1: + names = [obsname] + else: + names = [] + for idx in range(self.nobs): + names.append('{}.{}'.format(obsname, idx + 1)) + # make sure the length of names is greater than or equal to nobs + else: + if isinstance(names, str): + names = [names] + elif not isinstance(names, list): + msg = 'HeadObservation names must be a ' + \ + 'string or a list of strings' + raise ValueError(msg) + if len(names) < self.nobs: + msg = 'a name must be specified for every valid ' + \ + 'observation - {} '.format(len(names)) + \ + 'names were passed but at least ' + \ + '{} names are required.'.format(self.nobs) + raise ValueError(msg) + + # create time_series_data + self.time_series_data = self._get_empty(ncells=shape[0]) + for idx in range(self.nobs): + t = time_series_data[idx, 0] + kstp, kper, toffset = model.dis.get_kstp_kper_toffset(t) + self.time_series_data[idx]['totim'] = t + self.time_series_data[idx]['irefsp'] = kper + self.time_series_data[idx]['toffset'] = toffset / tomulth + self.time_series_data[idx]['hobs'] = time_series_data[idx, 1] + self.time_series_data[idx]['obsname'] = names[idx] + + if self.nobs > 1: + self.irefsp = -self.nobs + else: + 
self.irefsp = self.time_series_data[0]['irefsp'] + + def _get_empty(self, ncells=0): + """ + Get an empty time_series_data recarray for a HeadObservation + + Parameters + ---------- + ncells : int + number of time entries in a HeadObservation + + Returns + ------- + d : np.recarray + + """ + # get an empty recarray that corresponds to dtype + dtype = self._get_dtype() + d = create_empty_recarray(ncells, dtype, default_value=-1.0E+10) + d['obsname'] = '' + return d + + def _get_dtype(self): + """ + Get the dtype for HeadObservation time_series_data + + + Returns + ------- + dtype : np.dtype + + """ + # get the default HOB dtype + dtype = np.dtype([("totim", np.float32), ("irefsp", np.int), + ("toffset", np.float32), + ("hobs", np.float32), ("obsname", '|S12')]) + return dtype diff --git a/flopy/modflow/mfhyd.py b/flopy/modflow/mfhyd.py index 590beb9d99..598a971f62 100644 --- a/flopy/modflow/mfhyd.py +++ b/flopy/modflow/mfhyd.py @@ -1,351 +1,351 @@ -""" -mfhyd module. Contains the ModflowHydclass. Note that the user can access -the ModflowHyd class as `flopy.modflow.ModflowHyd`. - -Additional information for this MODFLOW package can be found at the `Online -MODFLOW Guide -`_. - -""" -import sys - -import numpy as np - -from ..pakbase import Package -from ..utils.recarray_utils import create_empty_recarray - - -class ModflowHyd(Package): - """ - MODFLOW HYDMOD (HYD) Package Class. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to which - this package will be added. - nhyd : int - the maximum number of observation points. (default is 1). - ihydun : int - A flag that is used to determine if hydmod data should be saved. - If ihydun is non-zero hydmod data will be saved. (default is 1). - hydnoh : float - is a user-specified value that is output if a value cannot be computed - at a hydrograph location. For example, the cell in which the hydrograph - is located may be a no-flow cell. (default is -999.) - obsdata : list of lists, numpy array, or numpy recarray (nhyd, 7) - Each row of obsdata includes data defining pckg (3 character string), - arr (2 character string), intyp (1 character string) klay (int), - xl (float), yl (float), hydlbl (14 character string) for each - observation. - - pckg : str - is a 3-character flag to indicate which package is to be addressed - by hydmod for the hydrograph of each observation point. - arr : str - is a text code indicating which model data value is to be accessed - for the hydrograph of each observation point. - intyp : str - is a 1-character value to indicate how the data from the specified - feature are to be accessed; The two options are 'I' for - interpolated value or 'C' for cell value (intyp must be 'C' for - STR and SFR Package hydrographs. - klay : int - is the layer sequence number (zero-based) of the array to be - addressed by HYDMOD. - xl : float - is the coordinate of the hydrograph point in model units of length - measured parallel to model rows, with the origin at the lower left - corner of the model grid. - yl : float - is the coordinate of the hydrograph point in model units of length - measured parallel to model columns, with the origin at the lower - left corner of the model grid. - hydlbl : str - is used to form a label for the hydrograph. - - - The simplest form is a list of lists. 
For example, if nhyd=3 this - gives the form of:: - - obsdata = - [ - [pckg, arr, intyp, klay, xl, yl, hydlbl], - [pckg, arr, intyp, klay, xl, yl, hydlbl], - [pckg, arr, intyp, klay, xl, yl, hydlbl] - ] - - extension : list string - Filename extension (default is ['hyd', 'hyd.bin']) - unitnumber : int - File unit number (default is None). - filenames : str or list of str - Filenames to use for the package and the output files. If - filenames=None the package name will be created using the model name - and package extension and the hydmod output name will be created using - the model name and .hyd.bin extension (for example, - modflowtest.hyd.bin). If a single string is passed the package will be - set to the string and hydmod output name will be created using the - model name and .hyd.bin extension. To define the names for all package - files (input and output) the length of the list of strings should be 2. - Default is None. - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> hyd = flopy.modflow.ModflowHyd(m) - - """ - - def __init__(self, model, nhyd=1, ihydun=None, hydnoh=-999., - obsdata=[['BAS', 'HD', 'I', 0, 0., 0., 'HOBS1']], - extension=['hyd', 'hyd.bin'], unitnumber=None, - filenames=None): - """ - Package constructor. - - """ - - # set default unit number of one is not specified - if unitnumber is None: - unitnumber = ModflowHyd.defaultunit() - - # set filenames - if filenames is None: - filenames = [None, None] - elif isinstance(filenames, str): - filenames = [filenames, None] - elif isinstance(filenames, list): - if len(filenames) < 2: - filenames.append(None) - - # set ihydun to a default unit number if it isn't specified - if ihydun is None: - ihydun = 536 - - # update external file information with hydmod output - fname = filenames[1] - model.add_output_file(ihydun, fname=fname, extension='hyd.bin', - package=ModflowHyd.ftype()) - - # Fill namefile items - name = [ModflowHyd.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper - self.heading = '# {} package for '.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' 
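
A minimal runnable sketch of the obsdata list-of-lists layout described above; the model, discretization, observation codes, and coordinates are illustrative values, not taken from this patch:

>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> dis = flopy.modflow.ModflowDis(m, nlay=1, nrow=10, ncol=10)
>>> obs = [['BAS', 'HD', 'I', 0, 125., 175., 'HOBS1'],
...        ['BAS', 'HD', 'C', 0, 375., 425., 'HOBS2']]
>>> hyd = flopy.modflow.ModflowHyd(m, nhyd=2, obsdata=obs)

Note that nhyd must equal len(obsdata); the constructor raises a RuntimeError otherwise.
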
- self.url = 'hyd.htm' - - self.nhyd = nhyd - self.ihydun = ihydun - self.hydnoh = hydnoh - - dtype = ModflowHyd.get_default_dtype() - obs = ModflowHyd.get_empty(nhyd) - if isinstance(obsdata, list): - if len(obsdata) != nhyd: - e = 'ModflowHyd: nhyd ({}) does not equal '.format(nhyd) + \ - 'length of obsdata ({}).'.format(len(obsdata)) - raise RuntimeError(e) - for idx in range(nhyd): - obs['pckg'][idx] = obsdata[idx][0] - obs['arr'][idx] = obsdata[idx][1] - obs['intyp'][idx] = obsdata[idx][2] - obs['klay'][idx] = int(obsdata[idx][3]) - obs['xl'][idx] = float(obsdata[idx][4]) - obs['yl'][idx] = float(obsdata[idx][5]) - obs['hydlbl'][idx] = obsdata[idx][6] - obsdata = obs - elif isinstance(obsdata, np.ndarray): - if obsdata.dtype == np.object: - if obsdata.shape[1] != len(dtype): - raise IndexError('Incorrect number of fields for obsdata') - obsdata = obsdata.transpose() - obs['pckg'] = obsdata[0] - obs['arr'] = obsdata[1] - obs['intyp'] = obsdata[2] - obs['klay'] = obsdata[3] - obs['xl'] = obsdata[4] - obs['yl'] = obsdata[5] - obs['hydlbl'] = obsdata[6] - else: - inds = ['pckg', 'arr', 'intyp', 'klay', 'xl', 'yl', 'hydlbl'] - for idx in inds: - obs['pckg'] = obsdata['pckg'] - obs['arr'] = obsdata['arr'] - obs['intyp'] = obsdata['intyp'] - obs['klay'] = obsdata['klay'] - obs['xl'] = obsdata['xl'] - obs['yl'] = obsdata['yl'] - obs['hydlbl'] = obsdata['hydlbl'] - obsdata = obs - obsdata = obsdata.view(dtype=dtype) - self.obsdata = obsdata - - # add package - self.parent.add_package(self) - - def write_file(self): - """ - Write the package file. - - Returns - ------- - None - - """ - # Open file for writing - - f = open(self.fn_path, 'w') - - # write dataset 1 - f.write('{} {} {} {}\n'.format(self.nhyd, self.ihydun, self.hydnoh, - self.heading)) - - # write dataset 2 - for idx in range(self.nhyd): - f.write('{} '.format(self.obsdata['pckg'][idx].decode())) - f.write('{} '.format(self.obsdata['arr'][idx].decode())) - f.write('{} '.format(self.obsdata['intyp'][idx].decode())) - f.write('{} '.format(self.obsdata['klay'][idx] + 1)) - f.write('{} '.format(self.obsdata['xl'][idx])) - f.write('{} '.format(self.obsdata['yl'][idx])) - f.write('{} '.format(self.obsdata['hydlbl'][idx].decode())) - f.write('\n') - - # close hydmod file - f.close() - - @staticmethod - def get_empty(ncells=0): - # get an empty recarray that corresponds to dtype - dtype = ModflowHyd.get_default_dtype() - return create_empty_recarray(ncells, dtype) - - @staticmethod - def get_default_dtype(): - # PCKG ARR INTYP KLAY XL YL HYDLBL - dtype = np.dtype([("pckg", '|S3'), ("arr", '|S2'), - ("intyp", '|S1'), ("klay", np.int), - ("xl", np.float32), ("yl", np.float32), - ("hydlbl", '|S14')]) - return dtype - - @staticmethod - def load(f, model, ext_unit_dict=None): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. 
- - Returns - ------- - hyd : ModflowHyd object - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> hyd = flopy.modflow.ModflowHyd.load('test.hyd', m) - - """ - - if model.verbose: - sys.stdout.write('loading hydmod package file...\n') - - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - - # --read dataset 1 - # NHYD IHYDUN HYDNOH - if model.verbose: - sys.stdout.write(' loading hydmod dataset 1\n') - line = f.readline() - t = line.strip().split() - nhyd = int(t[0]) - ihydun = int(t[1]) - model.add_pop_key_list(ihydun) - hydnoh = float(t[2]) - - obs = ModflowHyd.get_empty(nhyd) - - for idx in range(nhyd): - line = f.readline() - t = line.strip().split() - obs['pckg'][idx] = t[0].strip() - obs['arr'][idx] = t[1].strip() - obs['intyp'][idx] = t[2].strip() - obs['klay'][idx] = int(t[3]) - 1 - obs['xl'][idx] = float(t[4]) - obs['yl'][idx] = float(t[5]) - obs['hydlbl'][idx] = t[6].strip() - - if openfile: - f.close() - - # set package unit number - unitnumber = None - filenames = [None, None] - if ext_unit_dict is not None: - unitnumber, filenames[0] = \ - model.get_ext_dict_attr(ext_unit_dict, - filetype=ModflowHyd.ftype()) - if ihydun > 0: - iu, filenames[1] = \ - model.get_ext_dict_attr(ext_unit_dict, unit=ihydun) - model.add_pop_key_list(ihydun) - - # create hyd instance - hyd = ModflowHyd(model, nhyd=nhyd, ihydun=ihydun, hydnoh=hydnoh, - obsdata=obs, unitnumber=unitnumber, - filenames=filenames) - - # return hyd instance - return hyd - - @staticmethod - def ftype(): - return 'HYD' - - @staticmethod - def defaultunit(): - return 36 +""" +mfhyd module. Contains the ModflowHydclass. Note that the user can access +the ModflowHyd class as `flopy.modflow.ModflowHyd`. + +Additional information for this MODFLOW package can be found at the `Online +MODFLOW Guide +`_. + +""" +import sys + +import numpy as np + +from ..pakbase import Package +from ..utils.recarray_utils import create_empty_recarray + + +class ModflowHyd(Package): + """ + MODFLOW HYDMOD (HYD) Package Class. + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to which + this package will be added. + nhyd : int + the maximum number of observation points. (default is 1). + ihydun : int + A flag that is used to determine if hydmod data should be saved. + If ihydun is non-zero hydmod data will be saved. (default is 1). + hydnoh : float + is a user-specified value that is output if a value cannot be computed + at a hydrograph location. For example, the cell in which the hydrograph + is located may be a no-flow cell. (default is -999.) + obsdata : list of lists, numpy array, or numpy recarray (nhyd, 7) + Each row of obsdata includes data defining pckg (3 character string), + arr (2 character string), intyp (1 character string) klay (int), + xl (float), yl (float), hydlbl (14 character string) for each + observation. + + pckg : str + is a 3-character flag to indicate which package is to be addressed + by hydmod for the hydrograph of each observation point. + arr : str + is a text code indicating which model data value is to be accessed + for the hydrograph of each observation point. + intyp : str + is a 1-character value to indicate how the data from the specified + feature are to be accessed; The two options are 'I' for + interpolated value or 'C' for cell value (intyp must be 'C' for + STR and SFR Package hydrographs. 
+ klay : int + is the layer sequence number (zero-based) of the array to be + addressed by HYDMOD. + xl : float + is the coordinate of the hydrograph point in model units of length + measured parallel to model rows, with the origin at the lower left + corner of the model grid. + yl : float + is the coordinate of the hydrograph point in model units of length + measured parallel to model columns, with the origin at the lower + left corner of the model grid. + hydlbl : str + is used to form a label for the hydrograph. + + + The simplest form is a list of lists. For example, if nhyd=3 this + gives the form of:: + + obsdata = + [ + [pckg, arr, intyp, klay, xl, yl, hydlbl], + [pckg, arr, intyp, klay, xl, yl, hydlbl], + [pckg, arr, intyp, klay, xl, yl, hydlbl] + ] + + extension : list string + Filename extension (default is ['hyd', 'hyd.bin']) + unitnumber : int + File unit number (default is None). + filenames : str or list of str + Filenames to use for the package and the output files. If + filenames=None the package name will be created using the model name + and package extension and the hydmod output name will be created using + the model name and .hyd.bin extension (for example, + modflowtest.hyd.bin). If a single string is passed the package will be + set to the string and hydmod output name will be created using the + model name and .hyd.bin extension. To define the names for all package + files (input and output) the length of the list of strings should be 2. + Default is None. + + Attributes + ---------- + + Methods + ------- + + See Also + -------- + + Notes + ----- + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> hyd = flopy.modflow.ModflowHyd(m) + + """ + + def __init__(self, model, nhyd=1, ihydun=None, hydnoh=-999., + obsdata=[['BAS', 'HD', 'I', 0, 0., 0., 'HOBS1']], + extension=['hyd', 'hyd.bin'], unitnumber=None, + filenames=None): + """ + Package constructor. + + """ + + # set default unit number of one is not specified + if unitnumber is None: + unitnumber = ModflowHyd.defaultunit() + + # set filenames + if filenames is None: + filenames = [None, None] + elif isinstance(filenames, str): + filenames = [filenames, None] + elif isinstance(filenames, list): + if len(filenames) < 2: + filenames.append(None) + + # set ihydun to a default unit number if it isn't specified + if ihydun is None: + ihydun = 536 + + # update external file information with hydmod output + fname = filenames[1] + model.add_output_file(ihydun, fname=fname, extension='hyd.bin', + package=ModflowHyd.ftype()) + + # Fill namefile items + name = [ModflowHyd.ftype()] + units = [unitnumber] + extra = [''] + + # set package name + fname = [filenames[0]] + + # Call ancestor's init to set self.parent, extension, name and unit number + Package.__init__(self, model, extension=extension, name=name, + unit_number=units, extra=extra, filenames=fname) + + nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper + self.heading = '# {} package for '.format(self.name[0]) + \ + ' {}, '.format(model.version_types[model.version]) + \ + 'generated by Flopy.' 
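
obsdata can also be supplied as a recarray whose fields follow ModflowHyd.get_default_dtype() below; a sketch under the same illustrative assumptions (values are not from this patch):

>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> dis = flopy.modflow.ModflowDis(m, nlay=1, nrow=10, ncol=10)
>>> rec = flopy.modflow.ModflowHyd.get_empty(1)
>>> rec[0] = ('BAS', 'HD', 'I', 0, 125., 175., 'HOBS1')
>>> hyd = flopy.modflow.ModflowHyd(m, nhyd=1, obsdata=rec)
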
+ self.url = 'hyd.htm' + + self.nhyd = nhyd + self.ihydun = ihydun + self.hydnoh = hydnoh + + dtype = ModflowHyd.get_default_dtype() + obs = ModflowHyd.get_empty(nhyd) + if isinstance(obsdata, list): + if len(obsdata) != nhyd: + e = 'ModflowHyd: nhyd ({}) does not equal '.format(nhyd) + \ + 'length of obsdata ({}).'.format(len(obsdata)) + raise RuntimeError(e) + for idx in range(nhyd): + obs['pckg'][idx] = obsdata[idx][0] + obs['arr'][idx] = obsdata[idx][1] + obs['intyp'][idx] = obsdata[idx][2] + obs['klay'][idx] = int(obsdata[idx][3]) + obs['xl'][idx] = float(obsdata[idx][4]) + obs['yl'][idx] = float(obsdata[idx][5]) + obs['hydlbl'][idx] = obsdata[idx][6] + obsdata = obs + elif isinstance(obsdata, np.ndarray): + if obsdata.dtype == np.object: + if obsdata.shape[1] != len(dtype): + raise IndexError('Incorrect number of fields for obsdata') + obsdata = obsdata.transpose() + obs['pckg'] = obsdata[0] + obs['arr'] = obsdata[1] + obs['intyp'] = obsdata[2] + obs['klay'] = obsdata[3] + obs['xl'] = obsdata[4] + obs['yl'] = obsdata[5] + obs['hydlbl'] = obsdata[6] + else: + inds = ['pckg', 'arr', 'intyp', 'klay', 'xl', 'yl', 'hydlbl'] + for idx in inds: + obs['pckg'] = obsdata['pckg'] + obs['arr'] = obsdata['arr'] + obs['intyp'] = obsdata['intyp'] + obs['klay'] = obsdata['klay'] + obs['xl'] = obsdata['xl'] + obs['yl'] = obsdata['yl'] + obs['hydlbl'] = obsdata['hydlbl'] + obsdata = obs + obsdata = obsdata.view(dtype=dtype) + self.obsdata = obsdata + + # add package + self.parent.add_package(self) + + def write_file(self): + """ + Write the package file. + + Returns + ------- + None + + """ + # Open file for writing + + f = open(self.fn_path, 'w') + + # write dataset 1 + f.write('{} {} {} {}\n'.format(self.nhyd, self.ihydun, self.hydnoh, + self.heading)) + + # write dataset 2 + for idx in range(self.nhyd): + f.write('{} '.format(self.obsdata['pckg'][idx].decode())) + f.write('{} '.format(self.obsdata['arr'][idx].decode())) + f.write('{} '.format(self.obsdata['intyp'][idx].decode())) + f.write('{} '.format(self.obsdata['klay'][idx] + 1)) + f.write('{} '.format(self.obsdata['xl'][idx])) + f.write('{} '.format(self.obsdata['yl'][idx])) + f.write('{} '.format(self.obsdata['hydlbl'][idx].decode())) + f.write('\n') + + # close hydmod file + f.close() + + @staticmethod + def get_empty(ncells=0): + # get an empty recarray that corresponds to dtype + dtype = ModflowHyd.get_default_dtype() + return create_empty_recarray(ncells, dtype) + + @staticmethod + def get_default_dtype(): + # PCKG ARR INTYP KLAY XL YL HYDLBL + dtype = np.dtype([("pckg", '|S3'), ("arr", '|S2'), + ("intyp", '|S1'), ("klay", np.int), + ("xl", np.float32), ("yl", np.float32), + ("hydlbl", '|S14')]) + return dtype + + @staticmethod + def load(f, model, ext_unit_dict=None): + """ + Load an existing package. + + Parameters + ---------- + f : filename or file handle + File to load. + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to + which this package will be added. + ext_unit_dict : dictionary, optional + If the arrays in the file are specified using EXTERNAL, + or older style array control records, then `f` should be a file + handle. In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. 
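
When ext_unit_dict has to be built by hand, it is typically derived from the name file. A hedged sketch only: the name-file path is hypothetical, and the parsenamefile call shown is an assumption about flopy.utils.mfreadnam at the time of this patch rather than something this diff defines:

>>> import flopy
>>> from flopy.utils.mfreadnam import parsenamefile
>>> m = flopy.modflow.Modflow()
>>> ext_unit_dict = parsenamefile('modflowtest.nam', m.mfnam_packages)
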
+ + Returns + ------- + hyd : ModflowHyd object + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> hyd = flopy.modflow.ModflowHyd.load('test.hyd', m) + + """ + + if model.verbose: + sys.stdout.write('loading hydmod package file...\n') + + openfile = not hasattr(f, 'read') + if openfile: + filename = f + f = open(filename, 'r') + + # --read dataset 1 + # NHYD IHYDUN HYDNOH + if model.verbose: + sys.stdout.write(' loading hydmod dataset 1\n') + line = f.readline() + t = line.strip().split() + nhyd = int(t[0]) + ihydun = int(t[1]) + model.add_pop_key_list(ihydun) + hydnoh = float(t[2]) + + obs = ModflowHyd.get_empty(nhyd) + + for idx in range(nhyd): + line = f.readline() + t = line.strip().split() + obs['pckg'][idx] = t[0].strip() + obs['arr'][idx] = t[1].strip() + obs['intyp'][idx] = t[2].strip() + obs['klay'][idx] = int(t[3]) - 1 + obs['xl'][idx] = float(t[4]) + obs['yl'][idx] = float(t[5]) + obs['hydlbl'][idx] = t[6].strip() + + if openfile: + f.close() + + # set package unit number + unitnumber = None + filenames = [None, None] + if ext_unit_dict is not None: + unitnumber, filenames[0] = \ + model.get_ext_dict_attr(ext_unit_dict, + filetype=ModflowHyd.ftype()) + if ihydun > 0: + iu, filenames[1] = \ + model.get_ext_dict_attr(ext_unit_dict, unit=ihydun) + model.add_pop_key_list(ihydun) + + # create hyd instance + hyd = ModflowHyd(model, nhyd=nhyd, ihydun=ihydun, hydnoh=hydnoh, + obsdata=obs, unitnumber=unitnumber, + filenames=filenames) + + # return hyd instance + return hyd + + @staticmethod + def ftype(): + return 'HYD' + + @staticmethod + def defaultunit(): + return 36 diff --git a/flopy/modflow/mflak.py b/flopy/modflow/mflak.py index a8ce876485..9f4adbe6ef 100644 --- a/flopy/modflow/mflak.py +++ b/flopy/modflow/mflak.py @@ -1,838 +1,838 @@ -""" -mflak module. Contains the ModflowLak class. Note that the user can access -the ModflowLak class as `flopy.modflow.ModflowLak`. - -Additional information for this MODFLOW package can be found at the `Online -MODFLOW Guide -`_. - -""" -import sys -import numpy as np -from ..pakbase import Package -from ..utils.util_array import Transient3d -from ..utils import Util3d, read_fixed_var, write_fixed_var - - -class ModflowLak(Package): - """ - MODFLOW Lake Package Class. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to which - this package will be added. - nlakes : int - NLAKES Number of separate lakes. - Sublakes of multiple-lake systems are considered separate lakes for - input purposes. The variable NLAKES is used, with certain internal - assumptions and approximations, to dimension arrays for the simulation. - ipakcb : int - (ILKCB in MODFLOW documentation) - Whether or not to write cell-by-cell flows (yes if ILKCB> 0, no - otherwise). If ILKCB< 0 and "Save Budget" is specified in the Output - Control or ICBCFL is not equal to 0, the cell-by-cell flows will be - printed in the standard output file. ICBCFL is specified in the input - to the Output Control Option of MODFLOW. - lwrt : int or list of ints (one per SP) - lwrt > 0, suppresses printout from the lake package. Default is 0 (to - print budget information) - theta : float - Explicit (THETA = 0.0), semi-implicit (0.0 < THETA < 1.0), or implicit - (THETA = 1.0) solution for lake stages. SURFDEPTH is read only if - THETA is assigned a negative value (the negative value of THETA is - then changed to a positive value internally by the code). 
- * A new method of solving for lake stage uses only the time-weighting - factor THETA (Merritt and Konikow, 2000, p. 52) for transient - simulations. THETA is automatically set to a value of 1.0 for all - steady-state stress periods. For transient stress periods, Explicit - (THETA = 0.0), semi-implicit (0.0 < THETA < 1.0), or implicit - (THETA = 1.0) solutions can be used to calculate lake stages. The - option to specify negative values for THETA is supported to allow - specification of additional variables (NSSITER, SSCNCR, SURFDEP) - for simulations that only include transient stress periods. If - THETA is specified as a negative value, then it is converted to a - positive value for calculations of lake stage. - * In MODFLOW-2000 and later, ISS is not part of the input. Instead - NSSITR or SSCNCR should be included if one or more stress periods - is a steady state stress period as defined in Ss/tr in the - Discretization file. - * SSCNCR and NSSITR can be read for a transient only simulation by - placing a negative sign immediately in front of THETA. A negative - THETA sets a flag which assumes input values for NSSITR and SSCNCR - will follow THETA in the format as described by Merritt and Konikow - (p. 52). A negative THETA is automatically reset to a positive - value after values of NSSITR and SSCNCR are read. - nssitr : int - Maximum number of iterations for Newton's method of solution for - equilibrium lake stages in each MODFLOW iteration for steady-state - aquifer head solution. Only read if ISS (option flag input to DIS - Package of MODFLOW indicating steady-state solution) is not zero or - if THETA is specified as a negative value. - * NSSITR and SSCNCR may be omitted for transient solutions (ISS = 0). - * In MODFLOW-2000 and later, ISS is not part of the input. - Instead NSSITR or SSCNCR should be included if one or more stress - periods is a steady state stress period as defined in Ss/tr in the - Discretization file. - * SSCNCR and NSSITR can be read for a transient only simulation by - placing a negative sign immediately in front of THETA. A negative - THETA sets a flag which assumes input values for NSSITR and SSCNCR - will follow THETA in the format as described by Merritt and Konikow - (p. 52). A negative THETA is automatically reset to a positive - value after values of NSSITR and SSCNCR are read. - * If NSSITR = 0, a value of 100 will be used instead. - sscncr : float - Convergence criterion for equilibrium lake stage solution by Newton's - method. Only read if ISS is not zero or if THETA is specified as a - negative value. See notes above for nssitr. - surfdepth : float - The height of small topological variations (undulations) in lake-bottom - elevations that can affect groundwater discharge to lakes. SURFDEPTH - decreases the lakebed conductance for vertical flow across a horizontal - lakebed caused both by a groundwater head that is between the lakebed - and the lakebed plus SURFDEPTH and a lake stage that is also between - the lakebed and the lakebed plus SURFDEPTH. This method provides a - smooth transition from a condition of no groundwater discharge to a - lake, when groundwater head is below the lakebed, to a condition of - increasing groundwater discharge to a lake as groundwater head becomes - greater than the elevation of the dry lakebed. The method also allows - for the transition of seepage from a lake to groundwater when the lake - stage decreases to the lakebed elevation. 
Values of SURFDEPTH ranging - from 0.01 to 0.5 have been used successfully in test simulations. - SURFDEP is read only if THETA is specified as a negative value. - stages : float or list of floats - The initial stage of each lake at the beginning of the run. - stage_range : list of tuples (ssmn, ssmx) of length nlakes - Where ssmn and ssmx are the minimum and maximum stages allowed for each - lake in steady-state solution. - * SSMN and SSMX are not needed for a transient run and must be - omitted when the solution is transient. - * When the first stress period is a steady-state stress period, - SSMN is defined in record 3. - - For subsequent steady-state stress periods, SSMN is defined in - record 9a. - lakarr : array of integers (nlay, nrow, ncol) - LKARR A value is read in for every grid cell. - If LKARR(I,J,K) = 0, the grid cell is not a lake volume cell. - If LKARR(I,J,K) > 0, its value is the identification number of the lake - occupying the grid cell. LKARR(I,J,K) must not exceed the value NLAKES. - If it does, or if LKARR(I,J,K) < 0, LKARR(I,J,K) is set to zero. - Lake cells cannot be overlain by non-lake cells in a higher layer. - Lake cells must be inactive cells (IBOUND = 0) and should not be - convertible to active cells (WETDRY = 0). - - The Lake package can be used when all or some of the model layers - containing the lake are confined. The authors recommend using the - Layer-Property Flow Package (LPF) for this case, although the - BCF and HUF Packages will work too. However, when using the BCF6 - package to define aquifer properties, lake/aquifer conductances in the - lateral direction are based solely on the lakebed leakance (and not on - the lateral transmissivity of the aquifer layer). As before, when the - BCF6 package is used, vertical lake/aquifer conductances are based on - lakebed conductance and on the vertical hydraulic conductivity of the - aquifer layer underlying the lake when the wet/dry option is - implemented, and only on the lakebed leakance when the wet/dry option - is not implemented. - bdlknc : array of floats (nlay, nrow, ncol) - BDLKNC A value is read in for every grid cell. The value is the lakebed - leakance that will be assigned to lake/aquifer interfaces that occur - in the corresponding grid cell. If the wet-dry option flag (IWDFLG) is - not active (cells cannot rewet if they become dry), then the BDLKNC - values are assumed to represent the combined leakances of the lakebed - material and the aquifer material between the lake and the centers of - the underlying grid cells, i. e., the vertical conductance values (CV) - will not be used in the computation of conductances across lake/aquifer - boundary faces in the vertical direction. - - IBOUND and WETDRY should be set to zero for every cell for which LKARR - is not equal to zero. IBOUND is defined in the input to the Basic - Package of MODFLOW. WETDRY is defined in the input to the BCF or other - flow package of MODFLOW if the IWDFLG option is active. When used with - the HUF package, the Lake Package has been modified to compute - effective lake-aquifer conductance solely on the basis of the - user-specified value of lakebed leakance; aquifer hydraulic - conductivities are not used in this calculation. An appropriate - informational message is now printed after the lakebed conductances - are written to the main output file. - sill_data : dict - (dataset 8 in documentation) - Dict of lists keyed by stress period. 
Each list has a tuple of dataset - 8a, 8b for every multi-lake system, where dataset 8a is another tuple of - IC : int - The number of sublakes - ISUB : list of ints - The identification numbers of the sublakes in the sublake - system being described in this record. The center lake number - is listed first. - And dataset 8b contains - SILLVT : sequence of floats - A sequence of sill elevations for each sublakes that determines - whether the center lake is connected with a given sublake. - Values are entered for each sublake in the order the sublakes - are listed in the previous record. - flux_data : dict - (dataset 9 in documentation) - Dict of lists keyed by stress period. The list for each stress period - is a list of lists, with each list containing the variables - PRCPLK EVAPLK RNF WTHDRW [SSMN] [SSMX] from the documentation. - PRCPLK : float - The rate of precipitation per unit area at the surface of a - lake (L/T). - EVAPLK : float - The rate of evaporation per unit area from the surface of a - lake (L/T). - RNF : float - Overland runoff from an adjacent watershed entering the lake. - If RNF > 0, it is specified directly as a volumetric rate, or - flux (L3 /T). If RNF < 0, its absolute value is used as a - dimensionless multiplier applied to the product of the lake - precipitation rate per unit area (PRCPLK) and the surface area - of the lake at its full stage (occupying all layer 1 lake - cells). When RNF is entered as a dimensionless multiplier - (RNF < 0), it is considered to be the product of two - proportionality factors. The first is the ratio of the area of - the basin contributing runoff to the surface area of the lake - when it is at full stage. The second is the fraction of the - current rainfall rate that becomes runoff to the lake. This - procedure provides a means for the automated computation of - runoff rate from a watershed to a lake as a function of - varying rainfall rate. For example, if the basin area is 10 - times greater than the surface area of the lake, and 20 percent - of the precipitation on the basin becomes overland runoff - directly into the lake, then set RNF = -2.0. - WTHDRW : float - The volumetric rate, or flux (L3 /T), of water removal from a - lake by means other than rainfall, evaporation, surface - outflow, or groundwater seepage. A negative value indicates - augmentation. Normally, this would be used to specify the - rate of artificial withdrawal from a lake for human water use, - or if negative, artificial augmentation of a lake volume for - aesthetic or recreational purposes. - SSMN : float - Minimum stage allowed for each lake in steady-state solution. - See notes on ssmn and ssmx above. - SSMX : float - SSMX Maximum stage allowed for each lake in steady-state - solution. - - options : list of strings - Package options. (default is None). - extension : string - Filename extension (default is 'lak') - unitnumber : int - File unit number (default is None). - filenames : str or list of str - Filenames to use for the package and the output files. If - filenames=None the package name will be created using the model name - and package extension and the cbc output name will be created using - the model name and .cbc extension (for example, modflowtest.cbc), - if ipakcbc is a number greater than zero. If a single string is passed - the package will be set to the string and cbc output names will be - created using the model name and .cbc extension, if ipakcbc is a - number greater than zero. 
To define the names for all package files - (input and output) the length of the list of strings should be 2. - Default is None. - - Methods - ------- - - See Also - -------- - - Notes - ----- - Parameters are not supported in FloPy. - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> lak = {} - >>> lak[0] = [[2, 3, 4, 15.6, 1050., -4]] #this lake boundary will be - >>> #applied to all stress periods - >>> lak = flopy.modflow.ModflowLak(m, nstress_period_data=strd) - - """ - - def __init__(self, model, nlakes=1, ipakcb=None, theta=-1., - nssitr=0, sscncr=0.0, surfdep=0., stages=1., stage_range=None, - tab_files=None, tab_units=None, lakarr=None, bdlknc=None, - sill_data=None, flux_data=None, - extension='lak', unitnumber=None, filenames=None, - options=None, lwrt=0, **kwargs): - """ - Package constructor. - - """ - # set default unit number of one is not specified - if unitnumber is None: - unitnumber = ModflowLak.defaultunit() - - # set filenames - tabdata = False - nlen = 2 - if options is not None: - for option in options: - if 'TABLEINPUT' in option.upper(): - tabdata = True - nlen += nlakes - break - if filenames is None: - filenames = [None for x in range(nlen)] - elif isinstance(filenames, str): - filenames = [filenames] + [None for x in range(nlen - 1)] - elif isinstance(filenames, list): - if len(filenames) < nlen: - filenames = filenames + [None for x in range(2, nlen)] - - # update external file information with cbc output, if necessary - if ipakcb is not None: - fname = filenames[1] - model.add_output_file(ipakcb, fname=fname, - package=ModflowLak.ftype()) - else: - ipakcb = 0 - - # table input files - if tabdata: - if tab_files is None: - tab_files = filenames[2:] - - # add tab_files as external files - if tabdata: - # make sure the number of tabfiles is equal to the number of lakes - if len(tab_files) < nlakes: - msg = 'a tabfile must be specified for each lake' + \ - '{} tabfiles specified '.format(len(tab_files)) + \ - 'instead of {} tabfiles'.format(nlakes) - # make sure tab_files are not None - for idx, fname in enumerate(tab_files): - if fname is None: - msg = 'a filename must be specified for the ' + \ - 'tabfile for lake {}'.format(idx + 1) - raise ValueError(msg) - # set unit for tab files if not passed to __init__ - if tab_units is None: - tab_units = [] - for idx in range(len(tab_files)): - tab_units.append(model.next_ext_unit()) - # add tabfiles as external files - for iu, fname in zip(tab_units, tab_files): - model.add_external(fname, iu) - - # Fill namefile items - name = [ModflowLak.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - self.heading = '# {} package for '.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' 
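
A short sketch of the dataset 2 convention documented in the class docstring: NSSITR and SSCNCR are written when THETA is negative or any stress period is steady state, and SURFDEP is written only when THETA is negative, as write_file() below shows. The model and the lakarr, bdlknc, and flux_data inputs are assumed to exist as in the class docstring; values are illustrative:

>>> lak = flopy.modflow.ModflowLak(m, nlakes=1, theta=-1.0, nssitr=100,
...                                sscncr=0.001, surfdep=0.5, lakarr=lakarr,
...                                bdlknc=bdlknc, flux_data=flux_data)

For a two-stress-period model, lwrt=[0, 1] would additionally suppress lake printout in the second period only.
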
- self.url = 'lak.htm' - - if options is None: - options = [] - self.options = options - self.nlakes = nlakes - self.ipakcb = ipakcb - self.theta = theta - self.nssitr = nssitr - self.sscncr = sscncr - self.surfdep = surfdep - self.lwrt = lwrt - - if isinstance(stages, float): - if self.nlakes == 1: - stages = np.array([self.nlakes], dtype=np.float) * stages - else: - stages = np.ones(self.nlakes, dtype=float) * stages - elif isinstance(stages, list): - stages = np.array(stages) - if stages.shape[0] != nlakes: - err = 'stages shape should be ' + \ - '({}) but is only ({}).'.format(nlakes, stages.shape[0]) - raise Exception(err) - self.stages = stages - if stage_range is None: - stage_range = np.ones((nlakes, 2), dtype=np.float) - stage_range[:, 0] = -10000. - stage_range[:, 1] = 10000. - else: - if isinstance(stage_range, list): - stage_range = np.array(stage_range) - elif isinstance(stage_range, float): - err = 'stage_range should be a list or ' + \ - 'array of size ({}, 2)'.format(nlakes) - raise Exception(err) - if self.parent.dis.steady[0]: - if stage_range.shape != (nlakes, 2): - err = 'stages shape should be ' + \ - '({},2) but is only {}.'.format(nlakes, - stage_range.shape) - raise Exception(err) - self.stage_range = stage_range - - # tabfile data - self.tabdata = tabdata - self.iunit_tab = tab_units - - if lakarr is None and bdlknc is None: - err = 'lakarr and bdlknc must be specified' - raise Exception(err) - nrow, ncol, nlay, nper = self.parent.get_nrow_ncol_nlay_nper() - self.lakarr = Transient3d(model, (nlay, nrow, ncol), np.int32, - lakarr, name='lakarr_') - self.bdlknc = Transient3d(model, (nlay, nrow, ncol), np.float32, - bdlknc, name='bdlknc_') - - if sill_data is not None: - if not isinstance(sill_data, dict): - try: - sill_data = {0: sill_data} - except: - err = 'sill_data must be a dictionary' - raise Exception(err) - - if flux_data is not None: - if not isinstance(flux_data, dict): - # convert array to a dictionary - try: - flux_data = {0: flux_data} - except: - err = 'flux_data must be a dictionary' - raise Exception(err) - for key, value in flux_data.items(): - if isinstance(value, np.ndarray): - td = {} - for k in range(value.shape[0]): - td[k] = value[k, :].tolist() - flux_data[key] = td - if len(list(flux_data.keys())) != nlakes: - err = 'flux_data dictionary must ' + \ - 'have {} entries'.format(nlakes) - raise Exception(err) - elif isinstance(value, float) or \ - isinstance(value, int): - td = {} - for k in range(self.nlakes): - td[k] = (np.ones(6, dtype=np.float) * value).tolist() - flux_data[key] = td - elif isinstance(value, dict): - try: - steady = self.parent.dis.steady[key] - except: - steady = True - nlen = 4 - if steady and key > 0: - nlen = 6 - for k in range(self.nlakes): - td = value[k] - if len(td) < nlen: - err = 'flux_data entry for stress period'.format( - key + 1) + \ - 'has {} entries but '.format(nlen) + \ - 'should have {} entries'.format(len(td)) - raise Exception(err) - - self.flux_data = flux_data - self.sill_data = sill_data - - self.parent.add_package(self) - - return - - def ncells(self): - # Return the maximum number of cells that have a stream - # (developed for MT3DMS SSM package) - nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper - return (nlay * nrow * ncol) - - def write_file(self): - """ - Write the package file. 
- - Returns - ------- - None - - """ - f = open(self.fn_path, 'w') - # dataset 0 - self.heading = '# {} package for '.format(self.name[0]) + \ - '{}, generated by Flopy.'.format(self.parent.version) - f.write('{0}\n'.format(self.heading)) - - # dataset 1a - if len(self.options) > 0: - for option in self.options: - f.write('{} '.format(option)) - f.write('\n') - - # dataset 1b - f.write(write_fixed_var([self.nlakes, self.ipakcb], - free=self.parent.free_format_input)) - # dataset 2 - steady = np.any(self.parent.dis.steady.array) - t = [self.theta] - if self.theta < 0. or steady: - t.append(self.nssitr) - t.append(self.sscncr) - if self.theta < 0.: - t.append(self.surfdep) - f.write(write_fixed_var(t, free=self.parent.free_format_input)) - - # dataset 3 - steady = self.parent.dis.steady[0] - for n in range(self.nlakes): - ipos = [10] - t = [self.stages[n]] - if steady: - ipos.append(10) - t.append(self.stage_range[n, 0]) - ipos.append(10) - t.append(self.stage_range[n, 1]) - if self.tabdata: - ipos.append(5) - t.append(self.iunit_tab[n]) - f.write(write_fixed_var(t, ipos=ipos, - free=self.parent.free_format_input)) - - ds8_keys = list( - self.sill_data.keys()) if self.sill_data is not None else [] - ds9_keys = list(self.flux_data.keys()) - nper = self.parent.dis.steady.shape[0] - for kper in range(nper): - itmp, file_entry_lakarr = self.lakarr.get_kper_entry(kper) - ibd, file_entry_bdlknc = self.bdlknc.get_kper_entry(kper) - - itmp2 = 0 - if kper in ds9_keys: - itmp2 = 1 - elif len(ds9_keys) > 0: - itmp2 = -1 - if isinstance(self.lwrt, list): - tmplwrt = self.lwrt[kper] - else: - tmplwrt = self.lwrt - t = [itmp, itmp2, tmplwrt] - comment = 'Stress period {}'.format(kper + 1) - f.write(write_fixed_var(t, free=self.parent.free_format_input, - comment=comment)) - - if itmp > 0: - f.write(file_entry_lakarr) - f.write(file_entry_bdlknc) - - nslms = 0 - if kper in ds8_keys: - ds8 = self.sill_data[kper] - nslms = len(ds8) - - f.write(write_fixed_var([nslms], length=5, - free=self.parent.free_format_input, - comment='Data set 7')) - if nslms > 0: - for n in range(nslms): - d1, d2 = ds8[n] - s = write_fixed_var(d1, length=5, - free=self.parent.free_format_input, - comment='Data set 8a') - f.write(s) - s = write_fixed_var(d2, - free=self.parent.free_format_input, - comment='Data set 8b') - f.write(s) - - if itmp2 > 0: - ds9 = self.flux_data[kper] - for n in range(self.nlakes): - try: - steady = self.parent.dis.steady[kper] - except: - steady = True - if kper > 0 and steady: - t = ds9[n] - else: - t = ds9[n][0:4] - s = write_fixed_var(t, - free=self.parent.free_format_input, - comment='Data set 9a') - f.write(s) - - # close the lak file - f.close() - - @staticmethod - def load(f, model, nper=None, ext_unit_dict=None): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. - nper : int - The number of stress periods. If nper is None, then nper will be - obtained from the model object. (default is None). - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - - Returns - ------- - str : ModflowLak object - ModflowLak object. 
- - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> lak = flopy.modflow.ModflowStr.load('test.lak', m) - - """ - - if model.verbose: - sys.stdout.write('loading lak package file...\n') - - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r', errors='replace') - - # dataset 0 -- header - while True: - line = f.readline() - if line[0] != '#': - break - - options = [] - tabdata = False - if 'TABLEINPUT' in line.upper(): - if model.verbose: - print(" reading lak dataset 1a") - options.append('TABLEINPUT') - tabdata = True - line = f.readline() - - # read dataset 1b - if model.verbose: - print(" reading lak dataset 1b") - t = line.strip().split() - nlakes = int(t[0]) - ipakcb = 0 - try: - ipakcb = int(t[1]) - except: - pass - - # read dataset 2 - line = f.readline().rstrip() - if model.array_free_format: - t = line.split() - else: - t = read_fixed_var(line, ncol=4) - theta = float(t[0]) - nssitr, sscncr = 0, 0. - if theta < 0: - try: - nssitr = int(t[1]) - except: - if model.verbose: - print(' implicit nssitr defined in file') - try: - sscncr = float(t[2]) - except: - if model.verbose: - print(' implicit sscncr defined in file') - - surfdep = 0. - if theta < 0.: - surfdep = float(t[3]) - - if nper is None: - nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper() - - if model.verbose: - print(" reading lak dataset 3") - stages = [] - stage_range = [] - if tabdata: - tab_units = [] - else: - tab_units = None - for lake in range(nlakes): - line = f.readline().rstrip() - if model.array_free_format: - t = line.split() - else: - t = read_fixed_var(line, ipos=[10, 10, 10, 5]) - stages.append(t[0]) - ipos = 1 - if model.dis.steady[0]: - stage_range.append((float(t[ipos]), float(t[ipos + 1]))) - ipos += 2 - if tabdata: - iu = int(t[ipos]) - tab_units.append(iu) - - lake_loc = {} - lake_lknc = {} - sill_data = {} - flux_data = {} - lwrt = [] - for iper in range(nper): - if model.verbose: - print(" reading lak dataset 4 - " + - "for stress period {}".format(iper + 1)) - line = f.readline().rstrip() - if model.array_free_format: - t = line.split() - else: - t = read_fixed_var(line, ncol=3) - itmp, itmp1, tmplwrt = int(t[0]), int(t[1]), int(t[2]) - lwrt.append(tmplwrt) - - if itmp > 0: - if model.verbose: - print(" reading lak dataset 5 - " + - "for stress period {}".format(iper + 1)) - name = 'LKARR_StressPeriod_{}'.format(iper) - lakarr = Util3d.load(f, model, (nlay, nrow, ncol), np.int32, - name, ext_unit_dict) - if model.verbose: - print(" reading lak dataset 6 - " + - "for stress period {}".format(iper + 1)) - name = 'BDLKNC_StressPeriod_{}'.format(iper) - bdlknc = Util3d.load(f, model, (nlay, nrow, ncol), np.float32, - name, ext_unit_dict) - - lake_loc[iper] = lakarr - lake_lknc[iper] = bdlknc - - if model.verbose: - print(" reading lak dataset 7 - " + - "for stress period {}".format(iper + 1)) - line = f.readline().rstrip() - t = line.split() - nslms = int(t[0]) - ds8 = [] - if nslms > 0: - if model.verbose: - print(" reading lak dataset 8 - " + - "for stress period {}".format(iper + 1)) - for i in range(nslms): - line = f.readline().rstrip() - if model.array_free_format: - t = line.split() - else: - ic = int(line[0:5]) - t = read_fixed_var(line, ncol=ic + 1, length=5) - ic = int(t[0]) - ds8a = [ic] - for j in range(1, ic + 1): - ds8a.append(int(t[j])) - line = f.readline().rstrip() - if model.array_free_format: - t = line.split() - else: - t = read_fixed_var(line, ncol=ic - 1) - silvt = [] - for j in range(ic - 1): - 
silvt.append(float(t[j])) - ds8.append((ds8a, silvt)) - sill_data[iper] = ds8 - if itmp1 >= 0: - if model.verbose: - print(" reading lak dataset 9 - " + - "for stress period {}".format(iper + 1)) - ds9 = {} - for n in range(nlakes): - line = f.readline().rstrip() - if model.array_free_format: - t = line.split() - else: - t = read_fixed_var(line, ncol=6) - tds = [] - tds.append(float(t[0])) - tds.append(float(t[1])) - tds.append(float(t[2])) - tds.append(float(t[3])) - if model.dis.steady[iper]: - if iper == 0: - tds.append(stage_range[n][0]) - tds.append(stage_range[n][1]) - else: - tds.append(float(t[4])) - tds.append(float(t[5])) - else: - tds.append(0.) - tds.append(0.) - ds9[n] = tds - flux_data[iper] = ds9 - - if openfile: - f.close() - - # convert lake data to Transient3d objects - lake_loc = Transient3d(model, (nlay, nrow, ncol), np.int32, - lake_loc, name='lakarr_') - lake_lknc = Transient3d(model, (nlay, nrow, ncol), np.float32, - lake_lknc, name='bdlknc_') - - # determine specified unit number - n = 2 - if tab_units is not None: - n += nlakes - unitnumber = None - filenames = [None for x in range(n)] - if ext_unit_dict is not None: - unitnumber, filenames[0] = \ - model.get_ext_dict_attr(ext_unit_dict, - filetype=ModflowLak.ftype()) - if ipakcb > 0: - iu, filenames[1] = \ - model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb) - model.add_pop_key_list(ipakcb) - - ipos = 2 - if tab_units is not None: - for i in range(len(tab_units)): - iu, filenames[ipos] = \ - model.get_ext_dict_attr(ext_unit_dict, - unit=tab_units[i]) - ipos += 1 - - lakpak = ModflowLak(model, options=options, nlakes=nlakes, - ipakcb=ipakcb, theta=theta, nssitr=nssitr, - surfdep=surfdep, sscncr=sscncr, lwrt=lwrt, - stages=stages, - stage_range=stage_range, tab_units=tab_units, - lakarr=lake_loc, bdlknc=lake_lknc, - sill_data=sill_data, flux_data=flux_data, - unitnumber=unitnumber, filenames=filenames) - return lakpak - - @staticmethod - def ftype(): - return 'LAK' - - @staticmethod - def defaultunit(): - return 119 +""" +mflak module. Contains the ModflowLak class. Note that the user can access +the ModflowLak class as `flopy.modflow.ModflowLak`. + +Additional information for this MODFLOW package can be found at the `Online +MODFLOW Guide +`_. + +""" +import sys +import numpy as np +from ..pakbase import Package +from ..utils.util_array import Transient3d +from ..utils import Util3d, read_fixed_var, write_fixed_var + + +class ModflowLak(Package): + """ + MODFLOW Lake Package Class. + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to which + this package will be added. + nlakes : int + NLAKES Number of separate lakes. + Sublakes of multiple-lake systems are considered separate lakes for + input purposes. The variable NLAKES is used, with certain internal + assumptions and approximations, to dimension arrays for the simulation. + ipakcb : int + (ILKCB in MODFLOW documentation) + Whether or not to write cell-by-cell flows (yes if ILKCB> 0, no + otherwise). If ILKCB< 0 and "Save Budget" is specified in the Output + Control or ICBCFL is not equal to 0, the cell-by-cell flows will be + printed in the standard output file. ICBCFL is specified in the input + to the Output Control Option of MODFLOW. + lwrt : int or list of ints (one per SP) + lwrt > 0, suppresses printout from the lake package. 
Default is 0 (to + print budget information) + theta : float + Explicit (THETA = 0.0), semi-implicit (0.0 < THETA < 1.0), or implicit + (THETA = 1.0) solution for lake stages. SURFDEPTH is read only if + THETA is assigned a negative value (the negative value of THETA is + then changed to a positive value internally by the code). + * A new method of solving for lake stage uses only the time-weighting + factor THETA (Merritt and Konikow, 2000, p. 52) for transient + simulations. THETA is automatically set to a value of 1.0 for all + steady-state stress periods. For transient stress periods, Explicit + (THETA = 0.0), semi-implicit (0.0 < THETA < 1.0), or implicit + (THETA = 1.0) solutions can be used to calculate lake stages. The + option to specify negative values for THETA is supported to allow + specification of additional variables (NSSITER, SSCNCR, SURFDEP) + for simulations that only include transient stress periods. If + THETA is specified as a negative value, then it is converted to a + positive value for calculations of lake stage. + * In MODFLOW-2000 and later, ISS is not part of the input. Instead + NSSITR or SSCNCR should be included if one or more stress periods + is a steady state stress period as defined in Ss/tr in the + Discretization file. + * SSCNCR and NSSITR can be read for a transient only simulation by + placing a negative sign immediately in front of THETA. A negative + THETA sets a flag which assumes input values for NSSITR and SSCNCR + will follow THETA in the format as described by Merritt and Konikow + (p. 52). A negative THETA is automatically reset to a positive + value after values of NSSITR and SSCNCR are read. + nssitr : int + Maximum number of iterations for Newton's method of solution for + equilibrium lake stages in each MODFLOW iteration for steady-state + aquifer head solution. Only read if ISS (option flag input to DIS + Package of MODFLOW indicating steady-state solution) is not zero or + if THETA is specified as a negative value. + * NSSITR and SSCNCR may be omitted for transient solutions (ISS = 0). + * In MODFLOW-2000 and later, ISS is not part of the input. + Instead NSSITR or SSCNCR should be included if one or more stress + periods is a steady state stress period as defined in Ss/tr in the + Discretization file. + * SSCNCR and NSSITR can be read for a transient only simulation by + placing a negative sign immediately in front of THETA. A negative + THETA sets a flag which assumes input values for NSSITR and SSCNCR + will follow THETA in the format as described by Merritt and Konikow + (p. 52). A negative THETA is automatically reset to a positive + value after values of NSSITR and SSCNCR are read. + * If NSSITR = 0, a value of 100 will be used instead. + sscncr : float + Convergence criterion for equilibrium lake stage solution by Newton's + method. Only read if ISS is not zero or if THETA is specified as a + negative value. See notes above for nssitr. + surfdepth : float + The height of small topological variations (undulations) in lake-bottom + elevations that can affect groundwater discharge to lakes. SURFDEPTH + decreases the lakebed conductance for vertical flow across a horizontal + lakebed caused both by a groundwater head that is between the lakebed + and the lakebed plus SURFDEPTH and a lake stage that is also between + the lakebed and the lakebed plus SURFDEPTH. 
This method provides a + smooth transition from a condition of no groundwater discharge to a + lake, when groundwater head is below the lakebed, to a condition of + increasing groundwater discharge to a lake as groundwater head becomes + greater than the elevation of the dry lakebed. The method also allows + for the transition of seepage from a lake to groundwater when the lake + stage decreases to the lakebed elevation. Values of SURFDEPTH ranging + from 0.01 to 0.5 have been used successfully in test simulations. + SURFDEP is read only if THETA is specified as a negative value. + stages : float or list of floats + The initial stage of each lake at the beginning of the run. + stage_range : list of tuples (ssmn, ssmx) of length nlakes + Where ssmn and ssmx are the minimum and maximum stages allowed for each + lake in steady-state solution. + * SSMN and SSMX are not needed for a transient run and must be + omitted when the solution is transient. + * When the first stress period is a steady-state stress period, + SSMN is defined in record 3. + + For subsequent steady-state stress periods, SSMN is defined in + record 9a. + lakarr : array of integers (nlay, nrow, ncol) + LKARR A value is read in for every grid cell. + If LKARR(I,J,K) = 0, the grid cell is not a lake volume cell. + If LKARR(I,J,K) > 0, its value is the identification number of the lake + occupying the grid cell. LKARR(I,J,K) must not exceed the value NLAKES. + If it does, or if LKARR(I,J,K) < 0, LKARR(I,J,K) is set to zero. + Lake cells cannot be overlain by non-lake cells in a higher layer. + Lake cells must be inactive cells (IBOUND = 0) and should not be + convertible to active cells (WETDRY = 0). + + The Lake package can be used when all or some of the model layers + containing the lake are confined. The authors recommend using the + Layer-Property Flow Package (LPF) for this case, although the + BCF and HUF Packages will work too. However, when using the BCF6 + package to define aquifer properties, lake/aquifer conductances in the + lateral direction are based solely on the lakebed leakance (and not on + the lateral transmissivity of the aquifer layer). As before, when the + BCF6 package is used, vertical lake/aquifer conductances are based on + lakebed conductance and on the vertical hydraulic conductivity of the + aquifer layer underlying the lake when the wet/dry option is + implemented, and only on the lakebed leakance when the wet/dry option + is not implemented. + bdlknc : array of floats (nlay, nrow, ncol) + BDLKNC A value is read in for every grid cell. The value is the lakebed + leakance that will be assigned to lake/aquifer interfaces that occur + in the corresponding grid cell. If the wet-dry option flag (IWDFLG) is + not active (cells cannot rewet if they become dry), then the BDLKNC + values are assumed to represent the combined leakances of the lakebed + material and the aquifer material between the lake and the centers of + the underlying grid cells, i. e., the vertical conductance values (CV) + will not be used in the computation of conductances across lake/aquifer + boundary faces in the vertical direction. + + IBOUND and WETDRY should be set to zero for every cell for which LKARR + is not equal to zero. IBOUND is defined in the input to the Basic + Package of MODFLOW. WETDRY is defined in the input to the BCF or other + flow package of MODFLOW if the IWDFLG option is active. 
When used with + the HUF package, the Lake Package has been modified to compute + effective lake-aquifer conductance solely on the basis of the + user-specified value of lakebed leakance; aquifer hydraulic + conductivities are not used in this calculation. An appropriate + informational message is now printed after the lakebed conductances + are written to the main output file. + sill_data : dict + (dataset 8 in documentation) + Dict of lists keyed by stress period. Each list has a tuple of dataset + 8a, 8b for every multi-lake system, where dataset 8a is another tuple of + IC : int + The number of sublakes + ISUB : list of ints + The identification numbers of the sublakes in the sublake + system being described in this record. The center lake number + is listed first. + And dataset 8b contains + SILLVT : sequence of floats + A sequence of sill elevations for each sublakes that determines + whether the center lake is connected with a given sublake. + Values are entered for each sublake in the order the sublakes + are listed in the previous record. + flux_data : dict + (dataset 9 in documentation) + Dict of lists keyed by stress period. The list for each stress period + is a list of lists, with each list containing the variables + PRCPLK EVAPLK RNF WTHDRW [SSMN] [SSMX] from the documentation. + PRCPLK : float + The rate of precipitation per unit area at the surface of a + lake (L/T). + EVAPLK : float + The rate of evaporation per unit area from the surface of a + lake (L/T). + RNF : float + Overland runoff from an adjacent watershed entering the lake. + If RNF > 0, it is specified directly as a volumetric rate, or + flux (L3 /T). If RNF < 0, its absolute value is used as a + dimensionless multiplier applied to the product of the lake + precipitation rate per unit area (PRCPLK) and the surface area + of the lake at its full stage (occupying all layer 1 lake + cells). When RNF is entered as a dimensionless multiplier + (RNF < 0), it is considered to be the product of two + proportionality factors. The first is the ratio of the area of + the basin contributing runoff to the surface area of the lake + when it is at full stage. The second is the fraction of the + current rainfall rate that becomes runoff to the lake. This + procedure provides a means for the automated computation of + runoff rate from a watershed to a lake as a function of + varying rainfall rate. For example, if the basin area is 10 + times greater than the surface area of the lake, and 20 percent + of the precipitation on the basin becomes overland runoff + directly into the lake, then set RNF = -2.0. + WTHDRW : float + The volumetric rate, or flux (L3 /T), of water removal from a + lake by means other than rainfall, evaporation, surface + outflow, or groundwater seepage. A negative value indicates + augmentation. Normally, this would be used to specify the + rate of artificial withdrawal from a lake for human water use, + or if negative, artificial augmentation of a lake volume for + aesthetic or recreational purposes. + SSMN : float + Minimum stage allowed for each lake in steady-state solution. + See notes on ssmn and ssmx above. + SSMX : float + SSMX Maximum stage allowed for each lake in steady-state + solution. + + options : list of strings + Package options. (default is None). + extension : string + Filename extension (default is 'lak') + unitnumber : int + File unit number (default is None). + filenames : str or list of str + Filenames to use for the package and the output files. 
If
+        filenames=None the package name will be created using the model name
+        and package extension and the cbc output name will be created using
+        the model name and .cbc extension (for example, modflowtest.cbc),
+        if ipakcb is a number greater than zero. If a single string is passed
+        the package will be set to the string and cbc output names will be
+        created using the model name and .cbc extension, if ipakcb is a
+        number greater than zero. To define the names for all package files
+        (input and output) the length of the list of strings should be 2.
+        Default is None.
+
+    Methods
+    -------
+
+    See Also
+    --------
+
+    Notes
+    -----
+    Parameters are not supported in FloPy.
+
+    Examples
+    --------
+
+    >>> import flopy
+    >>> m = flopy.modflow.Modflow()
+    >>> lak = {}
+    >>> lak[0] = [[2, 3, 4, 15.6, 1050., -4]]  # this lake flux data will be
+    >>> # applied to all stress periods
+    >>> lak = flopy.modflow.ModflowLak(m, flux_data=lak)
+
+    """
+
+    def __init__(self, model, nlakes=1, ipakcb=None, theta=-1.,
+                 nssitr=0, sscncr=0.0, surfdep=0., stages=1., stage_range=None,
+                 tab_files=None, tab_units=None, lakarr=None, bdlknc=None,
+                 sill_data=None, flux_data=None,
+                 extension='lak', unitnumber=None, filenames=None,
+                 options=None, lwrt=0, **kwargs):
+        """
+        Package constructor.
+
+        """
+        # set default unit number if one is not specified
+        if unitnumber is None:
+            unitnumber = ModflowLak.defaultunit()
+
+        # set filenames
+        tabdata = False
+        nlen = 2
+        if options is not None:
+            for option in options:
+                if 'TABLEINPUT' in option.upper():
+                    tabdata = True
+                    nlen += nlakes
+                    break
+        if filenames is None:
+            filenames = [None for x in range(nlen)]
+        elif isinstance(filenames, str):
+            filenames = [filenames] + [None for x in range(nlen - 1)]
+        elif isinstance(filenames, list):
+            if len(filenames) < nlen:
+                filenames = filenames + \
+                            [None for x in range(len(filenames), nlen)]
+
+        # update external file information with cbc output, if necessary
+        if ipakcb is not None:
+            fname = filenames[1]
+            model.add_output_file(ipakcb, fname=fname,
+                                  package=ModflowLak.ftype())
+        else:
+            ipakcb = 0
+
+        # table input files
+        if tabdata:
+            if tab_files is None:
+                tab_files = filenames[2:]
+
+        # add tab_files as external files
+        if tabdata:
+            # make sure the number of tabfiles is equal to the number of lakes
+            if len(tab_files) < nlakes:
+                msg = 'a tabfile must be specified for each lake; ' + \
+                      '{} tabfiles specified '.format(len(tab_files)) + \
+                      'instead of {} tabfiles'.format(nlakes)
+                raise ValueError(msg)
+            # make sure tab_files are not None
+            for idx, fname in enumerate(tab_files):
+                if fname is None:
+                    msg = 'a filename must be specified for the ' + \
+                          'tabfile for lake {}'.format(idx + 1)
+                    raise ValueError(msg)
+            # set unit for tab files if not passed to __init__
+            if tab_units is None:
+                tab_units = []
+                for idx in range(len(tab_files)):
+                    tab_units.append(model.next_ext_unit())
+            # add tabfiles as external files
+            for iu, fname in zip(tab_units, tab_files):
+                model.add_external(fname, iu)
+
+        # Fill namefile items
+        name = [ModflowLak.ftype()]
+        units = [unitnumber]
+        extra = ['']
+
+        # set package name
+        fname = [filenames[0]]
+
+        # Call ancestor's init to set self.parent, extension, name and unit number
+        Package.__init__(self, model, extension=extension, name=name,
+                         unit_number=units, extra=extra, filenames=fname)
+
+        self.heading = '# {} package for '.format(self.name[0]) + \
+                       ' {}, '.format(model.version_types[model.version]) + \
+                       'generated by Flopy.'
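+        # Illustrative note (not part of the original source; the call below
+        # is a hypothetical sketch): with the TABLEINPUT option one tabfile
+        # is expected per lake, so a call such as
+        #
+        #   lak = flopy.modflow.ModflowLak(m, nlakes=2,
+        #                                  options=['TABLEINPUT'],
+        #                                  tab_files=['lake1.tab',
+        #                                             'lake2.tab'])
+        #
+        # registers the two tabfiles as external files above, and the
+        # filenames list is nlakes entries longer than for a package
+        # without tabfiles.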
+        self.url = 'lak.htm'
+
+        if options is None:
+            options = []
+        self.options = options
+        self.nlakes = nlakes
+        self.ipakcb = ipakcb
+        self.theta = theta
+        self.nssitr = nssitr
+        self.sscncr = sscncr
+        self.surfdep = surfdep
+        self.lwrt = lwrt
+
+        if isinstance(stages, float):
+            stages = np.ones(self.nlakes, dtype=np.float) * stages
+        elif isinstance(stages, list):
+            stages = np.array(stages)
+        if stages.shape[0] != nlakes:
+            err = 'stages shape should be ' + \
+                  '({}) but is only ({}).'.format(nlakes, stages.shape[0])
+            raise Exception(err)
+        self.stages = stages
+        if stage_range is None:
+            stage_range = np.ones((nlakes, 2), dtype=np.float)
+            stage_range[:, 0] = -10000.
+            stage_range[:, 1] = 10000.
+        else:
+            if isinstance(stage_range, list):
+                stage_range = np.array(stage_range)
+            elif isinstance(stage_range, float):
+                err = 'stage_range should be a list or ' + \
+                      'array of size ({}, 2)'.format(nlakes)
+                raise Exception(err)
+        if self.parent.dis.steady[0]:
+            if stage_range.shape != (nlakes, 2):
+                err = 'stage_range shape should be ' + \
+                      '({},2) but is only {}.'.format(nlakes,
+                                                      stage_range.shape)
+                raise Exception(err)
+        self.stage_range = stage_range
+
+        # tabfile data
+        self.tabdata = tabdata
+        self.iunit_tab = tab_units
+
+        if lakarr is None and bdlknc is None:
+            err = 'lakarr and bdlknc must be specified'
+            raise Exception(err)
+        nrow, ncol, nlay, nper = self.parent.get_nrow_ncol_nlay_nper()
+        self.lakarr = Transient3d(model, (nlay, nrow, ncol), np.int32,
+                                  lakarr, name='lakarr_')
+        self.bdlknc = Transient3d(model, (nlay, nrow, ncol), np.float32,
+                                  bdlknc, name='bdlknc_')
+
+        if sill_data is not None:
+            if not isinstance(sill_data, dict):
+                try:
+                    sill_data = {0: sill_data}
+                except:
+                    err = 'sill_data must be a dictionary'
+                    raise Exception(err)
+
+        if flux_data is not None:
+            if not isinstance(flux_data, dict):
+                # convert array to a dictionary
+                try:
+                    flux_data = {0: flux_data}
+                except:
+                    err = 'flux_data must be a dictionary'
+                    raise Exception(err)
+            for key, value in flux_data.items():
+                if isinstance(value, np.ndarray):
+                    td = {}
+                    for k in range(value.shape[0]):
+                        td[k] = value[k, :].tolist()
+                    flux_data[key] = td
+                    if len(td) != nlakes:
+                        err = 'flux_data array for stress period ' + \
+                              '{} must have '.format(key + 1) + \
+                              '{} rows, one for each lake'.format(nlakes)
+                        raise Exception(err)
+                elif isinstance(value, float) or \
+                        isinstance(value, int):
+                    td = {}
+                    for k in range(self.nlakes):
+                        td[k] = (np.ones(6, dtype=np.float) * value).tolist()
+                    flux_data[key] = td
+                elif isinstance(value, dict):
+                    try:
+                        steady = self.parent.dis.steady[key]
+                    except:
+                        steady = True
+                    nlen = 4
+                    if steady and key > 0:
+                        nlen = 6
+                    for k in range(self.nlakes):
+                        td = value[k]
+                        if len(td) < nlen:
+                            err = 'flux_data entry for stress period ' + \
+                                  '{} has {} entries but '.format(key + 1,
+                                                                  len(td)) + \
+                                  'should have {} entries'.format(nlen)
+                            raise Exception(err)
+
+        self.flux_data = flux_data
+        self.sill_data = sill_data
+
+        self.parent.add_package(self)
+
+        return
+
+    def ncells(self):
+        # Return the maximum number of cells that have a lake
+        # (developed for MT3DMS SSM package)
+        nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper
+        return (nlay * nrow * ncol)
+
+    def write_file(self):
+        """
+        Write the package file.
+ + Returns + ------- + None + + """ + f = open(self.fn_path, 'w') + # dataset 0 + self.heading = '# {} package for '.format(self.name[0]) + \ + '{}, generated by Flopy.'.format(self.parent.version) + f.write('{0}\n'.format(self.heading)) + + # dataset 1a + if len(self.options) > 0: + for option in self.options: + f.write('{} '.format(option)) + f.write('\n') + + # dataset 1b + f.write(write_fixed_var([self.nlakes, self.ipakcb], + free=self.parent.free_format_input)) + # dataset 2 + steady = np.any(self.parent.dis.steady.array) + t = [self.theta] + if self.theta < 0. or steady: + t.append(self.nssitr) + t.append(self.sscncr) + if self.theta < 0.: + t.append(self.surfdep) + f.write(write_fixed_var(t, free=self.parent.free_format_input)) + + # dataset 3 + steady = self.parent.dis.steady[0] + for n in range(self.nlakes): + ipos = [10] + t = [self.stages[n]] + if steady: + ipos.append(10) + t.append(self.stage_range[n, 0]) + ipos.append(10) + t.append(self.stage_range[n, 1]) + if self.tabdata: + ipos.append(5) + t.append(self.iunit_tab[n]) + f.write(write_fixed_var(t, ipos=ipos, + free=self.parent.free_format_input)) + + ds8_keys = list( + self.sill_data.keys()) if self.sill_data is not None else [] + ds9_keys = list(self.flux_data.keys()) + nper = self.parent.dis.steady.shape[0] + for kper in range(nper): + itmp, file_entry_lakarr = self.lakarr.get_kper_entry(kper) + ibd, file_entry_bdlknc = self.bdlknc.get_kper_entry(kper) + + itmp2 = 0 + if kper in ds9_keys: + itmp2 = 1 + elif len(ds9_keys) > 0: + itmp2 = -1 + if isinstance(self.lwrt, list): + tmplwrt = self.lwrt[kper] + else: + tmplwrt = self.lwrt + t = [itmp, itmp2, tmplwrt] + comment = 'Stress period {}'.format(kper + 1) + f.write(write_fixed_var(t, free=self.parent.free_format_input, + comment=comment)) + + if itmp > 0: + f.write(file_entry_lakarr) + f.write(file_entry_bdlknc) + + nslms = 0 + if kper in ds8_keys: + ds8 = self.sill_data[kper] + nslms = len(ds8) + + f.write(write_fixed_var([nslms], length=5, + free=self.parent.free_format_input, + comment='Data set 7')) + if nslms > 0: + for n in range(nslms): + d1, d2 = ds8[n] + s = write_fixed_var(d1, length=5, + free=self.parent.free_format_input, + comment='Data set 8a') + f.write(s) + s = write_fixed_var(d2, + free=self.parent.free_format_input, + comment='Data set 8b') + f.write(s) + + if itmp2 > 0: + ds9 = self.flux_data[kper] + for n in range(self.nlakes): + try: + steady = self.parent.dis.steady[kper] + except: + steady = True + if kper > 0 and steady: + t = ds9[n] + else: + t = ds9[n][0:4] + s = write_fixed_var(t, + free=self.parent.free_format_input, + comment='Data set 9a') + f.write(s) + + # close the lak file + f.close() + + @staticmethod + def load(f, model, nper=None, ext_unit_dict=None): + """ + Load an existing package. + + Parameters + ---------- + f : filename or file handle + File to load. + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to + which this package will be added. + nper : int + The number of stress periods. If nper is None, then nper will be + obtained from the model object. (default is None). + ext_unit_dict : dictionary, optional + If the arrays in the file are specified using EXTERNAL, + or older style array control records, then `f` should be a file + handle. In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. + + Returns + ------- + str : ModflowLak object + ModflowLak object. 
+
+        Examples
+        --------
+
+        >>> import flopy
+        >>> m = flopy.modflow.Modflow()
+        >>> lak = flopy.modflow.ModflowLak.load('test.lak', m)
+
+        """
+
+        if model.verbose:
+            sys.stdout.write('loading lak package file...\n')
+
+        openfile = not hasattr(f, 'read')
+        if openfile:
+            filename = f
+            f = open(filename, 'r', errors='replace')
+
+        # dataset 0 -- header
+        while True:
+            line = f.readline()
+            if line[0] != '#':
+                break
+
+        options = []
+        tabdata = False
+        if 'TABLEINPUT' in line.upper():
+            if model.verbose:
+                print("   reading lak dataset 1a")
+            options.append('TABLEINPUT')
+            tabdata = True
+            line = f.readline()
+
+        # read dataset 1b
+        if model.verbose:
+            print("   reading lak dataset 1b")
+        t = line.strip().split()
+        nlakes = int(t[0])
+        ipakcb = 0
+        try:
+            ipakcb = int(t[1])
+        except:
+            pass
+
+        # read dataset 2
+        line = f.readline().rstrip()
+        if model.array_free_format:
+            t = line.split()
+        else:
+            t = read_fixed_var(line, ncol=4)
+        theta = float(t[0])
+        nssitr, sscncr = 0, 0.
+        if theta < 0:
+            try:
+                nssitr = int(t[1])
+            except:
+                if model.verbose:
+                    print('   implicit nssitr defined in file')
+            try:
+                sscncr = float(t[2])
+            except:
+                if model.verbose:
+                    print('   implicit sscncr defined in file')
+
+        surfdep = 0.
+        if theta < 0.:
+            surfdep = float(t[3])
+
+        # grid dimensions are always needed below; only use the model
+        # value of nper if one was not passed in
+        nrow, ncol, nlay, npertmp = model.get_nrow_ncol_nlay_nper()
+        if nper is None:
+            nper = npertmp
+
+        if model.verbose:
+            print("   reading lak dataset 3")
+        stages = []
+        stage_range = []
+        if tabdata:
+            tab_units = []
+        else:
+            tab_units = None
+        for lake in range(nlakes):
+            line = f.readline().rstrip()
+            if model.array_free_format:
+                t = line.split()
+            else:
+                t = read_fixed_var(line, ipos=[10, 10, 10, 5])
+            stages.append(float(t[0]))
+            ipos = 1
+            if model.dis.steady[0]:
+                stage_range.append((float(t[ipos]), float(t[ipos + 1])))
+                ipos += 2
+            if tabdata:
+                iu = int(t[ipos])
+                tab_units.append(iu)
+
+        lake_loc = {}
+        lake_lknc = {}
+        sill_data = {}
+        flux_data = {}
+        lwrt = []
+        for iper in range(nper):
+            if model.verbose:
+                print("   reading lak dataset 4 - " +
+                      "for stress period {}".format(iper + 1))
+            line = f.readline().rstrip()
+            if model.array_free_format:
+                t = line.split()
+            else:
+                t = read_fixed_var(line, ncol=3)
+            itmp, itmp1, tmplwrt = int(t[0]), int(t[1]), int(t[2])
+            lwrt.append(tmplwrt)
+
+            if itmp > 0:
+                if model.verbose:
+                    print("   reading lak dataset 5 - " +
+                          "for stress period {}".format(iper + 1))
+                name = 'LKARR_StressPeriod_{}'.format(iper)
+                lakarr = Util3d.load(f, model, (nlay, nrow, ncol), np.int32,
+                                     name, ext_unit_dict)
+                if model.verbose:
+                    print("   reading lak dataset 6 - " +
+                          "for stress period {}".format(iper + 1))
+                name = 'BDLKNC_StressPeriod_{}'.format(iper)
+                bdlknc = Util3d.load(f, model, (nlay, nrow, ncol), np.float32,
+                                     name, ext_unit_dict)
+
+                lake_loc[iper] = lakarr
+                lake_lknc[iper] = bdlknc
+
+            if model.verbose:
+                print("   reading lak dataset 7 - " +
+                      "for stress period {}".format(iper + 1))
+            line = f.readline().rstrip()
+            t = line.split()
+            nslms = int(t[0])
+            ds8 = []
+            if nslms > 0:
+                if model.verbose:
+                    print("   reading lak dataset 8 - " +
+                          "for stress period {}".format(iper + 1))
+                for i in range(nslms):
+                    line = f.readline().rstrip()
+                    if model.array_free_format:
+                        t = line.split()
+                    else:
+                        ic = int(line[0:5])
+                        t = read_fixed_var(line, ncol=ic + 1, length=5)
+                    ic = int(t[0])
+                    ds8a = [ic]
+                    for j in range(1, ic + 1):
+                        ds8a.append(int(t[j]))
+                    line = f.readline().rstrip()
+                    if model.array_free_format:
+                        t = line.split()
+                    else:
+                        t = read_fixed_var(line, ncol=ic - 1)
+                    silvt = []
+                    for j in range(ic - 1):
+
silvt.append(float(t[j])) + ds8.append((ds8a, silvt)) + sill_data[iper] = ds8 + if itmp1 >= 0: + if model.verbose: + print(" reading lak dataset 9 - " + + "for stress period {}".format(iper + 1)) + ds9 = {} + for n in range(nlakes): + line = f.readline().rstrip() + if model.array_free_format: + t = line.split() + else: + t = read_fixed_var(line, ncol=6) + tds = [] + tds.append(float(t[0])) + tds.append(float(t[1])) + tds.append(float(t[2])) + tds.append(float(t[3])) + if model.dis.steady[iper]: + if iper == 0: + tds.append(stage_range[n][0]) + tds.append(stage_range[n][1]) + else: + tds.append(float(t[4])) + tds.append(float(t[5])) + else: + tds.append(0.) + tds.append(0.) + ds9[n] = tds + flux_data[iper] = ds9 + + if openfile: + f.close() + + # convert lake data to Transient3d objects + lake_loc = Transient3d(model, (nlay, nrow, ncol), np.int32, + lake_loc, name='lakarr_') + lake_lknc = Transient3d(model, (nlay, nrow, ncol), np.float32, + lake_lknc, name='bdlknc_') + + # determine specified unit number + n = 2 + if tab_units is not None: + n += nlakes + unitnumber = None + filenames = [None for x in range(n)] + if ext_unit_dict is not None: + unitnumber, filenames[0] = \ + model.get_ext_dict_attr(ext_unit_dict, + filetype=ModflowLak.ftype()) + if ipakcb > 0: + iu, filenames[1] = \ + model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb) + model.add_pop_key_list(ipakcb) + + ipos = 2 + if tab_units is not None: + for i in range(len(tab_units)): + iu, filenames[ipos] = \ + model.get_ext_dict_attr(ext_unit_dict, + unit=tab_units[i]) + ipos += 1 + + lakpak = ModflowLak(model, options=options, nlakes=nlakes, + ipakcb=ipakcb, theta=theta, nssitr=nssitr, + surfdep=surfdep, sscncr=sscncr, lwrt=lwrt, + stages=stages, + stage_range=stage_range, tab_units=tab_units, + lakarr=lake_loc, bdlknc=lake_lknc, + sill_data=sill_data, flux_data=flux_data, + unitnumber=unitnumber, filenames=filenames) + return lakpak + + @staticmethod + def ftype(): + return 'LAK' + + @staticmethod + def defaultunit(): + return 119 diff --git a/flopy/modflow/mflmt.py b/flopy/modflow/mflmt.py index 4232ada917..09312bf4c2 100644 --- a/flopy/modflow/mflmt.py +++ b/flopy/modflow/mflmt.py @@ -1,254 +1,254 @@ -""" -mflmt module. Contains the ModflowLmt class. Note that the user can access -the ModflowLmt class as `flopy.modflow.ModflowLmt`. - -Additional information for this MODFLOW package can be found at the `Online -MODFLOW Guide -`_. - -""" -import os -import sys -from ..pakbase import Package - - -class ModflowLmt(Package): - """ - MODFLOW Link-MT3DMS Package Class. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to which - this package will be added. - output_file_name : string - Filename for output file (default is 'mt3d_link.ftl') - unitnumber : int - File unit number (default is 24). - output_file_unit : int - Output file unit number, pertaining to the file identified - by output_file_name (default is 54). - output_file_header : string - Header for the output file (default is 'extended') - output_file_format : {'formatted', 'unformatted'} - Format of the output file (default is 'unformatted') - package_flows : ['sfr', 'lak', 'uzf'] - Specifies which of the advanced package flows should be added to the - flow-transport link (FTL) file. The addition of these flags may quickly - increase the FTL file size. Thus, the user must specifically request - their amendment within the FTL file. 
Default is not to add these - terms to the FTL file by omitting the keyword package_flows from - the LMT input file. One or multiple strings can be passed as a list to - the argument. - extension : string - Filename extension (default is 'lmt6') - unitnumber : int - File unit number (default is None). - filenames : str or list of str - Filenames to use for the package. If filenames=None the package name - will be created using the model name and package extension. If a - single string is passed the package will be set to the string. - Default is None. - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - Parameters are supported in Flopy only when reading in existing models. - Parameter values are converted to native values in Flopy and the - connection to "parameters" is thus nonexistent. - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> lmt = flopy.modflow.ModflowLmt(m, output_file_name='mt3d_linkage.ftl') - - """ - - def __init__(self, model, output_file_name='mt3d_link.ftl', - output_file_unit=54, output_file_header='extended', - output_file_format='unformatted', extension='lmt6', - package_flows=[], unitnumber=None, filenames=None): - - # set default unit number of one is not specified - if unitnumber is None: - unitnumber = ModflowLmt.defaultunit() - - # set filenames - if filenames is None: - filenames = [None] - elif isinstance(filenames, str): - filenames = [filenames] - - # Fill namefile items - name = [ModflowLmt.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - self.heading = '# {} package for '.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' - self.url = 'lmt.htm' - self.output_file_name = output_file_name - self.output_file_unit = output_file_unit - self.output_file_header = output_file_header - self.output_file_format = output_file_format - self.package_flows = package_flows - self.parent.add_package(self) - return - - def write_file(self): - """ - Write the package file. - - Returns - ------- - None - - """ - f = open(self.fn_path, 'w') - f.write('{}\n'.format(self.heading)) - f.write('{:20s}\n'.format('OUTPUT_FILE_NAME ' + - self.output_file_name)) - f.write('{:20s} {:10d}\n'.format('OUTPUT_FILE_UNIT ', - self.output_file_unit)) - f.write('{:20s}\n'.format('OUTPUT_FILE_HEADER ' + - self.output_file_header)) - f.write('{:20s}\n'.format('OUTPUT_FILE_FORMAT ' + - self.output_file_format)) - if self.package_flows: # check that the list is not empty - # Generate a string to write - pckgs = '' - if 'sfr' in [x.lower() for x in self.package_flows]: - pckgs += 'SFR ' - if 'lak' in [x.lower() for x in self.package_flows]: - pckgs += 'LAK ' - if 'uzf' in [x.lower() for x in self.package_flows]: - pckgs += 'UZF ' - if 'all' in [x.lower() for x in self.package_flows]: - pckgs += 'ALL' - - line = 'PACKAGE_FLOWS ' + pckgs - f.write('%s\n' % (line)) - - f.close() - - @staticmethod - def load(f, model, ext_unit_dict=None): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. 
- ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - - Returns - ------- - lmt : ModflowLmt object - ModflowLmt object. - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> lmt = flopy.modflow.ModflowGhb.load('test.lmt', m) - - """ - - if model.verbose: - sys.stdout.write('loading lmt package file...\n') - - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - elif hasattr(f, 'name'): - filename = f.name - else: - filename = None - - # set default values - if filename: - prefix = os.path.splitext(os.path.basename(filename))[0] - output_file_name = prefix + '.ftl' - else: - output_file_name = model.name + '.ftl' - output_file_unit = 333 - output_file_header = 'standard' - output_file_format = 'unformatted' - package_flows = [] - - for line in f: - if line[0] == '#': - continue - t = line.strip().split() - if len(t) < 2: - continue - if t[0].lower() == 'output_file_name': - output_file_name = t[1] - elif t[0].lower() == 'output_file_unit': - output_file_unit = int(t[1]) - elif t[0].lower() == 'output_file_header': - output_file_header = t[1] - elif t[0].lower() == 'output_file_format': - output_file_format = t[1] - elif t[0].lower() == 'package_flows': - # Multiple entries can follow 'package_flows' - if len(t) > 1: - for i in range(1, len(t)): - package_flows.append(t[i]) - - if openfile: - f.close() - - # determine specified unit number - unitnumber = None - filenames = [None] - if ext_unit_dict is not None: - unitnumber, filenames[0] = \ - model.get_ext_dict_attr(ext_unit_dict, - filetype=ModflowLmt.ftype()) - - lmt = ModflowLmt(model, output_file_name=output_file_name, - output_file_unit=output_file_unit, - output_file_header=output_file_header, - output_file_format=output_file_format, - package_flows=package_flows, - unitnumber=unitnumber, - filenames=filenames) - return lmt - - @staticmethod - def ftype(): - return 'LMT6' - - @staticmethod - def defaultunit(): - return 30 +""" +mflmt module. Contains the ModflowLmt class. Note that the user can access +the ModflowLmt class as `flopy.modflow.ModflowLmt`. + +Additional information for this MODFLOW package can be found at the `Online +MODFLOW Guide +`_. + +""" +import os +import sys +from ..pakbase import Package + + +class ModflowLmt(Package): + """ + MODFLOW Link-MT3DMS Package Class. + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to which + this package will be added. + output_file_name : string + Filename for output file (default is 'mt3d_link.ftl') + unitnumber : int + File unit number (default is 24). + output_file_unit : int + Output file unit number, pertaining to the file identified + by output_file_name (default is 54). + output_file_header : string + Header for the output file (default is 'extended') + output_file_format : {'formatted', 'unformatted'} + Format of the output file (default is 'unformatted') + package_flows : ['sfr', 'lak', 'uzf'] + Specifies which of the advanced package flows should be added to the + flow-transport link (FTL) file. The addition of these flags may quickly + increase the FTL file size. Thus, the user must specifically request + their amendment within the FTL file. 
Default is not to add these
+        terms to the FTL file by omitting the keyword package_flows from
+        the LMT input file. One or multiple strings can be passed as a list to
+        the argument.
+    extension : string
+        Filename extension (default is 'lmt6')
+    unitnumber : int
+        File unit number (default is None).
+    filenames : str or list of str
+        Filenames to use for the package. If filenames=None the package name
+        will be created using the model name and package extension. If a
+        single string is passed the package will be set to the string.
+        Default is None.
+
+    Attributes
+    ----------
+
+    Methods
+    -------
+
+    See Also
+    --------
+
+    Notes
+    -----
+    Parameters are supported in Flopy only when reading in existing models.
+    Parameter values are converted to native values in Flopy and the
+    connection to "parameters" is thus nonexistent.
+
+    Examples
+    --------
+
+    >>> import flopy
+    >>> m = flopy.modflow.Modflow()
+    >>> lmt = flopy.modflow.ModflowLmt(m, output_file_name='mt3d_linkage.ftl')
+
+    """
+
+    def __init__(self, model, output_file_name='mt3d_link.ftl',
+                 output_file_unit=54, output_file_header='extended',
+                 output_file_format='unformatted', extension='lmt6',
+                 package_flows=[], unitnumber=None, filenames=None):
+
+        # set default unit number if one is not specified
+        if unitnumber is None:
+            unitnumber = ModflowLmt.defaultunit()
+
+        # set filenames
+        if filenames is None:
+            filenames = [None]
+        elif isinstance(filenames, str):
+            filenames = [filenames]
+
+        # Fill namefile items
+        name = [ModflowLmt.ftype()]
+        units = [unitnumber]
+        extra = ['']
+
+        # set package name
+        fname = [filenames[0]]
+
+        # Call ancestor's init to set self.parent, extension, name and unit number
+        Package.__init__(self, model, extension=extension, name=name,
+                         unit_number=units, extra=extra, filenames=fname)
+
+        self.heading = '# {} package for '.format(self.name[0]) + \
+                       ' {}, '.format(model.version_types[model.version]) + \
+                       'generated by Flopy.'
+        self.url = 'lmt.htm'
+        self.output_file_name = output_file_name
+        self.output_file_unit = output_file_unit
+        self.output_file_header = output_file_header
+        self.output_file_format = output_file_format
+        self.package_flows = package_flows
+        self.parent.add_package(self)
+        return
+
+    def write_file(self):
+        """
+        Write the package file.
+
+        Returns
+        -------
+        None
+
+        """
+        f = open(self.fn_path, 'w')
+        f.write('{}\n'.format(self.heading))
+        f.write('{:20s}\n'.format('OUTPUT_FILE_NAME ' +
+                                  self.output_file_name))
+        f.write('{:20s} {:10d}\n'.format('OUTPUT_FILE_UNIT ',
+                                         self.output_file_unit))
+        f.write('{:20s}\n'.format('OUTPUT_FILE_HEADER ' +
+                                  self.output_file_header))
+        f.write('{:20s}\n'.format('OUTPUT_FILE_FORMAT ' +
+                                  self.output_file_format))
+        if self.package_flows:  # check that the list is not empty
+            # Generate a string to write
+            pckgs = ''
+            if 'sfr' in [x.lower() for x in self.package_flows]:
+                pckgs += 'SFR '
+            if 'lak' in [x.lower() for x in self.package_flows]:
+                pckgs += 'LAK '
+            if 'uzf' in [x.lower() for x in self.package_flows]:
+                pckgs += 'UZF '
+            if 'all' in [x.lower() for x in self.package_flows]:
+                pckgs += 'ALL'
+
+            line = 'PACKAGE_FLOWS ' + pckgs
+            f.write('%s\n' % (line))
+
+        f.close()
+
+    @staticmethod
+    def load(f, model, ext_unit_dict=None):
+        """
+        Load an existing package.
+
+        Parameters
+        ----------
+        f : filename or file handle
+            File to load.
+        model : model object
+            The model object (of type :class:`flopy.modflow.mf.Modflow`) to
+            which this package will be added.
+        ext_unit_dict : dictionary, optional
+            If the arrays in the file are specified using EXTERNAL,
+            or older style array control records, then `f` should be a file
+            handle. In this case ext_unit_dict is required, which can be
+            constructed using the function
+            :class:`flopy.utils.mfreadnam.parsenamefile`.
+
+        Returns
+        -------
+        lmt : ModflowLmt object
+            ModflowLmt object.
+
+        Examples
+        --------
+
+        >>> import flopy
+        >>> m = flopy.modflow.Modflow()
+        >>> lmt = flopy.modflow.ModflowLmt.load('test.lmt', m)
+
+        """
+
+        if model.verbose:
+            sys.stdout.write('loading lmt package file...\n')
+
+        openfile = not hasattr(f, 'read')
+        if openfile:
+            filename = f
+            f = open(filename, 'r')
+        elif hasattr(f, 'name'):
+            filename = f.name
+        else:
+            filename = None
+
+        # set default values
+        if filename:
+            prefix = os.path.splitext(os.path.basename(filename))[0]
+            output_file_name = prefix + '.ftl'
+        else:
+            output_file_name = model.name + '.ftl'
+        output_file_unit = 333
+        output_file_header = 'standard'
+        output_file_format = 'unformatted'
+        package_flows = []
+
+        for line in f:
+            if line[0] == '#':
+                continue
+            t = line.strip().split()
+            if len(t) < 2:
+                continue
+            if t[0].lower() == 'output_file_name':
+                output_file_name = t[1]
+            elif t[0].lower() == 'output_file_unit':
+                output_file_unit = int(t[1])
+            elif t[0].lower() == 'output_file_header':
+                output_file_header = t[1]
+            elif t[0].lower() == 'output_file_format':
+                output_file_format = t[1]
+            elif t[0].lower() == 'package_flows':
+                # Multiple entries can follow 'package_flows'
+                if len(t) > 1:
+                    for i in range(1, len(t)):
+                        package_flows.append(t[i])
+
+        if openfile:
+            f.close()
+
+        # determine specified unit number
+        unitnumber = None
+        filenames = [None]
+        if ext_unit_dict is not None:
+            unitnumber, filenames[0] = \
+                model.get_ext_dict_attr(ext_unit_dict,
+                                        filetype=ModflowLmt.ftype())
+
+        lmt = ModflowLmt(model, output_file_name=output_file_name,
+                         output_file_unit=output_file_unit,
+                         output_file_header=output_file_header,
+                         output_file_format=output_file_format,
+                         package_flows=package_flows,
+                         unitnumber=unitnumber,
+                         filenames=filenames)
+        return lmt
+
+    @staticmethod
+    def ftype():
+        return 'LMT6'
+
+    @staticmethod
+    def defaultunit():
+        return 30
diff --git a/flopy/modflow/mflpf.py b/flopy/modflow/mflpf.py
index 5b512056f7..91cd3d3d01 100644
--- a/flopy/modflow/mflpf.py
+++ b/flopy/modflow/mflpf.py
@@ -1,631 +1,631 @@
-"""
-mflpf module. Contains the ModflowLpf class. Note that the user can access
-the ModflowLpf class as `flopy.modflow.ModflowLpf`.
-
-Additional information for this MODFLOW package can be found at the `Online
-MODFLOW Guide
-`_.
-
-"""
-
-import sys
-
-import numpy as np
-from .mfpar import ModflowPar as mfpar
-
-from ..pakbase import Package
-from ..utils import Util2d, Util3d, read1d
-from ..utils.flopy_io import line_parse
-
-
-class ModflowLpf(Package):
-    """
-    MODFLOW Layer Property Flow Package Class.
-
-    Parameters
-    ----------
-    model : model object
-        The model object (of type :class:`flopy.modflow.mf.Modflow`) to which
-        this package will be added.
-    ipakcb : int
-        A flag that is used to determine if cell-by-cell budget data should be
-        saved. If ipakcb is non-zero cell-by-cell budget data will be saved.
-        (default is 0)
-    hdry : float
-        Is the head that is assigned to cells that are converted to dry during
-        a simulation. Although this value plays no role in the model
-        calculations, it is useful as an indicator when looking at the
-        resulting heads that are output from the model.
HDRY is thus similar - to HNOFLO in the Basic Package, which is the value assigned to cells - that are no-flow cells at the start of a model simulation. - (default is -1.e30). - laytyp : int or array of ints (nlay) - Layer type, contains a flag for each layer that specifies the layer - type. - 0 confined - >0 convertible - <0 convertible unless the THICKSTRT option is in effect. - (default is 0). - layavg : int or array of ints (nlay) - Layer average - 0 is harmonic mean - 1 is logarithmic mean - 2 is arithmetic mean of saturated thickness and logarithmic mean of - of hydraulic conductivity - (default is 0). - chani : float or array of floats (nlay) - contains a value for each layer that is a flag or the horizontal - anisotropy. If CHANI is less than or equal to 0, then variable HANI - defines horizontal anisotropy. If CHANI is greater than 0, then CHANI - is the horizontal anisotropy for the entire layer, and HANI is not - read. If any HANI parameters are used, CHANI for all layers must be - less than or equal to 0. Use as many records as needed to enter a - value of CHANI for each layer. The horizontal anisotropy is the ratio - of the hydraulic conductivity along columns (the Y direction) to the - hydraulic conductivity along rows (the X direction). - (default is 1). - layvka : int or array of ints (nlay) - a flag for each layer that indicates whether variable VKA is vertical - hydraulic conductivity or the ratio of horizontal to vertical - hydraulic conductivity. - 0: VKA is vertical hydraulic conductivity - not 0: VKA is the ratio of horizontal to vertical hydraulic conductivity - (default is 0). - laywet : int or array of ints (nlay) - contains a flag for each layer that indicates if wetting is active. - 0 wetting is inactive - not 0 wetting is active - (default is 0). - wetfct : float - is a factor that is included in the calculation of the head that is - initially established at a cell when it is converted from dry to wet. - (default is 0.1). - iwetit : int - is the iteration interval for attempting to wet cells. Wetting is - attempted every IWETIT iteration. If using the PCG solver - (Hill, 1990), this applies to outer iterations, not inner iterations. - If IWETIT less than or equal to 0, it is changed to 1. - (default is 1). - ihdwet : int - is a flag that determines which equation is used to define the - initial head at cells that become wet. - (default is 0) - hk : float or array of floats (nlay, nrow, ncol) - is the hydraulic conductivity along rows. HK is multiplied by - horizontal anisotropy (see CHANI and HANI) to obtain hydraulic - conductivity along columns. - (default is 1.0). - hani : float or array of floats (nlay, nrow, ncol) - is the ratio of hydraulic conductivity along columns to hydraulic - conductivity along rows, where HK of item 10 specifies the hydraulic - conductivity along rows. Thus, the hydraulic conductivity along - columns is the product of the values in HK and HANI. - (default is 1.0). - vka : float or array of floats (nlay, nrow, ncol) - is either vertical hydraulic conductivity or the ratio of horizontal - to vertical hydraulic conductivity depending on the value of LAYVKA. - (default is 1.0). - ss : float or array of floats (nlay, nrow, ncol) - is specific storage unless the STORAGECOEFFICIENT option is used. - When STORAGECOEFFICIENT is used, Ss is confined storage coefficient. - (default is 1.e-5). - sy : float or array of floats (nlay, nrow, ncol) - is specific yield. - (default is 0.15). 
- vkcb : float or array of floats (nlay, nrow, ncol) - is the vertical hydraulic conductivity of a Quasi-three-dimensional - confining bed below a layer. (default is 0.0). Note that if an array - is passed for vkcb it must be of size (nlay, nrow, ncol) even though - the information for the bottom layer is not needed. - wetdry : float or array of floats (nlay, nrow, ncol) - is a combination of the wetting threshold and a flag to indicate - which neighboring cells can cause a cell to become wet. - (default is -0.01). - storagecoefficient : boolean - indicates that variable Ss and SS parameters are read as storage - coefficient rather than specific storage. - (default is False). - constantcv : boolean - indicates that vertical conductance for an unconfined cell is - computed from the cell thickness rather than the saturated thickness. - The CONSTANTCV option automatically invokes the NOCVCORRECTION - option. (default is False). - thickstrt : boolean - indicates that layers having a negative LAYTYP are confined, and their - cell thickness for conductance calculations will be computed as - STRT-BOT rather than TOP-BOT. (default is False). - nocvcorrection : boolean - indicates that vertical conductance is not corrected when the vertical - flow correction is applied. (default is False). - novfc : boolean - turns off the vertical flow correction under dewatered conditions. - This option turns off the vertical flow calculation described on p. - 5-8 of USGS Techniques and Methods Report 6-A16 and the vertical - conductance correction described on p. 5-18 of that report. - (default is False). - extension : string - Filename extension (default is 'lpf') - unitnumber : int - File unit number (default is None). - filenames : str or list of str - Filenames to use for the package and the output files. If - filenames=None the package name will be created using the model name - and package extension and the cbc output name will be created using - the model name and .cbc extension (for example, modflowtest.cbc), - if ipakcbc is a number greater than zero. If a single string is passed - the package will be set to the string and cbc output name will be - created using the model name and .cbc extension, if ipakcbc is a - number greater than zero. To define the names for all package files - (input and output) the length of the list of strings should be 2. - Default is None. 
- - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> lpf = flopy.modflow.ModflowLpf(m) - - """ - - 'Layer-property flow package class\n' - - def __init__(self, model, laytyp=0, layavg=0, chani=1.0, layvka=0, - laywet=0, ipakcb=None, hdry=-1E+30, iwdflg=0, wetfct=0.1, - iwetit=1, ihdwet=0, hk=1.0, hani=1.0, vka=1.0, ss=1e-5, - sy=0.15, vkcb=0.0, wetdry=-0.01, storagecoefficient=False, - constantcv=False, thickstrt=False, nocvcorrection=False, - novfc=False, extension='lpf', - unitnumber=None, filenames=None): - - # set default unit number of one is not specified - if unitnumber is None: - unitnumber = ModflowLpf.defaultunit() - - # set filenames - if filenames is None: - filenames = [None, None] - elif isinstance(filenames, str): - filenames = [filenames, None] - elif isinstance(filenames, list): - if len(filenames) < 2: - filenames.append(None) - - # update external file information with cbc output, if necessary - if ipakcb is not None: - fname = filenames[1] - model.add_output_file(ipakcb, fname=fname, - package=ModflowLpf.ftype()) - else: - ipakcb = 0 - - # Fill namefile items - name = [ModflowLpf.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - self.heading = '# {} package for '.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' - self.url = 'lpf.htm' - nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper - - # item 1 - self.ipakcb = ipakcb - self.hdry = hdry # Head in cells that are converted to dry during a simulation - self.nplpf = 0 # number of LPF parameters - self.laytyp = Util2d(model, (nlay,), np.int32, laytyp, name='laytyp') - self.layavg = Util2d(model, (nlay,), np.int32, layavg, name='layavg') - self.chani = Util2d(model, (nlay,), np.float32, chani, name='chani') - self.layvka = Util2d(model, (nlay,), np.int32, layvka, name='layvka') - self.laywet = Util2d(model, (nlay,), np.int32, laywet, name='laywet') - # Factor that is included in the calculation of the head when a cell is - # converted from dry to wet - self.wetfct = wetfct - # Iteration interval for attempting to wet cells - self.iwetit = iwetit - # Flag that determines which equation is used to define the initial - # head at cells that become wet - self.ihdwet = ihdwet - self.options = ' ' - if storagecoefficient: - self.options = self.options + 'STORAGECOEFFICIENT ' - if constantcv: self.options = self.options + 'CONSTANTCV ' - if thickstrt: self.options = self.options + 'THICKSTRT ' - if nocvcorrection: self.options = self.options + 'NOCVCORRECTION ' - if novfc: self.options = self.options + 'NOVFC ' - self.hk = Util3d(model, (nlay, nrow, ncol), np.float32, hk, name='hk', - locat=self.unit_number[0]) - self.hani = Util3d(model, (nlay, nrow, ncol), np.float32, hani, - name='hani', locat=self.unit_number[0]) - keys = [] - for k in range(nlay): - key = 'vka' - if self.layvka[k] != 0: - key = 'vani' - keys.append(key) - self.vka = Util3d(model, (nlay, nrow, ncol), np.float32, vka, - name=keys, locat=self.unit_number[0]) - tag = 'ss' - if storagecoefficient: - tag = 'storage' - self.ss = Util3d(model, (nlay, nrow, ncol), np.float32, ss, name=tag, - locat=self.unit_number[0]) - self.sy = Util3d(model, 
(nlay, nrow, ncol), np.float32, sy, name='sy', - locat=self.unit_number[0]) - self.vkcb = Util3d(model, (nlay, nrow, ncol), np.float32, vkcb, - name='vkcb', locat=self.unit_number[0]) - self.wetdry = Util3d(model, (nlay, nrow, ncol), np.float32, wetdry, - name='wetdry', locat=self.unit_number[0]) - self.parent.add_package(self) - return - - def write_file(self, check=True, f=None): - """ - Write the package file. - - Parameters - ---------- - check : boolean - Check package data for common errors. (default True) - - Returns - ------- - None - - """ - # allows turning off package checks when writing files at model level - if check: - self.check(f='{}.chk'.format(self.name[0]), - verbose=self.parent.verbose, level=1) - - # get model information - nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper - dis = self.parent.get_package('DIS') - if dis is None: - dis = self.parent.get_package('DISU') - - # Open file for writing - if f is None: - f = open(self.fn_path, 'w') - - # Item 0: text - f.write('{}\n'.format(self.heading)) - - # Item 1: IBCFCB, HDRY, NPLPF - f.write('{0:10d}{1:10.6G}{2:10d} {3:s}\n'.format(self.ipakcb, - self.hdry, - self.nplpf, - self.options)) - # LAYTYP array - f.write(self.laytyp.string) - # LAYAVG array - f.write(self.layavg.string) - # CHANI array - f.write(self.chani.string) - # LAYVKA array - f.write(self.layvka.string) - # LAYWET array - f.write(self.laywet.string) - # Item 7: WETFCT, IWETIT, IHDWET - iwetdry = self.laywet.sum() - if iwetdry > 0: - f.write('{0:10f}{1:10d}{2:10d}\n'.format(self.wetfct, - self.iwetit, - self.ihdwet)) - transient = not dis.steady.all() - for k in range(nlay): - f.write(self.hk[k].get_file_entry()) - if self.chani[k] <= 0.: - f.write(self.hani[k].get_file_entry()) - f.write(self.vka[k].get_file_entry()) - if transient == True: - f.write(self.ss[k].get_file_entry()) - if self.laytyp[k] != 0: - f.write(self.sy[k].get_file_entry()) - if dis.laycbd[k] > 0: - f.write(self.vkcb[k].get_file_entry()) - if (self.laywet[k] != 0 and self.laytyp[k] != 0): - f.write(self.wetdry[k].get_file_entry()) - f.close() - return - - @staticmethod - def load(f, model, ext_unit_dict=None, check=True): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - check : boolean - Check package data for common errors. (default True) - - Returns - ------- - lpf : ModflowLpf object - ModflowLpf object. 
- - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> lpf = flopy.modflow.ModflowLpf.load('test.lpf', m) - - """ - - if model.verbose: - sys.stdout.write('loading lpf package file...\n') - - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - - # dataset 0 -- header - while True: - line = f.readline() - if line[0] != '#': - break - - # determine problem dimensions - nr, nc, nlay, nper = model.get_nrow_ncol_nlay_nper() - dis = model.get_package('DIS') - if dis is None: - dis = model.get_package('DISU') - - # Item 1: IBCFCB, HDRY, NPLPF - line already read above - if model.verbose: - print(' loading IBCFCB, HDRY, NPLPF...') - t = line_parse(line) - ipakcb, hdry, nplpf = int(t[0]), float(t[1]), int(t[2]) - # if ipakcb != 0: - # model.add_pop_key_list(ipakcb) - # ipakcb = 53 - # options - storagecoefficient = False - constantcv = False - thickstrt = False - nocvcorrection = False - novfc = False - if len(t) > 3: - for k in range(3, len(t)): - if 'STORAGECOEFFICIENT' in t[k].upper(): - storagecoefficient = True - elif 'CONSTANTCV' in t[k].upper(): - constantcv = True - elif 'THICKSTRT' in t[k].upper(): - thickstrt = True - elif 'NOCVCORRECTION' in t[k].upper(): - nocvcorrection = True - elif 'NOVFC' in t[k].upper(): - novfc = True - - # LAYTYP array - if model.verbose: - print(' loading LAYTYP...') - laytyp = np.empty((nlay), dtype=np.int32) - laytyp = read1d(f, laytyp) - - # LAYAVG array - if model.verbose: - print(' loading LAYAVG...') - layavg = np.empty((nlay), dtype=np.int32) - layavg = read1d(f, layavg) - - # CHANI array - if model.verbose: - print(' loading CHANI...') - chani = np.empty((nlay), dtype=np.float32) - chani = read1d(f, chani) - - # LAYVKA array - if model.verbose: - print(' loading LAYVKA...') - layvka = np.empty((nlay,), dtype=np.int32) - layvka = read1d(f, layvka) - - # LAYWET array - if model.verbose: - print(' loading LAYWET...') - laywet = np.empty((nlay), dtype=np.int32) - laywet = read1d(f, laywet) - - # Item 7: WETFCT, IWETIT, IHDWET - wetfct, iwetit, ihdwet = None, None, None - iwetdry = laywet.sum() - if iwetdry > 0: - if model.verbose: - print(' loading WETFCT, IWETIT, IHDWET...') - line = f.readline() - t = line.strip().split() - wetfct, iwetit, ihdwet = float(t[0]), int(t[1]), int(t[2]) - - # parameters data - par_types = [] - if nplpf > 0: - par_types, parm_dict = mfpar.load(f, nplpf, model.verbose) - # print parm_dict - - # non-parameter data - transient = not dis.steady.all() - hk = [0] * nlay - hani = [0] * nlay - vka = [0] * nlay - ss = [0] * nlay - sy = [0] * nlay - vkcb = [0] * nlay - wetdry = [0] * nlay - - # load by layer - for k in range(nlay): - - # allow for unstructured changing nodes per layer - if nr is None: - nrow = 1 - ncol = nc[k] - else: - nrow = nr - ncol = nc - - # hk - if model.verbose: - print(' loading hk layer {0:3d}...'.format(k + 1)) - if 'hk' not in par_types: - t = Util2d.load(f, model, (nrow, ncol), np.float32, 'hk', - ext_unit_dict) - else: - line = f.readline() - t = mfpar.parameter_fill(model, (nrow, ncol), 'hk', parm_dict, - findlayer=k) - hk[k] = t - - # hani - if chani[k] <= 0.: - if model.verbose: - print(' loading hani layer {0:3d}...'.format(k + 1)) - if 'hani' not in par_types: - t = Util2d.load(f, model, (nrow, ncol), np.float32, 'hani', - ext_unit_dict) - else: - line = f.readline() - t = mfpar.parameter_fill(model, (nrow, ncol), 'hani', - parm_dict, findlayer=k) - hani[k] = t - - # vka - if model.verbose: - print(' loading vka layer 
{0:3d}...'.format(k + 1)) - key = 'vk' - if layvka[k] != 0: - key = 'vani' - if 'vk' not in par_types and 'vani' not in par_types: - t = Util2d.load(f, model, (nrow, ncol), np.float32, key, - ext_unit_dict) - else: - line = f.readline() - key = 'vk' - if 'vani' in par_types: - key = 'vani' - t = mfpar.parameter_fill(model, (nrow, ncol), key, parm_dict, - findlayer=k) - vka[k] = t - - # storage properties - if transient: - - # ss - if model.verbose: - print(' loading ss layer {0:3d}...'.format(k + 1)) - if 'ss' not in par_types: - t = Util2d.load(f, model, (nrow, ncol), np.float32, 'ss', - ext_unit_dict) - else: - line = f.readline() - t = mfpar.parameter_fill(model, (nrow, ncol), 'ss', - parm_dict, findlayer=k) - ss[k] = t - - # sy - if laytyp[k] != 0: - if model.verbose: - print(' loading sy layer {0:3d}...'.format(k + 1)) - if 'sy' not in par_types: - t = Util2d.load(f, model, (nrow, ncol), np.float32, - 'sy', - ext_unit_dict) - else: - line = f.readline() - t = mfpar.parameter_fill(model, (nrow, ncol), 'sy', - parm_dict, findlayer=k) - sy[k] = t - - # vkcb - if dis.laycbd[k] > 0: - if model.verbose: - print(' loading vkcb layer {0:3d}...'.format(k + 1)) - if 'vkcb' not in par_types: - t = Util2d.load(f, model, (nrow, ncol), np.float32, 'vkcb', - ext_unit_dict) - else: - line = f.readline() - t = mfpar.parameter_fill(model, (nrow, ncol), 'vkcb', - parm_dict, findlayer=k) - vkcb[k] = t - - # wetdry - if (laywet[k] != 0 and laytyp[k] != 0): - if model.verbose: - print(' loading wetdry layer {0:3d}...'.format(k + 1)) - t = Util2d.load(f, model, (nrow, ncol), np.float32, 'wetdry', - ext_unit_dict) - wetdry[k] = t - - if openfile: - f.close() - - # set package unit number - unitnumber = None - filenames = [None, None] - if ext_unit_dict is not None: - unitnumber, filenames[0] = \ - model.get_ext_dict_attr(ext_unit_dict, - filetype=ModflowLpf.ftype()) - if ipakcb > 0: - iu, filenames[1] = \ - model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb) - model.add_pop_key_list(ipakcb) - - # create instance of lpf class - lpf = ModflowLpf(model, ipakcb=ipakcb, laytyp=laytyp, layavg=layavg, - chani=chani, layvka=layvka, laywet=laywet, hdry=hdry, - iwdflg=iwetdry, wetfct=wetfct, iwetit=iwetit, - ihdwet=ihdwet, hk=hk, hani=hani, vka=vka, ss=ss, - sy=sy, vkcb=vkcb, wetdry=wetdry, - storagecoefficient=storagecoefficient, - constantcv=constantcv, thickstrt=thickstrt, - novfc=novfc, - unitnumber=unitnumber, filenames=filenames) - if check: - lpf.check(f='{}.chk'.format(lpf.name[0]), - verbose=lpf.parent.verbose, level=0) - return lpf - - @staticmethod - def ftype(): - return 'LPF' - - @staticmethod - def defaultunit(): - return 15 +""" +mflpf module. Contains the ModflowLpf class. Note that the user can access +the ModflowLpf class as `flopy.modflow.ModflowLpf`. + +Additional information for this MODFLOW package can be found at the `Online +MODFLOW Guide +`_. + +""" + +import sys + +import numpy as np +from .mfpar import ModflowPar as mfpar + +from ..pakbase import Package +from ..utils import Util2d, Util3d, read1d +from ..utils.flopy_io import line_parse + + +class ModflowLpf(Package): + """ + MODFLOW Layer Property Flow Package Class. + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to which + this package will be added. + ipakcb : int + A flag that is used to determine if cell-by-cell budget data should be + saved. If ipakcb is non-zero cell-by-cell budget data will be saved. 
+        (default is 0)
+    hdry : float
+        Is the head that is assigned to cells that are converted to dry during
+        a simulation. Although this value plays no role in the model
+        calculations, it is useful as an indicator when looking at the
+        resulting heads that are output from the model. HDRY is thus similar
+        to HNOFLO in the Basic Package, which is the value assigned to cells
+        that are no-flow cells at the start of a model simulation.
+        (default is -1.e30).
+    laytyp : int or array of ints (nlay)
+        Layer type, contains a flag for each layer that specifies the layer
+        type.
+        0 confined
+        >0 convertible
+        <0 convertible unless the THICKSTRT option is in effect.
+        (default is 0).
+    layavg : int or array of ints (nlay)
+        Layer average
+        0 is harmonic mean
+        1 is logarithmic mean
+        2 is arithmetic mean of saturated thickness and logarithmic mean
+        of hydraulic conductivity
+        (default is 0).
+    chani : float or array of floats (nlay)
+        contains a value for each layer that is a flag or the horizontal
+        anisotropy. If CHANI is less than or equal to 0, then variable HANI
+        defines horizontal anisotropy. If CHANI is greater than 0, then CHANI
+        is the horizontal anisotropy for the entire layer, and HANI is not
+        read. If any HANI parameters are used, CHANI for all layers must be
+        less than or equal to 0. Use as many records as needed to enter a
+        value of CHANI for each layer. The horizontal anisotropy is the ratio
+        of the hydraulic conductivity along columns (the Y direction) to the
+        hydraulic conductivity along rows (the X direction).
+        (default is 1).
+    layvka : int or array of ints (nlay)
+        a flag for each layer that indicates whether variable VKA is vertical
+        hydraulic conductivity or the ratio of horizontal to vertical
+        hydraulic conductivity.
+        0: VKA is vertical hydraulic conductivity
+        not 0: VKA is the ratio of horizontal to vertical hydraulic conductivity
+        (default is 0).
+    laywet : int or array of ints (nlay)
+        contains a flag for each layer that indicates if wetting is active.
+        0 wetting is inactive
+        not 0 wetting is active
+        (default is 0).
+    wetfct : float
+        is a factor that is included in the calculation of the head that is
+        initially established at a cell when it is converted from dry to wet.
+        (default is 0.1).
+    iwetit : int
+        is the iteration interval for attempting to wet cells. Wetting is
+        attempted every IWETIT iteration. If using the PCG solver
+        (Hill, 1990), this applies to outer iterations, not inner iterations.
+        If IWETIT is less than or equal to 0, it is changed to 1.
+        (default is 1).
+    ihdwet : int
+        is a flag that determines which equation is used to define the
+        initial head at cells that become wet.
+        (default is 0)
+    hk : float or array of floats (nlay, nrow, ncol)
+        is the hydraulic conductivity along rows. HK is multiplied by
+        horizontal anisotropy (see CHANI and HANI) to obtain hydraulic
+        conductivity along columns.
+        (default is 1.0).
+    hani : float or array of floats (nlay, nrow, ncol)
+        is the ratio of hydraulic conductivity along columns to hydraulic
+        conductivity along rows, where HK of item 10 specifies the hydraulic
+        conductivity along rows. Thus, the hydraulic conductivity along
+        columns is the product of the values in HK and HANI.
+        (default is 1.0).
+    vka : float or array of floats (nlay, nrow, ncol)
+        is either vertical hydraulic conductivity or the ratio of horizontal
+        to vertical hydraulic conductivity depending on the value of LAYVKA.
+        (default is 1.0).
+ ss : float or array of floats (nlay, nrow, ncol) + is specific storage unless the STORAGECOEFFICIENT option is used. + When STORAGECOEFFICIENT is used, Ss is confined storage coefficient. + (default is 1.e-5). + sy : float or array of floats (nlay, nrow, ncol) + is specific yield. + (default is 0.15). + vkcb : float or array of floats (nlay, nrow, ncol) + is the vertical hydraulic conductivity of a Quasi-three-dimensional + confining bed below a layer. (default is 0.0). Note that if an array + is passed for vkcb it must be of size (nlay, nrow, ncol) even though + the information for the bottom layer is not needed. + wetdry : float or array of floats (nlay, nrow, ncol) + is a combination of the wetting threshold and a flag to indicate + which neighboring cells can cause a cell to become wet. + (default is -0.01). + storagecoefficient : boolean + indicates that variable Ss and SS parameters are read as storage + coefficient rather than specific storage. + (default is False). + constantcv : boolean + indicates that vertical conductance for an unconfined cell is + computed from the cell thickness rather than the saturated thickness. + The CONSTANTCV option automatically invokes the NOCVCORRECTION + option. (default is False). + thickstrt : boolean + indicates that layers having a negative LAYTYP are confined, and their + cell thickness for conductance calculations will be computed as + STRT-BOT rather than TOP-BOT. (default is False). + nocvcorrection : boolean + indicates that vertical conductance is not corrected when the vertical + flow correction is applied. (default is False). + novfc : boolean + turns off the vertical flow correction under dewatered conditions. + This option turns off the vertical flow calculation described on p. + 5-8 of USGS Techniques and Methods Report 6-A16 and the vertical + conductance correction described on p. 5-18 of that report. + (default is False). + extension : string + Filename extension (default is 'lpf') + unitnumber : int + File unit number (default is None). + filenames : str or list of str + Filenames to use for the package and the output files. If + filenames=None the package name will be created using the model name + and package extension and the cbc output name will be created using + the model name and .cbc extension (for example, modflowtest.cbc), + if ipakcbc is a number greater than zero. If a single string is passed + the package will be set to the string and cbc output name will be + created using the model name and .cbc extension, if ipakcbc is a + number greater than zero. To define the names for all package files + (input and output) the length of the list of strings should be 2. + Default is None. 
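For orientation, the parameters above map one-to-one onto the constructor. A minimal construction sketch (the model name, grid dimensions, and property values below are illustrative, not taken from this patch):

    import flopy

    m = flopy.modflow.Modflow(modelname='lpf_demo')
    dis = flopy.modflow.ModflowDis(m, nlay=3, nrow=10, ncol=10, nper=2,
                                   top=10., botm=[0., -10., -20.],
                                   steady=[True, False])
    # layer 1 convertible, layers 2 and 3 confined; per-layer conductivities
    lpf = flopy.modflow.ModflowLpf(m, laytyp=[1, 0, 0],
                                   hk=[10., 1., 0.1], vka=[1., 0.1, 0.01],
                                   ss=1e-5, sy=0.15, ipakcb=53)

Scalars broadcast to every layer (or cell), while lists and arrays are expanded layer by layer by the Util2d/Util3d containers used in __init__ below.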
+
+
+    Attributes
+    ----------
+
+    Methods
+    -------
+
+    See Also
+    --------
+
+    Notes
+    -----
+
+    Examples
+    --------
+
+    >>> import flopy
+    >>> m = flopy.modflow.Modflow()
+    >>> lpf = flopy.modflow.ModflowLpf(m)
+
+    """
+
+    def __init__(self, model, laytyp=0, layavg=0, chani=1.0, layvka=0,
+                 laywet=0, ipakcb=None, hdry=-1E+30, iwdflg=0, wetfct=0.1,
+                 iwetit=1, ihdwet=0, hk=1.0, hani=1.0, vka=1.0, ss=1e-5,
+                 sy=0.15, vkcb=0.0, wetdry=-0.01, storagecoefficient=False,
+                 constantcv=False, thickstrt=False, nocvcorrection=False,
+                 novfc=False, extension='lpf',
+                 unitnumber=None, filenames=None):
+
+        # set default unit number if one is not specified
+        if unitnumber is None:
+            unitnumber = ModflowLpf.defaultunit()
+
+        # set filenames
+        if filenames is None:
+            filenames = [None, None]
+        elif isinstance(filenames, str):
+            filenames = [filenames, None]
+        elif isinstance(filenames, list):
+            if len(filenames) < 2:
+                filenames.append(None)
+
+        # update external file information with cbc output, if necessary
+        if ipakcb is not None:
+            fname = filenames[1]
+            model.add_output_file(ipakcb, fname=fname,
+                                  package=ModflowLpf.ftype())
+        else:
+            ipakcb = 0
+
+        # Fill namefile items
+        name = [ModflowLpf.ftype()]
+        units = [unitnumber]
+        extra = ['']
+
+        # set package name
+        fname = [filenames[0]]
+
+        # Call ancestor's init to set self.parent, extension, name and
+        # unit number
+        Package.__init__(self, model, extension=extension, name=name,
+                         unit_number=units, extra=extra, filenames=fname)
+
+        self.heading = '# {} package for '.format(self.name[0]) + \
+                       ' {}, '.format(model.version_types[model.version]) + \
+                       'generated by Flopy.'
+        self.url = 'lpf.htm'
+        nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper
+
+        # item 1
+        self.ipakcb = ipakcb
+        # head assigned to cells that are converted to dry during a simulation
+        self.hdry = hdry
+        self.nplpf = 0  # number of LPF parameters
+        self.laytyp = Util2d(model, (nlay,), np.int32, laytyp, name='laytyp')
+        self.layavg = Util2d(model, (nlay,), np.int32, layavg, name='layavg')
+        self.chani = Util2d(model, (nlay,), np.float32, chani, name='chani')
+        self.layvka = Util2d(model, (nlay,), np.int32, layvka, name='layvka')
+        self.laywet = Util2d(model, (nlay,), np.int32, laywet, name='laywet')
+        # factor included in the calculation of the head when a cell is
+        # converted from dry to wet
+        self.wetfct = wetfct
+        # iteration interval for attempting to wet cells
+        self.iwetit = iwetit
+        # flag that determines which equation is used to define the initial
+        # head at cells that become wet
+        self.ihdwet = ihdwet
+        self.options = ' '
+        if storagecoefficient:
+            self.options = self.options + 'STORAGECOEFFICIENT '
+        if constantcv:
+            self.options = self.options + 'CONSTANTCV '
+        if thickstrt:
+            self.options = self.options + 'THICKSTRT '
+        if nocvcorrection:
+            self.options = self.options + 'NOCVCORRECTION '
+        if novfc:
+            self.options = self.options + 'NOVFC '
+        self.hk = Util3d(model, (nlay, nrow, ncol), np.float32, hk, name='hk',
+                         locat=self.unit_number[0])
+        self.hani = Util3d(model, (nlay, nrow, ncol), np.float32, hani,
+                           name='hani', locat=self.unit_number[0])
+        keys = []
+        for k in range(nlay):
+            key = 'vka'
+            if self.layvka[k] != 0:
+                key = 'vani'
+            keys.append(key)
+        self.vka = Util3d(model, (nlay, nrow, ncol), np.float32, vka,
+                          name=keys, locat=self.unit_number[0])
+        tag = 'ss'
+        if storagecoefficient:
+            tag = 'storage'
+        self.ss = Util3d(model, (nlay, nrow, ncol), np.float32, ss, name=tag,
+                         locat=self.unit_number[0])
+        self.sy = Util3d(model,
(nlay, nrow, ncol), np.float32, sy, name='sy', + locat=self.unit_number[0]) + self.vkcb = Util3d(model, (nlay, nrow, ncol), np.float32, vkcb, + name='vkcb', locat=self.unit_number[0]) + self.wetdry = Util3d(model, (nlay, nrow, ncol), np.float32, wetdry, + name='wetdry', locat=self.unit_number[0]) + self.parent.add_package(self) + return + + def write_file(self, check=True, f=None): + """ + Write the package file. + + Parameters + ---------- + check : boolean + Check package data for common errors. (default True) + + Returns + ------- + None + + """ + # allows turning off package checks when writing files at model level + if check: + self.check(f='{}.chk'.format(self.name[0]), + verbose=self.parent.verbose, level=1) + + # get model information + nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper + dis = self.parent.get_package('DIS') + if dis is None: + dis = self.parent.get_package('DISU') + + # Open file for writing + if f is None: + f = open(self.fn_path, 'w') + + # Item 0: text + f.write('{}\n'.format(self.heading)) + + # Item 1: IBCFCB, HDRY, NPLPF + f.write('{0:10d}{1:10.6G}{2:10d} {3:s}\n'.format(self.ipakcb, + self.hdry, + self.nplpf, + self.options)) + # LAYTYP array + f.write(self.laytyp.string) + # LAYAVG array + f.write(self.layavg.string) + # CHANI array + f.write(self.chani.string) + # LAYVKA array + f.write(self.layvka.string) + # LAYWET array + f.write(self.laywet.string) + # Item 7: WETFCT, IWETIT, IHDWET + iwetdry = self.laywet.sum() + if iwetdry > 0: + f.write('{0:10f}{1:10d}{2:10d}\n'.format(self.wetfct, + self.iwetit, + self.ihdwet)) + transient = not dis.steady.all() + for k in range(nlay): + f.write(self.hk[k].get_file_entry()) + if self.chani[k] <= 0.: + f.write(self.hani[k].get_file_entry()) + f.write(self.vka[k].get_file_entry()) + if transient == True: + f.write(self.ss[k].get_file_entry()) + if self.laytyp[k] != 0: + f.write(self.sy[k].get_file_entry()) + if dis.laycbd[k] > 0: + f.write(self.vkcb[k].get_file_entry()) + if (self.laywet[k] != 0 and self.laytyp[k] != 0): + f.write(self.wetdry[k].get_file_entry()) + f.close() + return + + @staticmethod + def load(f, model, ext_unit_dict=None, check=True): + """ + Load an existing package. + + Parameters + ---------- + f : filename or file handle + File to load. + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to + which this package will be added. + ext_unit_dict : dictionary, optional + If the arrays in the file are specified using EXTERNAL, + or older style array control records, then `f` should be a file + handle. In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. + check : boolean + Check package data for common errors. (default True) + + Returns + ------- + lpf : ModflowLpf object + ModflowLpf object. 
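The write_file method above emits a fixed sequence of 2-D records per layer, several of them conditional. A small standalone helper (illustrative, not part of FloPy) restates that logic and is an easy way to reason about what a given LPF file must contain:

    def lpf_layer_records(k, chani, laytyp, laywet, laycbd, transient):
        # records written for layer k, mirroring the write_file loop above
        recs = ['hk']
        if chani[k] <= 0.:                 # HANI only with variable anisotropy
            recs.append('hani')
        recs.append('vka')                 # VKA or VANI, depending on LAYVKA
        if transient:                      # storage terms only when any
            recs.append('ss')              # stress period is transient
            if laytyp[k] != 0:
                recs.append('sy')          # SY only for convertible layers
        if laycbd[k] > 0:
            recs.append('vkcb')            # quasi-3D confining-bed K
        if laywet[k] != 0 and laytyp[k] != 0:
            recs.append('wetdry')
        return recs

    print(lpf_layer_records(0, chani=[1.], laytyp=[1], laywet=[0],
                            laycbd=[0], transient=True))
    # ['hk', 'vka', 'ss', 'sy']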
+ + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> lpf = flopy.modflow.ModflowLpf.load('test.lpf', m) + + """ + + if model.verbose: + sys.stdout.write('loading lpf package file...\n') + + openfile = not hasattr(f, 'read') + if openfile: + filename = f + f = open(filename, 'r') + + # dataset 0 -- header + while True: + line = f.readline() + if line[0] != '#': + break + + # determine problem dimensions + nr, nc, nlay, nper = model.get_nrow_ncol_nlay_nper() + dis = model.get_package('DIS') + if dis is None: + dis = model.get_package('DISU') + + # Item 1: IBCFCB, HDRY, NPLPF - line already read above + if model.verbose: + print(' loading IBCFCB, HDRY, NPLPF...') + t = line_parse(line) + ipakcb, hdry, nplpf = int(t[0]), float(t[1]), int(t[2]) + # if ipakcb != 0: + # model.add_pop_key_list(ipakcb) + # ipakcb = 53 + # options + storagecoefficient = False + constantcv = False + thickstrt = False + nocvcorrection = False + novfc = False + if len(t) > 3: + for k in range(3, len(t)): + if 'STORAGECOEFFICIENT' in t[k].upper(): + storagecoefficient = True + elif 'CONSTANTCV' in t[k].upper(): + constantcv = True + elif 'THICKSTRT' in t[k].upper(): + thickstrt = True + elif 'NOCVCORRECTION' in t[k].upper(): + nocvcorrection = True + elif 'NOVFC' in t[k].upper(): + novfc = True + + # LAYTYP array + if model.verbose: + print(' loading LAYTYP...') + laytyp = np.empty((nlay), dtype=np.int32) + laytyp = read1d(f, laytyp) + + # LAYAVG array + if model.verbose: + print(' loading LAYAVG...') + layavg = np.empty((nlay), dtype=np.int32) + layavg = read1d(f, layavg) + + # CHANI array + if model.verbose: + print(' loading CHANI...') + chani = np.empty((nlay), dtype=np.float32) + chani = read1d(f, chani) + + # LAYVKA array + if model.verbose: + print(' loading LAYVKA...') + layvka = np.empty((nlay,), dtype=np.int32) + layvka = read1d(f, layvka) + + # LAYWET array + if model.verbose: + print(' loading LAYWET...') + laywet = np.empty((nlay), dtype=np.int32) + laywet = read1d(f, laywet) + + # Item 7: WETFCT, IWETIT, IHDWET + wetfct, iwetit, ihdwet = None, None, None + iwetdry = laywet.sum() + if iwetdry > 0: + if model.verbose: + print(' loading WETFCT, IWETIT, IHDWET...') + line = f.readline() + t = line.strip().split() + wetfct, iwetit, ihdwet = float(t[0]), int(t[1]), int(t[2]) + + # parameters data + par_types = [] + if nplpf > 0: + par_types, parm_dict = mfpar.load(f, nplpf, model.verbose) + # print parm_dict + + # non-parameter data + transient = not dis.steady.all() + hk = [0] * nlay + hani = [0] * nlay + vka = [0] * nlay + ss = [0] * nlay + sy = [0] * nlay + vkcb = [0] * nlay + wetdry = [0] * nlay + + # load by layer + for k in range(nlay): + + # allow for unstructured changing nodes per layer + if nr is None: + nrow = 1 + ncol = nc[k] + else: + nrow = nr + ncol = nc + + # hk + if model.verbose: + print(' loading hk layer {0:3d}...'.format(k + 1)) + if 'hk' not in par_types: + t = Util2d.load(f, model, (nrow, ncol), np.float32, 'hk', + ext_unit_dict) + else: + line = f.readline() + t = mfpar.parameter_fill(model, (nrow, ncol), 'hk', parm_dict, + findlayer=k) + hk[k] = t + + # hani + if chani[k] <= 0.: + if model.verbose: + print(' loading hani layer {0:3d}...'.format(k + 1)) + if 'hani' not in par_types: + t = Util2d.load(f, model, (nrow, ncol), np.float32, 'hani', + ext_unit_dict) + else: + line = f.readline() + t = mfpar.parameter_fill(model, (nrow, ncol), 'hani', + parm_dict, findlayer=k) + hani[k] = t + + # vka + if model.verbose: + print(' loading vka layer 
{0:3d}...'.format(k + 1)) + key = 'vk' + if layvka[k] != 0: + key = 'vani' + if 'vk' not in par_types and 'vani' not in par_types: + t = Util2d.load(f, model, (nrow, ncol), np.float32, key, + ext_unit_dict) + else: + line = f.readline() + key = 'vk' + if 'vani' in par_types: + key = 'vani' + t = mfpar.parameter_fill(model, (nrow, ncol), key, parm_dict, + findlayer=k) + vka[k] = t + + # storage properties + if transient: + + # ss + if model.verbose: + print(' loading ss layer {0:3d}...'.format(k + 1)) + if 'ss' not in par_types: + t = Util2d.load(f, model, (nrow, ncol), np.float32, 'ss', + ext_unit_dict) + else: + line = f.readline() + t = mfpar.parameter_fill(model, (nrow, ncol), 'ss', + parm_dict, findlayer=k) + ss[k] = t + + # sy + if laytyp[k] != 0: + if model.verbose: + print(' loading sy layer {0:3d}...'.format(k + 1)) + if 'sy' not in par_types: + t = Util2d.load(f, model, (nrow, ncol), np.float32, + 'sy', + ext_unit_dict) + else: + line = f.readline() + t = mfpar.parameter_fill(model, (nrow, ncol), 'sy', + parm_dict, findlayer=k) + sy[k] = t + + # vkcb + if dis.laycbd[k] > 0: + if model.verbose: + print(' loading vkcb layer {0:3d}...'.format(k + 1)) + if 'vkcb' not in par_types: + t = Util2d.load(f, model, (nrow, ncol), np.float32, 'vkcb', + ext_unit_dict) + else: + line = f.readline() + t = mfpar.parameter_fill(model, (nrow, ncol), 'vkcb', + parm_dict, findlayer=k) + vkcb[k] = t + + # wetdry + if (laywet[k] != 0 and laytyp[k] != 0): + if model.verbose: + print(' loading wetdry layer {0:3d}...'.format(k + 1)) + t = Util2d.load(f, model, (nrow, ncol), np.float32, 'wetdry', + ext_unit_dict) + wetdry[k] = t + + if openfile: + f.close() + + # set package unit number + unitnumber = None + filenames = [None, None] + if ext_unit_dict is not None: + unitnumber, filenames[0] = \ + model.get_ext_dict_attr(ext_unit_dict, + filetype=ModflowLpf.ftype()) + if ipakcb > 0: + iu, filenames[1] = \ + model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb) + model.add_pop_key_list(ipakcb) + + # create instance of lpf class + lpf = ModflowLpf(model, ipakcb=ipakcb, laytyp=laytyp, layavg=layavg, + chani=chani, layvka=layvka, laywet=laywet, hdry=hdry, + iwdflg=iwetdry, wetfct=wetfct, iwetit=iwetit, + ihdwet=ihdwet, hk=hk, hani=hani, vka=vka, ss=ss, + sy=sy, vkcb=vkcb, wetdry=wetdry, + storagecoefficient=storagecoefficient, + constantcv=constantcv, thickstrt=thickstrt, + novfc=novfc, + unitnumber=unitnumber, filenames=filenames) + if check: + lpf.check(f='{}.chk'.format(lpf.name[0]), + verbose=lpf.parent.verbose, level=0) + return lpf + + @staticmethod + def ftype(): + return 'LPF' + + @staticmethod + def defaultunit(): + return 15 diff --git a/flopy/modflow/mfmlt.py b/flopy/modflow/mfmlt.py index bad8cabdbd..ace7e1059b 100644 --- a/flopy/modflow/mfmlt.py +++ b/flopy/modflow/mfmlt.py @@ -1,268 +1,268 @@ -""" -mfmlt module. Contains the ModflowMlt class. Note that the user can access -the ModflowMlt class as `flopy.modflow.ModflowMlt`. - -Additional information for this MODFLOW package can be found at the `Online -MODFLOW Guide -`_. - -""" -import collections -import sys - -import numpy as np - -from ..pakbase import Package -from ..utils import Util2d - - -class ModflowMlt(Package): - """ - MODFLOW Mult Package Class. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to which - this package will be added. - mult_dict : dict - Dictionary with mult data for the model. mult_dict is typically - instantiated using load method. 
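A quick way to sanity-check a freshly loaded LPF package is through the array views on its Util2d/Util3d containers ('test.lpf' as in the load docstring example above; the DIS package must already define matching dimensions):

    import flopy

    m = flopy.modflow.Modflow()
    dis = flopy.modflow.ModflowDis(m, nlay=3, nrow=10, ncol=10)
    lpf = flopy.modflow.ModflowLpf.load('test.lpf', m)
    print(lpf.hk.array.shape)   # (nlay, nrow, ncol)
    print(lpf.laytyp.array)     # per-layer type flags
    print(lpf.options)          # e.g. 'STORAGECOEFFICIENT ' if set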
- extension : string - Filename extension (default is 'drn') - unitnumber : int - File unit number (default is 21). - - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - Parameters are supported in Flopy only when reading in existing models. - Parameter values are converted to native values in Flopy and the - connection to "parameters" is thus nonexistent. - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> mltdict = flopy.modflow.ModflowZon(m, mult_dict=mult_dict) - - """ - - def __init__(self, model, mult_dict=None, - extension='mlt', unitnumber=None, filenames=None): - """ - Package constructor. - - """ - # set default unit number of one is not specified - if unitnumber is None: - unitnumber = ModflowMlt.defaultunit() - - # set filenames - if filenames is None: - filenames = [None] - elif isinstance(filenames, str): - filenames = [filenames] - - # Fill namefile items - name = [ModflowMlt.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - self.heading = '# {} package for '.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' - self.url = 'mult.htm' - - self.nml = 0 - if mult_dict is not None: - self.nml = len(mult_dict) - self.mult_dict = mult_dict - # print mult_dict - self.parent.add_package(self) - - def write_file(self): - """ - Write the package file. - - Returns - ------- - None - - Notes - ----- - Not implemented because parameters are only supported on load - - """ - pass - - @staticmethod - def load(f, model, nrow=None, ncol=None, ext_unit_dict=None): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. - nrow : int - number of rows. If not specified it will be retrieved from - the model object. (default is None). - ncol : int - number of columns. If not specified it will be retrieved from - the model object. (default is None). - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. 
- - Returns - ------- - zone : ModflowMult dict - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> mlt = flopy.modflow.ModflowMlt.load('test.mlt', m) - - """ - - if model.verbose: - sys.stdout.write('loading mult package file...\n') - - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - - # dataset 0 -- header - while True: - line = f.readline() - if line[0] != '#': - break - # dataset 1 - t = line.strip().split() - nml = int(t[0]) - - # get nlay,nrow,ncol if not passed - if nrow is None and ncol is None: - nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper() - - # read zone data - mult_dict = collections.OrderedDict() - for n in range(nml): - line = f.readline() - t = line.strip().split() - if len(t[0]) > 10: - mltnam = t[0][0:10].lower() - else: - mltnam = t[0].lower() - if model.verbose: - sys.stdout.write( - ' reading data for "{:<10s}" mult\n'.format(mltnam)) - readArray = True - kwrd = None - if len(t) > 1: - if 'function' in t[1].lower() or 'expression' in t[1].lower(): - readArray = False - kwrd = t[1].lower() - # load data - if readArray: - t = Util2d.load(f, model, (nrow, ncol), np.float32, mltnam, - ext_unit_dict) - # add unit number to list of external files in - # ext_unit_dict to remove. - if t.locat is not None: - model.add_pop_key_list(t.locat) - else: - line = f.readline() - t = [kwrd, line] - t = ModflowMlt.mult_function(mult_dict, line) - mult_dict[mltnam] = t - - if openfile: - f.close() - - # set package unit number - unitnumber = None - filenames = [None] - if ext_unit_dict is not None: - unitnumber, filenames[0] = \ - model.get_ext_dict_attr(ext_unit_dict, - filetype=ModflowMlt.ftype()) - - # create mlt dictionary - mlt = ModflowMlt(model, mult_dict=mult_dict, unitnumber=unitnumber, - filenames=filenames) - - return mlt - - @staticmethod - def mult_function(mult_dict, line): - """ - Construct a multiplier for the 'FUNCTION' option - - """ - t = line.strip().split() - basename = t.pop(0).lower()[0:10] - multarray = mult_dict[basename] - try: - multarray = multarray.array.copy() - except: - multarray = multarray.copy() - # Construct the multiplier array - while True: - if len(t) < 2: - break - op = t.pop(0) - multname = t.pop(0)[0:10] - try: - atemp = mult_dict[multname.lower()].array - except: - atemp = mult_dict[multname.lower()] - if op == '+': - multarray = multarray + atemp - elif op == '*': - multarray = multarray * atemp - elif op == '-': - multarray = multarray - atemp - elif op == '/': - multarray = multarray / atemp - elif op == '^': - multarray = multarray ** atemp - else: - s = 'Invalid MULT operation {}'.format(op) - raise Exception(s) - return multarray - - @staticmethod - def ftype(): - return 'MULT' - - @staticmethod - def defaultunit(): - return 1002 +""" +mfmlt module. Contains the ModflowMlt class. Note that the user can access +the ModflowMlt class as `flopy.modflow.ModflowMlt`. + +Additional information for this MODFLOW package can be found at the `Online +MODFLOW Guide +`_. + +""" +import collections +import sys + +import numpy as np + +from ..pakbase import Package +from ..utils import Util2d + + +class ModflowMlt(Package): + """ + MODFLOW Mult Package Class. + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to which + this package will be added. + mult_dict : dict + Dictionary with mult data for the model. mult_dict is typically + instantiated using load method. 
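Two short sketches for the MULT package described above (array names and values are illustrative). First, a mult_dict can be assembled by hand even though it is normally populated by load(); second, the FUNCTION/EXPRESSION option handled by mult_function evaluates strictly left to right, with no operator precedence:

    import numpy as np
    import flopy
    from flopy.modflow import ModflowMlt

    m = flopy.modflow.Modflow()
    # multiplier arrays keyed by lower-case names of at most 10 characters
    mult_dict = {'rchfac': np.full((5, 5), 2.0, dtype=np.float32),
                 'zonefac': np.full((5, 5), 0.5, dtype=np.float32)}
    mlt = ModflowMlt(m, mult_dict=mult_dict)

    # FUNCTION line: first token is the starting array, then op/name pairs,
    # so this is (2.0 * 0.5) + 0.5 = 1.5 everywhere
    result = ModflowMlt.mult_function(mult_dict, 'rchfac * zonefac + zonefac\n')
    print(result[0, 0])   # 1.5

Note that write_file is a deliberate no-op for this package, so a hand-built ModflowMlt is only useful to downstream FloPy code that consumes mult_dict.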
+    extension : string
+        Filename extension (default is 'mlt')
+    unitnumber : int
+        File unit number (default is None).
+
+
+    Attributes
+    ----------
+
+    Methods
+    -------
+
+    See Also
+    --------
+
+    Notes
+    -----
+    Parameters are supported in Flopy only when reading in existing models.
+    Parameter values are converted to native values in Flopy and the
+    connection to "parameters" is thus nonexistent.
+
+    Examples
+    --------
+
+    >>> import flopy
+    >>> m = flopy.modflow.Modflow()
+    >>> mlt = flopy.modflow.ModflowMlt(m, mult_dict=mult_dict)
+
+    """
+
+    def __init__(self, model, mult_dict=None,
+                 extension='mlt', unitnumber=None, filenames=None):
+        """
+        Package constructor.
+
+        """
+        # set default unit number if one is not specified
+        if unitnumber is None:
+            unitnumber = ModflowMlt.defaultunit()
+
+        # set filenames
+        if filenames is None:
+            filenames = [None]
+        elif isinstance(filenames, str):
+            filenames = [filenames]
+
+        # Fill namefile items
+        name = [ModflowMlt.ftype()]
+        units = [unitnumber]
+        extra = ['']
+
+        # set package name
+        fname = [filenames[0]]
+
+        # Call ancestor's init to set self.parent, extension, name and
+        # unit number
+        Package.__init__(self, model, extension=extension, name=name,
+                         unit_number=units, extra=extra, filenames=fname)
+
+        self.heading = '# {} package for '.format(self.name[0]) + \
+                       ' {}, '.format(model.version_types[model.version]) + \
+                       'generated by Flopy.'
+        self.url = 'mult.htm'
+
+        self.nml = 0
+        if mult_dict is not None:
+            self.nml = len(mult_dict)
+        self.mult_dict = mult_dict
+        self.parent.add_package(self)
+
+    def write_file(self):
+        """
+        Write the package file.
+
+        Returns
+        -------
+        None
+
+        Notes
+        -----
+        Not implemented because parameters are only supported on load
+
+        """
+        pass
+
+    @staticmethod
+    def load(f, model, nrow=None, ncol=None, ext_unit_dict=None):
+        """
+        Load an existing package.
+
+        Parameters
+        ----------
+        f : filename or file handle
+            File to load.
+        model : model object
+            The model object (of type :class:`flopy.modflow.mf.Modflow`) to
+            which this package will be added.
+        nrow : int
+            number of rows. If not specified it will be retrieved from
+            the model object. (default is None).
+        ncol : int
+            number of columns. If not specified it will be retrieved from
+            the model object. (default is None).
+        ext_unit_dict : dictionary, optional
+            If the arrays in the file are specified using EXTERNAL,
+            or older style array control records, then `f` should be a file
+            handle. In this case ext_unit_dict is required, which can be
+            constructed using the function
+            :class:`flopy.utils.mfreadnam.parsenamefile`.
+
+        Returns
+        -------
+        mlt : ModflowMlt object
+            ModflowMlt object; the multiplier arrays are stored in its
+            mult_dict attribute.
+
+        Examples
+        --------
+
+        >>> import flopy
+        >>> m = flopy.modflow.Modflow()
+        >>> mlt = flopy.modflow.ModflowMlt.load('test.mlt', m)
+
+        """
+
+        if model.verbose:
+            sys.stdout.write('loading mult package file...\n')
+
+        openfile = not hasattr(f, 'read')
+        if openfile:
+            filename = f
+            f = open(filename, 'r')
+
+        # dataset 0 -- header
+        while True:
+            line = f.readline()
+            if line[0] != '#':
+                break
+        # dataset 1
+        t = line.strip().split()
+        nml = int(t[0])
+
+        # get nlay,nrow,ncol if not passed
+        if nrow is None and ncol is None:
+            nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper()
+
+        # read multiplier data
+        mult_dict = collections.OrderedDict()
+        for n in range(nml):
+            line = f.readline()
+            t = line.strip().split()
+            if len(t[0]) > 10:
+                mltnam = t[0][0:10].lower()
+            else:
+                mltnam = t[0].lower()
+            if model.verbose:
+                sys.stdout.write(
+                    '   reading data for "{:<10s}" mult\n'.format(mltnam))
+            readArray = True
+            kwrd = None
+            if len(t) > 1:
+                if 'function' in t[1].lower() or 'expression' in t[1].lower():
+                    readArray = False
+                    kwrd = t[1].lower()
+            # load data
+            if readArray:
+                t = Util2d.load(f, model, (nrow, ncol), np.float32, mltnam,
+                                ext_unit_dict)
+                # add unit number to list of external files in
+                # ext_unit_dict to remove.
+                if t.locat is not None:
+                    model.add_pop_key_list(t.locat)
+            else:
+                line = f.readline()
+                t = ModflowMlt.mult_function(mult_dict, line)
+            mult_dict[mltnam] = t
+
+        if openfile:
+            f.close()
+
+        # set package unit number
+        unitnumber = None
+        filenames = [None]
+        if ext_unit_dict is not None:
+            unitnumber, filenames[0] = \
+                model.get_ext_dict_attr(ext_unit_dict,
+                                        filetype=ModflowMlt.ftype())
+
+        # create mlt package object
+        mlt = ModflowMlt(model, mult_dict=mult_dict, unitnumber=unitnumber,
+                         filenames=filenames)
+
+        return mlt
+
+    @staticmethod
+    def mult_function(mult_dict, line):
+        """
+        Construct a multiplier for the 'FUNCTION' option
+
+        """
+        t = line.strip().split()
+        basename = t.pop(0).lower()[0:10]
+        multarray = mult_dict[basename]
+        try:
+            multarray = multarray.array.copy()
+        except AttributeError:
+            multarray = multarray.copy()
+        # Construct the multiplier array; operations are applied strictly
+        # left to right (no operator precedence)
+        while True:
+            if len(t) < 2:
+                break
+            op = t.pop(0)
+            multname = t.pop(0)[0:10]
+            try:
+                atemp = mult_dict[multname.lower()].array
+            except AttributeError:
+                atemp = mult_dict[multname.lower()]
+            if op == '+':
+                multarray = multarray + atemp
+            elif op == '*':
+                multarray = multarray * atemp
+            elif op == '-':
+                multarray = multarray - atemp
+            elif op == '/':
+                multarray = multarray / atemp
+            elif op == '^':
+                multarray = multarray ** atemp
+            else:
+                s = 'Invalid MULT operation {}'.format(op)
+                raise Exception(s)
+        return multarray
+
+    @staticmethod
+    def ftype():
+        return 'MULT'
+
+    @staticmethod
+    def defaultunit():
+        return 1002
diff --git a/flopy/modflow/mfmnw1.py b/flopy/modflow/mfmnw1.py
index e4ad8731d5..d742c2bf0f 100644
--- a/flopy/modflow/mfmnw1.py
+++ b/flopy/modflow/mfmnw1.py
@@ -1,492 +1,492 @@
-import sys
-import re
-import numpy as np
-from ..pakbase import Package
-from ..utils.flopy_io import line_parse, pop_item
-from ..utils import MfList
-from ..utils.recarray_utils import create_empty_recarray, recarray
-
-
-class ModflowMnw1(Package):
-    """
-    MODFLOW Multi-Node Well 1 Package Class.
-
-    Parameters
-    ----------
-    model : model object
-        The model object (of type :class:`flopy.modflow.mf.Modflow`) to which
-        this package will be added.
- mxmnw : integer - maximum number of multi-node wells to be simulated - ipakcb : integer - A flag that is used to determine if cell-by-cell budget data should be - saved. If ipakcb is non-zero cell-by-cell budget data will be saved. - (default is 0). - iwelpt : integer - verbosity flag - nomoiter : integer - the number of iterations for which flow in MNW wells is calculated - kspref : string - which set of water levels are to be used as reference values for - calculating drawdown - losstype : string - head loss type for each well - wel1_bynode_qsum : list of lists or None - nested list containing file names, unit numbers, and ALLTIME flag for - auxiliary output, e.g. [['test.ByNode',92,'ALLTIME']] - if None, these optional external filenames and unit numbers are not written out - itmp : array - number of wells to be simulated for each stress period (shape : (NPER)) - lay_row_col_qdes_mn_multi : list of arrays - lay, row, col, qdes, and MN or MULTI flag for all well nodes - (length : NPER) - mnwname : string - prefix name of file for outputting time series data from MNW1 - extension : string - Filename extension (default is 'mnw1') - unitnumber : int - File unit number (default is 33). - filenames : string or list of strings - File name of the package (with extension) or a list with the filename - of the package and the cell-by-cell budget file for ipakcb. Default - is None. - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - Parameters are not supported in FloPy. - - The functionality of the ADD flag in data set 4 is not supported. Also - not supported are all water-quality parameters (Qwval Iqwgrp), water-level - limitations (Hlim, Href, DD), non-linear well losses, and pumping - limitations (QCUT, Q-%CUT, Qfrcmn, Qfrcmx, DEFAULT). - - Examples - -------- - - >>> import flopy - >>> ml = flopy.modflow.Modflow() - >>> mnw1 = flopy.modflow.ModflowMnw1(ml, ...) - - """ - - def __init__(self, model, mxmnw=0, ipakcb=None, iwelpt=0, nomoiter=0, - kspref=1, wel1_bynode_qsum=None, losstype='skin', - stress_period_data=None, dtype=None, - mnwname=None, - extension='mnw1', unitnumber=None, filenames=None): - # set default unit number of one is not specified - if unitnumber is None: - unitnumber = ModflowMnw1.defaultunit() - - # set filenames - if filenames is None: - filenames = [None, None] - elif isinstance(filenames, str): - filenames = [filenames, None] - elif isinstance(filenames, list): - if len(filenames) < 2: - filenames.append(None) - - # update external file information with cbc output, if necessary - if ipakcb is not None: - fname = filenames[1] - model.add_output_file(ipakcb, fname=fname, - package=ModflowMnw1.ftype()) - else: - ipakcb = 0 - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name, and unit number - Package.__init__(self, model, extension, ModflowMnw1.ftype(), - unitnumber, filenames=fname) - - self.url = 'mnw1.htm' - self.nper = self.parent.nrow_ncol_nlay_nper[-1] - self.heading = '# {} package for '.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' 
- self.mxmnw = mxmnw # -maximum number of multi-node wells to be simulated - self.ipakcb = ipakcb - self.iwelpt = iwelpt # -verbosity flag - self.nomoiter = nomoiter # -integer indicating the number of iterations for which flow in MNW wells is calculated - self.kspref = kspref # -alphanumeric key indicating which set of water levels are to be used as reference values for calculating drawdown - self.losstype = losstype # -string indicating head loss type for each well - self.wel1_bynode_qsum = wel1_bynode_qsum # -nested list containing file names, unit numbers, and ALLTIME flag for auxiliary output, e.g. [['test.ByNode',92,'ALLTIME']] - # if stress_period_data is not None: - # for per, spd in stress_period_data.items(): - # for n in spd.dtype.names: - # self.stress_period_data[per] = ModflowMnw1.get_empty_stress_period_data(len(spd), - # structured=self.parent.structured) - # self.stress_period_data[per][n] = stress_period_data[per][n] - if dtype is not None: - self.dtype = dtype - else: - self.dtype = self.get_default_dtype( - structured=self.parent.structured) - self.stress_period_data = MfList(self, stress_period_data) - - self.mnwname = mnwname # -string prefix name of file for outputting time series data from MNW1 - - # -input format checks: - lossTypes = ['skin', 'linear', 'nonlinear'] - assert self.losstype.lower() in lossTypes, \ - 'LOSSTYPE (%s) must be one of the following: skin, linear, nonlinear' \ - % (self.losstype) - # auxFileExtensions = ['wl1','ByNode','Qsum'] - # for each in self.wel1_bynode_qsum: - # assert each[0].split('.')[1] in auxFileExtensions, 'File extensions in "wel1_bynode_qsum" must be one of the following: ".wl1", ".ByNode", or ".Qsum".' - self.parent.add_package(self) - - @staticmethod - def get_empty_stress_period_data(itmp, structured=True, - default_value=0): - # get an empty recarray that corresponds to dtype - dtype = ModflowMnw1.get_default_dtype(structured=structured) - return create_empty_recarray(itmp, dtype, default_value=default_value) - - @staticmethod - def get_default_dtype(structured=True): - if structured: - return np.dtype([('mnw_no', np.int), - ('k', np.int), - ('i', np.int), - ('j', np.int), - ('qdes', np.float32), - ('mntxt', np.object), - ('qwval', np.float32), - ('rw', np.float32), - ('skin', np.float32), - ('hlim', np.float32), - ('href', np.float32), - ('dd', np.object), - ('iqwgrp', np.object), - ('cpc', np.object), - ('qcut', np.object), - ('qfrcmn', np.float32), - ('qfrcmx', np.float32), - ('label', np.object)]) - else: - pass - - @staticmethod - def load(f, model, nper=None, gwt=False, nsol=1, ext_unit_dict=None): - - if model.verbose: - sys.stdout.write('loading mnw1 package file...\n') - - structured = model.structured - if nper is None: - nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper() - nper = 1 if nper == 0 else nper # otherwise iterations from 0, nper won't run - - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - - # dataset 0 (header) - line = skipcomments(next(f), f) - - # dataset 1 - mxmnw, ipakcb, iwelpt, nomoiter, kspref = _parse_1(line) - - # dataset 2 - line = skipcomments(next(f), f) - losstype = _parse_2(line) - - # dataset 3 - wel1_bynode_qsum = [] - line = skipcomments(next(f), f) - for txt in ['wel1', 'bynode', 'qsum']: - if txt in line.lower(): - wel1_bynode_qsum.append(_parse_3(line, txt)) - line = skipcomments(next(f), f) - - # dataset 4 - line = skipcomments(line, f) - stress_period_data = {} - dtype = ModflowMnw1.get_default_dtype(structured=structured) - 
qfrcmn_default = None - qfrcmx_default = None - qcut_default = '' - - # not sure what 'add' means - add = True if 'add' in line.lower() else False - - for per in range(nper): - if per > 0: - line = skipcomments(next(f), f) - add = True if 'add' in line.lower() else False - itmp = int(line_parse(line)[0]) - if itmp > 0: - - # dataset 5 - data, \ - qfrcmn_default, \ - qfrcmx_default, \ - qcut_default = _parse_5(f, - itmp, - qfrcmn_default, - qfrcmx_default, - qcut_default) - - # cast data (list) to recarray - tmp = recarray(data, dtype) - spd = ModflowMnw1.get_empty_stress_period_data(len(data)) - for n in dtype.descr: - spd[n[0]] = tmp[n[0]] - stress_period_data[per] = spd - - if openfile: - f.close() - - return ModflowMnw1(model, mxmnw=mxmnw, ipakcb=ipakcb, iwelpt=iwelpt, - nomoiter=nomoiter, - kspref=kspref, wel1_bynode_qsum=wel1_bynode_qsum, - losstype=losstype, - stress_period_data=stress_period_data) - - def write_file(self): - """ - Write the package file. - - Returns - ------- - None - - """ - - # -open file for writing - # f_mnw1 = open( self.file_name[0], 'w' ) - f = open(self.fn_path, 'w') - - # -write header - f.write('%s\n' % self.heading) - - # -Section 1 - MXMNW ipakcb IWELPT NOMOITER REF:kspref - f.write('%10i%10i%10i%10i REF = %s\n' % (self.mxmnw, - self.ipakcb, - self.iwelpt, - self.nomoiter, - self.kspref)) - - # -Section 2 - LOSSTYPE {PLossMNW} - f.write('%s\n' % (self.losstype)) - - if self.wel1_bynode_qsum is not None: - # -Section 3a - {FILE:filename WEL1:iunw1} - for each in self.wel1_bynode_qsum: - if each[0].split('.')[1].lower() == 'wl1': - f.write('FILE:%s WEL1:%-10i\n' % (each[0], - int(each[1]))) - - # -Section 3b - {FILE:filename BYNODE:iunby} {ALLTIME} - for each in self.wel1_bynode_qsum: - if each[0].split('.')[1].lower() == 'bynode': - if len(each) == 2: - f.write('FILE:%s BYNODE:%-10i\n' % (each[0], - int(each[1]))) - elif len(each) == 3: - f.write('FILE:%s BYNODE:%-10i %s\n' % (each[0], - int(each[1]), - each[2])) - - # -Section 3C - {FILE:filename QSUM:iunqs} {ALLTIME} - for each in self.wel1_bynode_qsum: - if each[0].split('.')[1].lower() == 'qsum': - if len(each) == 2: - f.write('FILE:%s QSUM:%-10i\n' % (each[0], - int(each[1]))) - elif len(each) == 3: - f.write('FILE:%s QSUM:%-10i %s\n' % (each[0], - int(each[1]), - each[2])) - - spd = self.stress_period_data.drop('mnw_no') - # force write_transient to keep the list arrays internal because MNW1 doesn't allow open/close - spd.write_transient(f, forceInternal=True) - - # -Un-numbered section PREFIX:MNWNAME - if self.mnwname: - f.write('PREFIX:%s\n' % (self.mnwname)) - - f.close() - - @staticmethod - def ftype(): - return 'MNW1' - - @staticmethod - def defaultunit(): - return 33 - - -def skipcomments(line, f): - if line.strip().startswith('#'): - line = skipcomments(next(f), f) - return line - - -def _parse_1(line): - line = line_parse(line) - mnwmax = pop_item(line, int) - ipakcb = pop_item(line, int) - mnwprint = pop_item(line, int) - next_item = line.pop() - nomoiter = 0 - kspref = 1 - if next_item.isdigit(): - nomoiter = int(next_item) - elif 'ref' in next_item: - line = ' '.join(line) - kspref = re.findall(r'\d+', line) - if len(kspref) > 0: - kspref = int(kspref[0]) - return mnwmax, ipakcb, mnwprint, nomoiter, kspref - - -def _parse_2(line): - line = line.split('!!')[0] - options = ['SKIN', 'NONLINEAR', 'LINEAR'] - losstype = 'skin' - for lt in options: - if lt.lower() in line.lower(): - losstype = lt.lower() - return losstype - - -def _parse_3(line, txt): - def getitem(line, txt): - return 
line.pop(0).replace(txt + ':', '').strip() - - line = line_parse(line.lower()) - items = [getitem(line, 'file'), - getitem(line, txt)] - if 'alltime' in ' '.join(line): - items.append('alltime') - return items - - -def _parse_5(f, itmp, - qfrcmn_default=None, - qfrcmx_default=None, - qcut_default=''): - data = [] - mnw_no = 0 - mn = False - multi = False - label = '' - for n in range(itmp): - - linetxt = skipcomments(next(f), f).lower() - line = line_parse(linetxt) - - # get the label; strip it out - if 'site:' in linetxt: - label = linetxt.replace(',', ' ').split('site:')[1].split()[0] - label = 'site:' + label - txt = [t for t in line if 'site:' in t] - if len(txt) > 0: # site: might have been in the comments section - line.remove(txt[0]) - - k = pop_item(line, int) - 1 - i = pop_item(line, int) - 1 - j = pop_item(line, int) - 1 - qdes = pop_item(line, float) - - # logic to create column of unique numbers for each MNW - mntxt = '' - if 'mn' in line: - if not mn: - mnw_no -= 1 # this node has same number as previous - if label == '': - label = data[n - 1][-1] - mn = True - mntxt = 'mn' - line.remove('mn') - if 'multi' in line: - multi = True - mntxt = 'multi' - line.remove('multi') - if mn and not multi: - multi = True - - # "The alphanumeric flags MN and DD can appear anywhere - # between columns 41 and 256, inclusive." - dd = '' - if 'dd' in line: - line.remove('dd') - dd = 'dd' - - qwval = pop_item(line, float) - rw = pop_item(line, float) - skin = pop_item(line, float) - hlim = pop_item(line, float) - href = pop_item(line, float) - iqwgrp = pop_item(line) - - cpc = '' - if 'cp:' in linetxt: - cpc = re.findall(r'\d+', line.pop(0)) - # in case there is whitespace between cp: and the value - if len(cpc) == 0: - cpc = pop_item(line) - cpc = 'cp:' + cpc - - qcut = '' - qfrcmn = 0. - qfrcmx = 0. - if 'qcut' in linetxt: - txt = [t for t in line if 'qcut' in t][0] - qcut = txt - line.remove(txt) - elif '%cut' in linetxt: - txt = [t for t in line if '%cut' in t][0] - qcut = txt - line.remove(txt) - if 'qcut' in linetxt or '%cut' in linetxt: - qfrcmn = pop_item(line, float) - qfrcmx = pop_item(line, float) - elif qfrcmn_default is not None and qfrcmx_default is not None: - qfrcmn = qfrcmn_default - qfrcmx = qfrcmx_default - if 'qcut' not in linetxt and '%cut' not in linetxt: - qcut = qcut_default - if 'default' in line: - qfrcmn_default = qfrcmn - qfrcmx_default = qfrcmx - qcut_default = qcut - - idata = [mnw_no, k, i, j, qdes, mntxt, qwval, - rw, skin, hlim, href, dd, iqwgrp, - cpc, qcut, qfrcmn, qfrcmx, label] - data.append(idata) - - # reset MNW designators - # if at the end of the well - if mn and multi: - mnw_no += 1 - mn = False - multi = False - label = '' - elif not mn and not multi: - mnw_no += 1 - label = '' - - return data, qfrcmn_default, qfrcmx_default, qcut_default - - -def _write_5(f, spd): - f.write('{:d} {:d} {:d} {}') - pass +import sys +import re +import numpy as np +from ..pakbase import Package +from ..utils.flopy_io import line_parse, pop_item +from ..utils import MfList +from ..utils.recarray_utils import create_empty_recarray, recarray + + +class ModflowMnw1(Package): + """ + MODFLOW Multi-Node Well 1 Package Class. + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to which + this package will be added. + mxmnw : integer + maximum number of multi-node wells to be simulated + ipakcb : integer + A flag that is used to determine if cell-by-cell budget data should be + saved. 
If ipakcb is non-zero cell-by-cell budget data will be saved. + (default is 0). + iwelpt : integer + verbosity flag + nomoiter : integer + the number of iterations for which flow in MNW wells is calculated + kspref : string + which set of water levels are to be used as reference values for + calculating drawdown + losstype : string + head loss type for each well + wel1_bynode_qsum : list of lists or None + nested list containing file names, unit numbers, and ALLTIME flag for + auxiliary output, e.g. [['test.ByNode',92,'ALLTIME']] + if None, these optional external filenames and unit numbers are not written out + itmp : array + number of wells to be simulated for each stress period (shape : (NPER)) + lay_row_col_qdes_mn_multi : list of arrays + lay, row, col, qdes, and MN or MULTI flag for all well nodes + (length : NPER) + mnwname : string + prefix name of file for outputting time series data from MNW1 + extension : string + Filename extension (default is 'mnw1') + unitnumber : int + File unit number (default is 33). + filenames : string or list of strings + File name of the package (with extension) or a list with the filename + of the package and the cell-by-cell budget file for ipakcb. Default + is None. + + Attributes + ---------- + + Methods + ------- + + See Also + -------- + + Notes + ----- + Parameters are not supported in FloPy. + + The functionality of the ADD flag in data set 4 is not supported. Also + not supported are all water-quality parameters (Qwval Iqwgrp), water-level + limitations (Hlim, Href, DD), non-linear well losses, and pumping + limitations (QCUT, Q-%CUT, Qfrcmn, Qfrcmx, DEFAULT). + + Examples + -------- + + >>> import flopy + >>> ml = flopy.modflow.Modflow() + >>> mnw1 = flopy.modflow.ModflowMnw1(ml, ...) + + """ + + def __init__(self, model, mxmnw=0, ipakcb=None, iwelpt=0, nomoiter=0, + kspref=1, wel1_bynode_qsum=None, losstype='skin', + stress_period_data=None, dtype=None, + mnwname=None, + extension='mnw1', unitnumber=None, filenames=None): + # set default unit number of one is not specified + if unitnumber is None: + unitnumber = ModflowMnw1.defaultunit() + + # set filenames + if filenames is None: + filenames = [None, None] + elif isinstance(filenames, str): + filenames = [filenames, None] + elif isinstance(filenames, list): + if len(filenames) < 2: + filenames.append(None) + + # update external file information with cbc output, if necessary + if ipakcb is not None: + fname = filenames[1] + model.add_output_file(ipakcb, fname=fname, + package=ModflowMnw1.ftype()) + else: + ipakcb = 0 + + # set package name + fname = [filenames[0]] + + # Call ancestor's init to set self.parent, extension, name, and unit number + Package.__init__(self, model, extension, ModflowMnw1.ftype(), + unitnumber, filenames=fname) + + self.url = 'mnw1.htm' + self.nper = self.parent.nrow_ncol_nlay_nper[-1] + self.heading = '# {} package for '.format(self.name[0]) + \ + ' {}, '.format(model.version_types[model.version]) + \ + 'generated by Flopy.' 
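A construction sketch for the MNW1 parameters documented above (grid dimensions, output file name, and rates are illustrative; the record columns come from get_default_dtype()):

    import flopy

    m = flopy.modflow.Modflow()
    dis = flopy.modflow.ModflowDis(m, nlay=2, nrow=5, ncol=5, nper=1)
    spd = flopy.modflow.ModflowMnw1.get_empty_stress_period_data(itmp=2)
    spd['mnw_no'] = 0                  # both nodes belong to the same well
    spd['k'] = [0, 1]                  # open in layers 1 and 2 (zero-based)
    spd['i'] = 2
    spd['j'] = 2
    spd['qdes'] = [-1000., 0.]         # desired rate assigned to the first node
    mnw1 = flopy.modflow.ModflowMnw1(
        m, mxmnw=1, losstype='skin',
        wel1_bynode_qsum=[['demo.ByNode', 92, 'ALLTIME']],
        stress_period_data={0: spd})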
+ self.mxmnw = mxmnw # -maximum number of multi-node wells to be simulated + self.ipakcb = ipakcb + self.iwelpt = iwelpt # -verbosity flag + self.nomoiter = nomoiter # -integer indicating the number of iterations for which flow in MNW wells is calculated + self.kspref = kspref # -alphanumeric key indicating which set of water levels are to be used as reference values for calculating drawdown + self.losstype = losstype # -string indicating head loss type for each well + self.wel1_bynode_qsum = wel1_bynode_qsum # -nested list containing file names, unit numbers, and ALLTIME flag for auxiliary output, e.g. [['test.ByNode',92,'ALLTIME']] + # if stress_period_data is not None: + # for per, spd in stress_period_data.items(): + # for n in spd.dtype.names: + # self.stress_period_data[per] = ModflowMnw1.get_empty_stress_period_data(len(spd), + # structured=self.parent.structured) + # self.stress_period_data[per][n] = stress_period_data[per][n] + if dtype is not None: + self.dtype = dtype + else: + self.dtype = self.get_default_dtype( + structured=self.parent.structured) + self.stress_period_data = MfList(self, stress_period_data) + + self.mnwname = mnwname # -string prefix name of file for outputting time series data from MNW1 + + # -input format checks: + lossTypes = ['skin', 'linear', 'nonlinear'] + assert self.losstype.lower() in lossTypes, \ + 'LOSSTYPE (%s) must be one of the following: skin, linear, nonlinear' \ + % (self.losstype) + # auxFileExtensions = ['wl1','ByNode','Qsum'] + # for each in self.wel1_bynode_qsum: + # assert each[0].split('.')[1] in auxFileExtensions, 'File extensions in "wel1_bynode_qsum" must be one of the following: ".wl1", ".ByNode", or ".Qsum".' + self.parent.add_package(self) + + @staticmethod + def get_empty_stress_period_data(itmp, structured=True, + default_value=0): + # get an empty recarray that corresponds to dtype + dtype = ModflowMnw1.get_default_dtype(structured=structured) + return create_empty_recarray(itmp, dtype, default_value=default_value) + + @staticmethod + def get_default_dtype(structured=True): + if structured: + return np.dtype([('mnw_no', np.int), + ('k', np.int), + ('i', np.int), + ('j', np.int), + ('qdes', np.float32), + ('mntxt', np.object), + ('qwval', np.float32), + ('rw', np.float32), + ('skin', np.float32), + ('hlim', np.float32), + ('href', np.float32), + ('dd', np.object), + ('iqwgrp', np.object), + ('cpc', np.object), + ('qcut', np.object), + ('qfrcmn', np.float32), + ('qfrcmx', np.float32), + ('label', np.object)]) + else: + pass + + @staticmethod + def load(f, model, nper=None, gwt=False, nsol=1, ext_unit_dict=None): + + if model.verbose: + sys.stdout.write('loading mnw1 package file...\n') + + structured = model.structured + if nper is None: + nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper() + nper = 1 if nper == 0 else nper # otherwise iterations from 0, nper won't run + + openfile = not hasattr(f, 'read') + if openfile: + filename = f + f = open(filename, 'r') + + # dataset 0 (header) + line = skipcomments(next(f), f) + + # dataset 1 + mxmnw, ipakcb, iwelpt, nomoiter, kspref = _parse_1(line) + + # dataset 2 + line = skipcomments(next(f), f) + losstype = _parse_2(line) + + # dataset 3 + wel1_bynode_qsum = [] + line = skipcomments(next(f), f) + for txt in ['wel1', 'bynode', 'qsum']: + if txt in line.lower(): + wel1_bynode_qsum.append(_parse_3(line, txt)) + line = skipcomments(next(f), f) + + # dataset 4 + line = skipcomments(line, f) + stress_period_data = {} + dtype = ModflowMnw1.get_default_dtype(structured=structured) + 
qfrcmn_default = None + qfrcmx_default = None + qcut_default = '' + + # not sure what 'add' means + add = True if 'add' in line.lower() else False + + for per in range(nper): + if per > 0: + line = skipcomments(next(f), f) + add = True if 'add' in line.lower() else False + itmp = int(line_parse(line)[0]) + if itmp > 0: + + # dataset 5 + data, \ + qfrcmn_default, \ + qfrcmx_default, \ + qcut_default = _parse_5(f, + itmp, + qfrcmn_default, + qfrcmx_default, + qcut_default) + + # cast data (list) to recarray + tmp = recarray(data, dtype) + spd = ModflowMnw1.get_empty_stress_period_data(len(data)) + for n in dtype.descr: + spd[n[0]] = tmp[n[0]] + stress_period_data[per] = spd + + if openfile: + f.close() + + return ModflowMnw1(model, mxmnw=mxmnw, ipakcb=ipakcb, iwelpt=iwelpt, + nomoiter=nomoiter, + kspref=kspref, wel1_bynode_qsum=wel1_bynode_qsum, + losstype=losstype, + stress_period_data=stress_period_data) + + def write_file(self): + """ + Write the package file. + + Returns + ------- + None + + """ + + # -open file for writing + # f_mnw1 = open( self.file_name[0], 'w' ) + f = open(self.fn_path, 'w') + + # -write header + f.write('%s\n' % self.heading) + + # -Section 1 - MXMNW ipakcb IWELPT NOMOITER REF:kspref + f.write('%10i%10i%10i%10i REF = %s\n' % (self.mxmnw, + self.ipakcb, + self.iwelpt, + self.nomoiter, + self.kspref)) + + # -Section 2 - LOSSTYPE {PLossMNW} + f.write('%s\n' % (self.losstype)) + + if self.wel1_bynode_qsum is not None: + # -Section 3a - {FILE:filename WEL1:iunw1} + for each in self.wel1_bynode_qsum: + if each[0].split('.')[1].lower() == 'wl1': + f.write('FILE:%s WEL1:%-10i\n' % (each[0], + int(each[1]))) + + # -Section 3b - {FILE:filename BYNODE:iunby} {ALLTIME} + for each in self.wel1_bynode_qsum: + if each[0].split('.')[1].lower() == 'bynode': + if len(each) == 2: + f.write('FILE:%s BYNODE:%-10i\n' % (each[0], + int(each[1]))) + elif len(each) == 3: + f.write('FILE:%s BYNODE:%-10i %s\n' % (each[0], + int(each[1]), + each[2])) + + # -Section 3C - {FILE:filename QSUM:iunqs} {ALLTIME} + for each in self.wel1_bynode_qsum: + if each[0].split('.')[1].lower() == 'qsum': + if len(each) == 2: + f.write('FILE:%s QSUM:%-10i\n' % (each[0], + int(each[1]))) + elif len(each) == 3: + f.write('FILE:%s QSUM:%-10i %s\n' % (each[0], + int(each[1]), + each[2])) + + spd = self.stress_period_data.drop('mnw_no') + # force write_transient to keep the list arrays internal because MNW1 doesn't allow open/close + spd.write_transient(f, forceInternal=True) + + # -Un-numbered section PREFIX:MNWNAME + if self.mnwname: + f.write('PREFIX:%s\n' % (self.mnwname)) + + f.close() + + @staticmethod + def ftype(): + return 'MNW1' + + @staticmethod + def defaultunit(): + return 33 + + +def skipcomments(line, f): + if line.strip().startswith('#'): + line = skipcomments(next(f), f) + return line + + +def _parse_1(line): + line = line_parse(line) + mnwmax = pop_item(line, int) + ipakcb = pop_item(line, int) + mnwprint = pop_item(line, int) + next_item = line.pop() + nomoiter = 0 + kspref = 1 + if next_item.isdigit(): + nomoiter = int(next_item) + elif 'ref' in next_item: + line = ' '.join(line) + kspref = re.findall(r'\d+', line) + if len(kspref) > 0: + kspref = int(kspref[0]) + return mnwmax, ipakcb, mnwprint, nomoiter, kspref + + +def _parse_2(line): + line = line.split('!!')[0] + options = ['SKIN', 'NONLINEAR', 'LINEAR'] + losstype = 'skin' + for lt in options: + if lt.lower() in line.lower(): + losstype = lt.lower() + return losstype + + +def _parse_3(line, txt): + def getitem(line, txt): + return 
line.pop(0).replace(txt + ':', '').strip() + + line = line_parse(line.lower()) + items = [getitem(line, 'file'), + getitem(line, txt)] + if 'alltime' in ' '.join(line): + items.append('alltime') + return items + + +def _parse_5(f, itmp, + qfrcmn_default=None, + qfrcmx_default=None, + qcut_default=''): + data = [] + mnw_no = 0 + mn = False + multi = False + label = '' + for n in range(itmp): + + linetxt = skipcomments(next(f), f).lower() + line = line_parse(linetxt) + + # get the label; strip it out + if 'site:' in linetxt: + label = linetxt.replace(',', ' ').split('site:')[1].split()[0] + label = 'site:' + label + txt = [t for t in line if 'site:' in t] + if len(txt) > 0: # site: might have been in the comments section + line.remove(txt[0]) + + k = pop_item(line, int) - 1 + i = pop_item(line, int) - 1 + j = pop_item(line, int) - 1 + qdes = pop_item(line, float) + + # logic to create column of unique numbers for each MNW + mntxt = '' + if 'mn' in line: + if not mn: + mnw_no -= 1 # this node has same number as previous + if label == '': + label = data[n - 1][-1] + mn = True + mntxt = 'mn' + line.remove('mn') + if 'multi' in line: + multi = True + mntxt = 'multi' + line.remove('multi') + if mn and not multi: + multi = True + + # "The alphanumeric flags MN and DD can appear anywhere + # between columns 41 and 256, inclusive." + dd = '' + if 'dd' in line: + line.remove('dd') + dd = 'dd' + + qwval = pop_item(line, float) + rw = pop_item(line, float) + skin = pop_item(line, float) + hlim = pop_item(line, float) + href = pop_item(line, float) + iqwgrp = pop_item(line) + + cpc = '' + if 'cp:' in linetxt: + cpc = re.findall(r'\d+', line.pop(0)) + # in case there is whitespace between cp: and the value + if len(cpc) == 0: + cpc = pop_item(line) + cpc = 'cp:' + cpc + + qcut = '' + qfrcmn = 0. + qfrcmx = 0. + if 'qcut' in linetxt: + txt = [t for t in line if 'qcut' in t][0] + qcut = txt + line.remove(txt) + elif '%cut' in linetxt: + txt = [t for t in line if '%cut' in t][0] + qcut = txt + line.remove(txt) + if 'qcut' in linetxt or '%cut' in linetxt: + qfrcmn = pop_item(line, float) + qfrcmx = pop_item(line, float) + elif qfrcmn_default is not None and qfrcmx_default is not None: + qfrcmn = qfrcmn_default + qfrcmx = qfrcmx_default + if 'qcut' not in linetxt and '%cut' not in linetxt: + qcut = qcut_default + if 'default' in line: + qfrcmn_default = qfrcmn + qfrcmx_default = qfrcmx + qcut_default = qcut + + idata = [mnw_no, k, i, j, qdes, mntxt, qwval, + rw, skin, hlim, href, dd, iqwgrp, + cpc, qcut, qfrcmn, qfrcmx, label] + data.append(idata) + + # reset MNW designators + # if at the end of the well + if mn and multi: + mnw_no += 1 + mn = False + multi = False + label = '' + elif not mn and not multi: + mnw_no += 1 + label = '' + + return data, qfrcmn_default, qfrcmx_default, qcut_default + + +def _write_5(f, spd): + f.write('{:d} {:d} {:d} {}') + pass diff --git a/flopy/modflow/mfmnw2.py b/flopy/modflow/mfmnw2.py index 83fd8e3125..2f881b1d84 100644 --- a/flopy/modflow/mfmnw2.py +++ b/flopy/modflow/mfmnw2.py @@ -1,1938 +1,1938 @@ -import os -import sys -import warnings - -import numpy as np -from .mfdis import get_layer -from ..utils import check -from ..utils.flopy_io import line_parse, pop_item, get_next_line -from ..utils import MfList -from ..utils.recarray_utils import create_empty_recarray - -from ..pakbase import Package - - -class Mnw(object): - """ - Multi-Node Well object class - - Parameters - ---------- - wellid : str or int - is the name of the well. 
This is a unique alphanumeric identification - label for each well. The text string is limited to 20 alphanumeric - characters. If the name of the well includes spaces, then enclose the - name in quotes. Flopy converts wellid string to lower case. - nnodes : int - is the number of cells (nodes) associated with this well. - NNODES normally is > 0, but for the case of a vertical borehole, - setting NNODES < 0 will allow the user to specify the elevations of - the tops and bottoms of well screens or open intervals (rather than - grid layer numbers), and the absolute value of NNODES equals the - number of open intervals (or well screens) to be specified in dataset - 2d. If this option is used, then the model will compute the layers in - which the open intervals occur, the lengths of the open intervals, - and the relative vertical position of the open interval within a model - layer (for example, see figure 14 and related discussion). - losstype : str - is a character flag to determine the user-specified model for well loss - (equation 2). Available options (that is, place one of the following - approved words in this field) are: - NONE there are no well corrections and the head in the well is - assumed to equal the head in the cell. This option (hWELL = hn) - is only valid for a single-node well (NNODES = 1). (This is - equivalent to using the original WEL Package of MODFLOW, - but specifying the single-node well within the MNW2 Package - enables the use of constraints.) - THIEM this option allows for only the cell-to-well correction at the - well based on the Thiem (1906) equation; head in the well is - determined from equation 2 as (hWELL = hn + AQn), and the model - computes A on the basis of the user-specified well radius (Rw) - and previously defined values of cell transmissivity and grid - spacing. Coefficients B and C in equation 2 are automatically - set = 0.0. User must define Rw in dataset 2c or 2d. - SKIN this option allows for formation damage or skin corrections at - the well. hWELL = hn + AQn + BQn (from equation 2), where A is - determined by the model from the value of Rw, and B is - determined by the model from Rskin and Kskin. User must define - Rw, Rskin, and Kskin in dataset 2c or 2d. - GENERAL head loss is defined with coefficients A, B, and C and power - exponent P (hWELL = hn + AQn + BQn + CQnP). A is determined by - the model from the value of Rw. User must define Rw, B, C, and - P in dataset 2c or 2d. A value of P = 2.0 is suggested if no - other data are available (the model allows 1.0 <= P <= 3.5). - Entering a value of C = 0 will result in a "linear" model in - which the value of B is entered directly (rather than entering - properties of the skin, as with the SKIN option). - SPECIFYcwc the user specifies an effective conductance value - (equivalent to the combined effects of the A, B, and C - well-loss coefficients expressed in equation 15) between the - well and the cell representing the aquifer, CWC. User must - define CWC in dataset 2c or 2d. If there are multiple screens - within the grid cell or if partial penetration corrections are - to be made, then the effective value of CWC for the node may - be further adjusted automatically by MNW2. - pumploc : int - is an integer flag pertaining to the location along the borehole of - the pump intake (if any). 
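To make the loss-type choices above concrete: a SKIN-type vertical well needs only the well radius plus the skin radius and conductivity (a sketch; values are illustrative, and the keyword names follow the parameter list in this docstring):

    from flopy.modflow.mfmnw2 import Mnw

    # vertical well with its open interval given as elevations (NNODES < 0);
    # A is computed from rw, B from rskin and kskin
    well = Mnw('well-1', nnodes=-1, losstype='skin',
               i=4, j=4, ztop=-5., zbotm=-25.,
               rw=0.15, rskin=0.30, kskin=5.)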
If PUMPLOC = 0, then either there is no pump - or the intake location (or discharge point for an injection well) is - assumed to occur above the first active node associated with the multi- - node well (that is, the node closest to the land surface or to the - wellhead). If PUMPLOC > 0, then the cell in which the intake (or - outflow) is located will be specified in dataset 2e as a LAY-ROW-COL - grid location. For a vertical well only, specifying PUMPLOC < 0, will - enable the option to define the vertical position of the pump intake - (or outflow) as an elevation in dataset 2e (for the given spatial grid - location [ROW-COL] defined for this well in 2d). - qlimit : int - is an integer flag that indicates whether the water level (head) in - the well will be used to constrain the pumping rate. If Qlimit = 0, - then there are no constraints for this well. If Qlimit > 0, then - pumpage will be limited (constrained) by the water level in the well, - and relevant parameters are constant in time and defined below in - dataset 2f. If Qlimit < 0, then pumpage will be limited (constrained) - by the water level in the well, and relevant parameters can vary with - time and are defined for every stress period in dataset 4b. - ppflag : int - is an integer flag that determines whether the calculated head in the - well will be corrected for the effect of partial penetration of the - well screen in the cell. If PPFLAG = 0, then the head in the well will - not be adjusted for the effects of partial penetration. If PPFLAG > 0, - then the head in the well will be adjusted for the effects of partial - penetration if the section of well containing the well screen is - vertical (as indicated by identical row-column locations in the grid). - If NNODES < 0 (that is, the open intervals of the well are defined by - top and bottom elevations), then the model will automatically calculate - the fraction of penetration for each node and the relative vertical - position of the well screen. If NNODES > 0, then the fraction of - penetration for each node must be defined in dataset 2d (see below) - and the well screen will be assumed to be centered vertically within - the thickness of the cell (except if the well is located in the - uppermost model layer that is under unconfined conditions, in which - case the bottom of the well screen will be assumed to be aligned with - the bottom boundary of the cell and the assumed length of well screen - will be based on the initial head in that cell). - pumpcap : int - is an integer flag and value that determines whether the discharge of - a pumping (withdrawal) well (Q < 0.0) will be adjusted for changes in - the lift (or total dynamic head) with time. If PUMPCAP = 0, then the - discharge from the well will not be adjusted on the basis of changes - in lift. If PUMPCAP > 0 for a withdrawal well, then the discharge from - the well will be adjusted on the basis of the lift, as calculated from - the most recent water level in the well. In this case, data describing - the head-capacity relation for the pump must be listed in datasets 2g - and 2h, and the use of that relation can be switched on or off for - each stress period using a flag in dataset 4a. The number of entries - (lines) in dataset 2h corresponds to the value of PUMPCAP. If PUMPCAP - does not equal 0, it must be set to an integer value of between 1 and - 25, inclusive. 
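-
-        As a sketch of how these flags map onto the constructor
-        arguments below (all values hypothetical), a well whose pump
-        capacity curve is known at one point could be built with:
-
-        >>> from flopy.modflow.mfmnw2 import Mnw
-        >>> mnw = Mnw('well1', nnodes=1, pumpcap=1,
-        ...           hlift=100., liftq0=120., liftqmax=90., hwtol=0.01,
-        ...           liftn=95., qn=500.)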
-    rw : float
-        radius of the well (losstype == 'THIEM', 'SKIN', or 'GENERAL')
-    rskin : float
-        radius to the outer limit of the skin (losstype == 'SKIN')
-    kskin : float
-        hydraulic conductivity of the skin
-    B : float
-        coefficient of the well-loss eqn. (eqn. 2 in MNW2 documentation)
-        (losstype == 'GENERAL')
-    C : float
-        coefficient of the well-loss eqn. (eqn. 2 in MNW2 documentation)
-        (losstype == 'GENERAL')
-    P : float
-        exponent of the well-loss eqn. (eqn. 2 in MNW2 documentation)
-        (losstype == 'GENERAL')
-    cwc : float
-        cell-to-well conductance.
-        (losstype == 'SPECIFYcwc')
-    pp : float
-        fraction of partial penetration for the cell. Only specify if
-        PPFLAG > 0 and NNODES > 0.
-    k : int
-        layer index of well (zero-based)
-    i : int
-        row index of well (zero-based)
-    j : int
-        column index of well (zero-based)
-    ztop : float
-        top elevation of open intervals of vertical well.
-    zbotm : float
-        bottom elevation of open intervals of vertical well.
-    node_data : numpy record array
-        table containing MNW data by node. A blank node_data template can
-        be created via the ModflowMnw2.get_empty_node_data() static
-        method.
-
-        Note: Variables in dataset 2d (e.g. rw) can be entered as a
-        single value for the entire well (above), or in node_data (or
-        dataset 2d) by node. Variables not in dataset 2d (such as
-        pumplay) can be included in node data for convenience (to allow
-        construction of MNW2 package from a table), but are only written
-        to MNW2 as a single variable. When writing non-dataset 2d
-        variables to MNW2 input, the first value for the well will be
-        used.
-
-        Other variables (e.g. hlim) can be entered here as constant for
-        all stress periods, or by stress period below in
-        stress_period_data. See MNW2 input instructions for more details.
-
-        Columns are:
-            k : int
-                layer index of well (zero-based)
-            i : int
-                row index of well (zero-based)
-            j : int
-                column index of well (zero-based)
-            ztop : float
-                top elevation of open intervals of vertical well.
-            zbotm : float
-                bottom elevation of open intervals of vertical well.
-            wellid : str
-            losstype : str
-            pumploc : int
-            qlimit : int
-            ppflag : int
-            pumpcap : int
-            rw : float
-            rskin : float
-            kskin : float
-            B : float
-            C : float
-            P : float
-            cwc : float
-            pp : float
-            pumplay : int
-            pumprow : int
-            pumpcol : int
-            zpump : float
-            hlim : float
-            qcut : int
-            qfrcmn : float
-            qfrcmx : float
-            hlift : float
-            liftq0 : float
-            liftqmax : float
-            hwtol : float
-            liftn : float
-            qn : float
-
-    stress_period_data : numpy record array
-        table containing MNW pumping data for all stress periods
-        (dataset 4 in the MNW2 input instructions). A blank
-        stress_period_data template can be created via the
-        Mnw.get_empty_stress_period_data() static method.
-        Columns are:
-            per : int
-                stress period
-            qdes : float
-                is the actual (or maximum desired, if constraints are to
-                be applied) volumetric pumping rate (negative for
-                withdrawal or positive for injection) at the well (L3/T).
-                Qdes should be set to 0 for nonpumping wells. If
-                constraints are applied, then the calculated volumetric
-                withdrawal or injection rate may be adjusted to range
-                from 0 to Qdes and is not allowed to switch directions
-                between withdrawal and injection conditions during any
-                stress period. When PUMPCAP > 0, in the first stress
-                period in which Qdes is specified with a negative value,
-                Qdes represents the maximum operating discharge for the
-                pump; in subsequent stress periods, any different
-                negative values of Qdes are ignored, although values are
-                subject to adjustment for CapMult. If Qdes >= 0.0, then
-                pump-capacity adjustments are not applied.
-            capmult : int
-                is a flag and multiplier for implementing head-capacity
-                relations during a given stress period. Only specify if
-                PUMPCAP > 0 for this well. If CapMult <= 0, then
-                head-capacity relations are ignored for this stress
-                period. If CapMult = 1.0, then head-capacity relations
-                defined in datasets 2g and 2h are used. If CapMult equals
-                any other positive value (for example, 0.6 or 1.1), then
-                head-capacity relations are used but adjusted and shifted
-                by multiplying the discharge value indicated by the
-                head-capacity curve by the value of CapMult.
-            cprime : float
-                is the concentration in the injected fluid. Only specify
-                if Qdes > 0 and GWT process is active.
-            hlim : float
-            qcut : int
-            qfrcmn : float
-            qfrcmx : float
-        Note: If auxiliary variables are also being used, additional
-        columns for these must be included.
-    pumplay : int
-    pumprow : int
-    pumpcol : int
-        PUMPLAY, PUMPROW, and PUMPCOL are the layer, row, and column
-        numbers, respectively, of the cell (node) in this multi-node well
-        where the pump intake (or outflow) is located. The location
-        defined in dataset 2e should correspond with one of the nodes
-        listed in 2d for this multi-node well. These variables are only
-        read if PUMPLOC > 0 in 2b.
-    zpump : float
-        is the elevation of the pump intake (or discharge pipe location
-        for an injection well). Zpump is read only if PUMPLOC < 0; in
-        this case, the model assumes that the borehole is vertical and
-        will compute the layer of the grid in which the pump intake is
-        located.
-    hlim : float
-        is the limiting water level (head) in the well, which is a
-        minimum for discharging wells and a maximum for injection wells.
-        For example, in a discharging well, when hWELL falls below hlim,
-        the flow from the well is constrained.
-    qcut : int
-        is an integer flag that indicates how pumping limits Qfrcmn and
-        Qfrcmx will be specified. If pumping limits are to be specified
-        as a rate (L3/T), then set QCUT > 0; if pumping limits are to be
-        specified as a fraction of the specified pumping rate (Qdes),
-        then set QCUT < 0. If there is not a minimum pumping rate below
-        which the pump becomes inactive, then set QCUT = 0.
-    qfrcmn : float
-        is the minimum pumping rate or fraction of original pumping rate
-        (a choice that depends on QCUT) that a well must exceed to remain
-        active during a stress period. The absolute value of Qfrcmn must
-        be less than the absolute value of Qfrcmx (defined next). Only
-        specify if QCUT != 0.
-    qfrcmx : float
-        is the minimum pumping rate or fraction of original pumping rate
-        that must be exceeded to reactivate a well that had been shut off
-        based on Qfrcmn during a stress period. The absolute value of
-        Qfrcmx must be greater than the absolute value of Qfrcmn. Only
-        specify if QCUT != 0.
-    hlift : float
-        is the reference head (or elevation) corresponding to the
-        discharge point for the well. This is typically at or above the
-        land surface, and can be increased to account for additional head
-        loss due to friction in pipes.
-    liftq0 : float
-        is the value of lift (total dynamic head) that exceeds the
-        capacity of the pump. If the calculated lift equals or exceeds
-        this value, then the pump is shut off and discharge from the well
-        ceases.
-    liftqmax : float
-        is the value of lift (total dynamic head) corresponding to the
-        maximum pumping (discharge) rate for the pump. If the calculated
-        lift is less than or equal to LIFTqmax, then the pump will
-        operate at its design capacity, assumed to equal the
-        user-specified value of Qdes (in dataset 4a). LIFTqmax will be
-        associated with the value of Qdes in the first stress period in
-        which Qdes for the well is less than 0.0.
-    hwtol : float
-        is a minimum absolute value of change in the computed water level
-        in the well allowed between successive iterations; if the value
-        of hWELL changes from one iteration to the next by a value
-        smaller than this tolerance, then the value of discharge computed
-        from the head capacity curves will be locked for the remainder of
-        that time step. It is recommended that HWtol be set equal to a
-        value approximately one or two orders of magnitude larger than
-        the value of HCLOSE, but if the solution fails to converge, then
-        this may have to be adjusted.
-    liftn : float
-        is a value of lift (total dynamic head) that corresponds to a
-        known value of discharge (Qn) for the given pump, specified as
-        the second value in this line.
-    qn : float
-        is the value of discharge corresponding to the height of lift
-        (total dynamic head) specified previously on this line. Sign
-        (positive or negative) is ignored.
-    mnwpackage : ModflowMnw2 instance
-        package that mnw is attached to
-
-    Returns
-    -------
-    None
-
-    """
-    by_node_variables = ['k', 'i', 'j', 'ztop', 'zbotm', 'rw', 'rskin',
-                         'kskin', 'B', 'C', 'P', 'cwc', 'pp']
-
-    def __init__(self, wellid,
-                 nnodes=1, nper=1,
-                 losstype="skin", pumploc=0, qlimit=0, ppflag=0, pumpcap=0,
-                 rw=1, rskin=2, kskin=10,
-                 B=None, C=0, P=2., cwc=None, pp=1,
-                 k=0, i=0, j=0, ztop=0, zbotm=0,
-                 node_data=None, stress_period_data=None,
-                 pumplay=0, pumprow=0, pumpcol=0, zpump=None,
-                 hlim=None, qcut=None, qfrcmn=None, qfrcmx=None,
-                 hlift=None, liftq0=None, liftqmax=None, hwtol=None,
-                 liftn=None, qn=None, mnwpackage=None):
-        """
-        Class constructor
-        """
-
-        self.nper = nper
-        self.mnwpackage = mnwpackage  # associated ModflowMnw2 instance
-        self.aux = None if mnwpackage is None else mnwpackage.aux
-
-        # dataset 2a
-        if isinstance(wellid, str):
-            wellid = wellid.lower()
-        self.wellid = wellid
-        self.nnodes = nnodes
-        # dataset 2b
-        self.losstype = losstype.lower()
-        self.pumploc = pumploc
-        self.qlimit = qlimit
-        self.ppflag = ppflag
-        self.pumpcap = pumpcap
-        # dataset 2c (can be entered by node)
-        self.rw = rw
-        self.rskin = rskin
-        self.kskin = kskin
-        self.B = B
-        self.C = C
-        self.P = P
-        self.cwc = cwc
-        self.pp = pp
-        # dataset 2d (entered by node)
-        # indices should be lists (for iteration over nodes)
-        self.k = k
-        self.i = i
-        self.j = j
-        self.ztop = ztop
-        self.zbotm = zbotm
-        for v in self.by_node_variables:
-            if not isinstance(self.__dict__[v], list):
-                self.__dict__[v] = [self.__dict__[v]]
-        # dataset 2e
-        self.pumplay = pumplay
-        self.pumprow = pumprow
-        self.pumpcol = pumpcol
-        self.zpump = zpump
-        # dataset 2f
-        self.hlim = hlim
-        self.qcut = qcut
-        self.qfrcmn = qfrcmn
-        self.qfrcmx = qfrcmx
-        # dataset 2g
-        self.hlift = hlift
-        self.liftq0 = liftq0
-        self.liftqmax = liftqmax
-        self.hwtol = hwtol
-        # dataset 2h
-        self.liftn = liftn
-        self.qn = qn
-
-        # dataset 4
-
-        # accept stress period data (pumping rates) from structured array
-        # does this need to be MfList?
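-        # a minimal sketch of the expected input (values hypothetical):
-        #     spd = Mnw.get_empty_stress_period_data(nper=2)
-        #     spd['per'] = [0, 1]
-        #     spd['qdes'] = [-1000., -500.]
-        # any column present in the passed recarray is copied field by
-        # field into the template built below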
-        self.stress_period_data = self.get_empty_stress_period_data(nper)
-        if stress_period_data is not None:
-            for n in stress_period_data.dtype.names:
-                self.stress_period_data[n] = stress_period_data[n]
-
-        # accept node data from structured array
-        self.node_data = ModflowMnw2.get_empty_node_data(np.abs(nnodes),
-                                                         aux_names=self.aux)
-        if node_data is not None:
-            for n in node_data.dtype.names:
-                self.node_data[n] = node_data[n]
-                # convert strings to lower case
-                if isinstance(n, str):
-                    for idx, v in enumerate(self.node_data[n]):
-                        if isinstance(v, str):
-                            self.node_data[n][idx] = v.lower()
-
-        # build recarray of node data from MNW2 input file
-        if node_data is None:
-            self.make_node_data()
-        else:
-            self._set_attributes_from_node_data()
-
-        for n in ['k', 'i', 'j']:
-            if len(self.__dict__[n]) > 0:
-                # need to set for each period
-                self.stress_period_data[n] = [self.__dict__[n][0]]
-
-    def make_node_data(self):
-        """
-        Make the node data array from variables entered individually.
-
-        Returns
-        -------
-        None
-
-        """
-        nnodes = self.nnodes
-        node_data = ModflowMnw2.get_empty_node_data(np.abs(nnodes),
-                                                    aux_names=self.aux)
-
-        names = Mnw.get_item2_names(self)
-        for n in names:
-            node_data[n] = self.__dict__[n]
-        self.node_data = node_data
-
-    @staticmethod
-    def get_empty_stress_period_data(nper=0, aux_names=None, structured=True,
-                                     default_value=0):
-        """
-        Get an empty stress_period_data recarray that corresponds to dtype
-
-        Parameters
-        ----------
-        nper : int
-            Number of stress periods (rows) in the recarray (default is 0)
-        aux_names : list
-            List of aux name strings (default is None)
-        structured : bool
-            Boolean indicating if a structured (True) or unstructured
-            (False) model (default is True).
-        default_value : float
-            Default value for float variables (default is 0).
-
-        Returns
-        -------
-        ra : np.recarray
-            Recarray
-
-        """
-        dtype = Mnw.get_default_spd_dtype(structured=structured)
-        if aux_names is not None:
-            dtype = Package.add_to_dtype(dtype, aux_names, np.float32)
-        return create_empty_recarray(nper, dtype, default_value=default_value)
-
-    @staticmethod
-    def get_default_spd_dtype(structured=True):
-        """
-        Get the default stress period data dtype
-
-        Parameters
-        ----------
-        structured : bool
-            Boolean that defines if a structured (True) or unstructured
-            (False) dtype will be created (default is True). Not
-            implemented for unstructured.
-
-        Returns
-        -------
-        dtype : np.dtype
-
-        """
-        if structured:
-            return np.dtype([('k', np.int),
-                             ('i', np.int),
-                             ('j', np.int),
-                             ('per', np.int),
-                             ('qdes', np.float32),
-                             ('capmult', np.int),
-                             ('cprime', np.float32),
-                             ('hlim', np.float32),
-                             ('qcut', np.int),
-                             ('qfrcmn', np.float32),
-                             ('qfrcmx', np.float32)])
-        else:
-            msg = 'Mnw2: get_default_spd_dtype not implemented for ' + \
-                  'unstructured grids'
-            raise NotImplementedError(msg)
-
-    @staticmethod
-    def get_item2_names(mnw2obj=None, node_data=None):
-        """
-        Get the names of the dataset 2 variables that apply to a well,
-        based on its configuration flags (nnodes, losstype, ppflag,
-        pumploc, qlimit, pumpcap, and qcut).
-
-        Parameters
-        ----------
-        mnw2obj : Mnw object
-            Mnw object (default is None)
-        node_data : np.recarray
-            Recarray of node data for a single well, of the form produced
-            by ModflowMnw2.get_empty_node_data() (default is None)
-
-        Returns
-        -------
-        names : list
-            List of dtype names.
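-
-        For example, a hypothetical single-node THIEM well with no pump
-        location, limits, or pump capacity data would yield
-        ['i', 'j', 'k', 'wellid', 'losstype', 'pumploc', 'qlimit',
-        'ppflag', 'pumpcap', 'rw'].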
-
-        """
-
-        if node_data is not None:
-            nnodes = Mnw.get_nnodes(node_data)
-            losstype = node_data.losstype[0].lower()
-            ppflag = node_data.ppflag[0]
-            pumploc = node_data.pumploc[0]
-            qlimit = node_data.qlimit[0]
-            pumpcap = node_data.pumpcap[0]
-            qcut = node_data.qcut[0]
-        # get names based on mnw2obj attribute values
-        else:
-            nnodes = mnw2obj.nnodes
-            losstype = mnw2obj.losstype.lower()
-            ppflag = mnw2obj.ppflag
-            pumploc = mnw2obj.pumploc
-            qlimit = mnw2obj.qlimit
-            pumpcap = mnw2obj.pumpcap
-            qcut = mnw2obj.qcut
-
-        names = ['i', 'j']
-        if nnodes > 0:
-            names += ['k']
-        if nnodes < 0:
-            names += ['ztop', 'zbotm']
-        names += ['wellid', 'losstype', 'pumploc', 'qlimit', 'ppflag',
-                  'pumpcap']
-        if losstype.lower() == 'thiem':
-            names += ['rw']
-        elif losstype.lower() == 'skin':
-            names += ['rw', 'rskin', 'kskin']
-        elif losstype.lower() == 'general':
-            names += ['rw', 'B', 'C', 'P']
-        elif losstype.lower() == 'specifycwc':
-            names += ['cwc']
-        if ppflag > 0 and nnodes > 0:
-            names += ['pp']
-        if pumploc != 0:
-            if pumploc > 0:
-                names += ['pumplay', 'pumprow', 'pumpcol']
-            if pumploc < 0:
-                names += ['zpump']
-        if qlimit > 0:
-            names += ['hlim', 'qcut']
-            if qcut != 0:
-                names += ['qfrcmn', 'qfrcmx']
-        if pumpcap > 0:
-            names += ['hlift', 'liftq0', 'liftqmax', 'hwtol']
-            names += ['liftn', 'qn']
-        return names
-
-    @staticmethod
-    def get_nnodes(node_data):
-        """
-        Get the number of MNW2 nodes.
-
-        Parameters
-        ----------
-        node_data : np.recarray
-            Recarray of node data for a single well (one row per node)
-
-        Returns
-        -------
-        nnodes : int
-            Number of MNW2 nodes
-
-        """
-        nnodes = len(node_data)
-        # check if ztop and zbotm were entered,
-        # flip nnodes for format 2
-        if np.sum(node_data.ztop - node_data.zbotm) > 0:
-            nnodes *= -1
-        return nnodes
-
-    @staticmethod
-    def sort_node_data(node_data):
-        # sort by layer (layer input option)
-        if np.any(np.diff(node_data['k']) < 0):
-            node_data.sort(order=['k'])
-
-        # reverse sort by ztop if it's specified and not sorted correctly
-        if np.any(np.diff(node_data['ztop']) > 0):
-            node_data = np.sort(node_data, order=['ztop'])[::-1]
-        return node_data
-
-    def check(self, f=None, verbose=True, level=1, checktype=None):
-        """
-        Check mnw object for common errors.
-
-        Parameters
-        ----------
-        f : str or file handle
-            String defining file name or file handle for summary file
-            of check method output. If a string is passed a file handle
-            is created. If f is None, check method does not write
-            results to a summary file. (default is None)
-        verbose : bool
-            Boolean flag used to determine if check method results are
-            written to the screen
-        level : int
-            Check method analysis level. If level=0, summary checks are
-            performed. If level=1, full checks are performed.
-
-        Returns
-        -------
-        chk : flopy.utils.check object
-
-        """
-        chk = self._get_check(f, verbose, level, checktype)
-        if self.losstype.lower() not in ['none', 'thiem', 'skin', 'general',
-                                         'specifycwc']:
-            chk._add_to_summary(type='Error', k=self.k, i=self.i, j=self.j,
-                                value=self.losstype, desc='Invalid losstype.')
-
-        chk.summarize()
-        return chk
-
-    def _get_check(self, f, verbose, level, checktype):
-        if checktype is not None:
-            return checktype(self, f=f, verbose=verbose, level=level)
-        else:
-            return check(self, f=f, verbose=verbose, level=level)
-
-    def _set_attributes_from_node_data(self):
-        """
-        Populates the Mnw object attributes with values from node_data
-        table.
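-        By-node variables (e.g. rw) are stored as lists with one entry
-        per node; all other variables take their value from the first
-        node of the well.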
-        """
-        names = Mnw.get_item2_names(node_data=self.node_data)
-        for n in names:
-            # assign by node variables as lists if they are being included
-            if n in self.by_node_variables:  # and len(np.unique(self.node_data[n])) > 1:
-                self.__dict__[n] = list(self.node_data[n])
-            else:
-                self.__dict__[n] = self.node_data[n][0]
-
-    def _write_2(self, f_mnw, float_format=' {:15.7E}', indent=12):
-        """
-        Write out dataset 2 for MNW.
-
-        Parameters
-        ----------
-        f_mnw : package file handle
-            file handle for MNW2 input file
-        float_format : str
-            python format statement for floats (default is ' {:15.7E}').
-        indent : int
-            number of spaces to indent line (default is 12).
-
-        Returns
-        -------
-        None
-
-        """
-        # enforce sorting of node data
-        self.node_data = Mnw.sort_node_data(self.node_data)
-
-        # update object attributes with values from node_data
-        self._set_attributes_from_node_data()
-
-        indent = ' ' * indent
-        # dataset 2a
-        fmt = '{} {:.0f}\n'
-        f_mnw.write(fmt.format(self.wellid, self.nnodes))
-        # dataset 2b
-        fmt = indent + '{} {:.0f} {:.0f} {:.0f} {:.0f}\n'
-        f_mnw.write(fmt.format(self.losstype,
-                               self.pumploc,
-                               self.qlimit,
-                               self.ppflag,
-                               self.pumpcap))
-
-        # dataset 2c
-        def _assign_by_node_var(var):
-            """Assign negative number if variable is entered by node."""
-            if len(np.unique(var)) > 1:
-                return -1
-            return var[0]
-
-        if self.losstype.lower() != 'none':
-            if self.losstype.lower() != 'specifycwc':
-                fmt = indent + float_format + ' '
-                f_mnw.write(fmt.format(_assign_by_node_var(self.rw)))
-                if self.losstype.lower() == 'skin':
-                    fmt = '{0} {0}'.format(float_format)
-                    f_mnw.write(fmt.format(_assign_by_node_var(self.rskin),
-                                           _assign_by_node_var(self.kskin)))
-                elif self.losstype.lower() == 'general':
-                    fmt = '{0} {0} {0}'.format(float_format)
-                    f_mnw.write(fmt.format(_assign_by_node_var(self.B),
-                                           _assign_by_node_var(self.C),
-                                           _assign_by_node_var(self.P)))
-            else:
-                fmt = indent + float_format
-                f_mnw.write(fmt.format(_assign_by_node_var(self.cwc)))
-            f_mnw.write('\n')
-        # dataset 2d
-        if self.nnodes > 0:
-            def _getloc(n):
-                """Output for dataset 2d1."""
-                return indent + '{:.0f} {:.0f} {:.0f}'.format(self.k[n] + 1,
-                                                              self.i[n] + 1,
-                                                              self.j[n] + 1)
-        elif self.nnodes < 0:
-            def _getloc(n):
-                """Output for dataset 2d2."""
-                fmt = indent + '{0} {0} '.format(
-                    float_format) + '{:.0f} {:.0f}'
-                return fmt.format(self.node_data.ztop[n],
-                                  self.node_data.zbotm[n],
-                                  self.node_data.i[n] + 1,
-                                  self.node_data.j[n] + 1)
-        for n in range(np.abs(self.nnodes)):
-            f_mnw.write(_getloc(n))
-            for var in ['rw', 'rskin', 'kskin', 'B', 'C', 'P', 'cwc', 'pp']:
-                val = self.__dict__[var]
-                if val is None:
-                    continue
-                # only write variables by node if they are unique lists
-                # of length > 1
-                if len(np.unique(val)) > 1:
-                    # if isinstance(val, list) or val < 0:
-                    fmt = ' ' + float_format
-                    f_mnw.write(fmt.format(self.node_data[var][n]))
-            f_mnw.write('\n')
-        # dataset 2e
-        if self.pumploc != 0:
-            if self.pumploc > 0:
-                f_mnw.write(
-                    indent + '{:.0f} {:.0f} {:.0f}\n'.format(self.pumplay,
-                                                             self.pumprow,
-                                                             self.pumpcol))
-            elif self.pumploc < 0:
-                fmt = indent + '{}\n'.format(float_format)
-                f_mnw.write(fmt.format(self.zpump))
-        # dataset 2f
-        if self.qlimit > 0:
-            fmt = indent + '{} '.format(float_format) + '{:.0f}'
-            f_mnw.write(fmt.format(self.hlim, self.qcut))
-            if self.qcut != 0:
-                fmt = ' {0} {0}'.format(float_format)
-                f_mnw.write(fmt.format(self.qfrcmn, self.qfrcmx))
-            f_mnw.write('\n')
-        # dataset 2g
-        if self.pumpcap > 0:
-            fmt = indent + '{0} {0} {0} {0}\n'.format(float_format)
-            f_mnw.write(
fmt.format(self.hlift, self.liftq0, self.liftqmax, self.hwtol))
-        # dataset 2h
-        if self.pumpcap > 0:
-            fmt = indent + '{0} {0}\n'.format(float_format)
-            f_mnw.write(fmt.format(self.liftn, self.qn))
-
-
-class ModflowMnw2(Package):
-    """
-    Multi-Node Well 2 Package Class
-
-    Parameters
-    ----------
-    model : model object
-        The model object (of type :class:'flopy.modflow.mf.Modflow') to
-        which this package will be added.
-    mnwmax : int
-        The absolute value of MNWMAX is the maximum number of multi-node
-        wells (MNW) to be simulated. If MNWMAX is a negative number,
-        NODTOT is read.
-    nodtot : int
-        Maximum number of nodes.
-        The code automatically estimates the maximum number of nodes
-        (NODTOT) as required for allocation of arrays. However, if a
-        large number of horizontal wells are being simulated, or if for
-        other reasons this default estimate proves to be inadequate, an
-        input option allows the user to directly specify a value for
-        NODTOT. To use it, specify a negative value for "MNWMAX"--the
-        first value listed in Record 1 (Line 1) of the MNW2 input data
-        file. If this is done, then the code will assume that the very
-        next value on that line will be the desired value of "NODTOT".
-        The model will then reset "MNWMAX" to its absolute value. The
-        value of "ipakcb" will become the third value on that line, etc.
-    ipakcb : int
-        is a flag and a unit number:
-        if ipakcb > 0, then it is the unit number to which MNW
-        cell-by-cell flow terms will be recorded whenever cell-by-cell
-        budget data are written to a file (as determined by the output
-        control options of MODFLOW).
-        if ipakcb = 0, then MNW cell-by-cell flow terms will not be
-        printed or recorded.
-        if ipakcb < 0, then well injection or withdrawal rates and water
-        levels in the well and its multiple cells will be printed in
-        the main MODFLOW listing (output) file whenever cell-by-cell
-        budget data are written to a file (as determined by the output
-        control options of MODFLOW).
-    mnwprnt : integer
-        Flag controlling the level of detail of information about
-        multi-node wells to be written to the main MODFLOW listing
-        (output) file. If MNWPRNT = 0, then only basic well information
-        will be printed in the main MODFLOW output file; increasing the
-        value of MNWPRNT yields more information, up to a maximum level
-        of detail corresponding with MNWPRNT = 2. (default is 0)
-    aux : list of strings
-        (listed as "OPTION" in MNW2 input instructions)
-        is an optional list of character values in the style of
-        "AUXILIARY abc" or "AUX abc" where "abc" is the name of an
-        auxiliary parameter to be read for each multi-node well as part
-        of dataset 4a. Up to 20 parameters can be specified, each of
-        which must be preceded by "AUXILIARY" or "AUX." These parameters
-        will not be used by the MNW2 Package, but they will be available
-        for use by other packages. (default is None)
-    node_data : numpy record array
-        master table describing multi-node wells in package. Same format
-        as node_data tables for each Mnw object. See Mnw class
-        documentation for more information.
-    mnw : list or dict of Mnw objects
-        Can be supplied instead of node_data and stress_period_data
-        tables (in which case the tables are constructed from the Mnw
-        objects). Otherwise, a dict of Mnw objects (keyed by wellid) is
-        constructed from the tables.
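-        As a sketch (wellids hypothetical),
-        mnw=[Mnw('well1', ...), Mnw('well2', ...)] is stored internally
-        as a dict keyed by wellid, {'well1': <Mnw>, 'well2': <Mnw>}.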
-    stress_period_data : dict of numpy record arrays
-        master dictionary of record arrays (keyed by stress period)
-        containing transient input for multi-node wells. Format is the
-        same as stress period data for individual Mnw objects, except
-        the 'per' column is replaced by 'wellid' (containing wellid for
-        each MNW). See Mnw class documentation for more information.
-    itmp : list of ints
-        is an integer value for reusing or reading multi-node well data;
-        it can change each stress period. ITMP must be >= 0 for the
-        first stress period of a simulation.
-        if ITMP > 0, then ITMP is the total number of active multi-node
-        wells simulated during the stress period, and only wells listed
-        in dataset 4a will be active during the stress period.
-        Characteristics of each well are defined in datasets 2 and 4.
-        if ITMP = 0, then no multi-node wells are active for the stress
-        period and the following dataset is skipped.
-        if ITMP < 0, then the same number of wells and well information
-        will be reused from the previous stress period and dataset 4 is
-        skipped.
-    extension : string
-        Filename extension (default is 'mnw2')
-    unitnumber : int
-        File unit number (default is None).
-    filenames : str or list of str
-        Filenames to use for the package and the output files. If
-        filenames=None the package name will be created using the model
-        name and package extension and the cbc output name will be
-        created using the model name and .cbc extension (for example,
-        modflowtest.cbc), if ipakcb is a number greater than zero. If a
-        single string is passed the package will be set to the string
-        and cbc output names will be created using the model name and
-        .cbc extension, if ipakcb is a number greater than zero. To
-        define the names for all package files (input and output) the
-        length of the list of strings should be 2. Default is None.
-    gwt : boolean
-        Flag indicating whether GW transport process is active
-
-    Attributes
-    ----------
-
-    Methods
-    -------
-
-    See Also
-    --------
-
-    Notes
-    -----
-
-    Examples
-    --------
-
-    >>> import flopy
-    >>> ml = flopy.modflow.Modflow()
-    >>> mnw2 = flopy.modflow.ModflowMnw2(ml, ...)
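-
-    A fuller sketch built from the tables (all values hypothetical):
-
-    >>> node_data = flopy.modflow.ModflowMnw2.get_empty_node_data(2)
-    >>> node_data['wellid'] = ['well1', 'well1']
-    >>> node_data['k'] = [0, 1]
-    >>> node_data['losstype'] = ['thiem', 'thiem']
-    >>> node_data['rw'] = [0.5, 0.5]
-    >>> spd = flopy.modflow.ModflowMnw2.get_empty_stress_period_data(1)
-    >>> spd['wellid'] = ['well1']
-    >>> spd['qdes'] = [-1000.]
-    >>> mnw2 = flopy.modflow.ModflowMnw2(ml, mnwmax=1,
-    ...                                  node_data=node_data,
-    ...                                  stress_period_data={0: spd},
-    ...                                  itmp=[1])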
-
-    """
-
-    def __init__(self, model, mnwmax=0, nodtot=None, ipakcb=0, mnwprnt=0,
-                 aux=[],
-                 node_data=None, mnw=None, stress_period_data=None, itmp=[],
-                 extension='mnw2', unitnumber=None, filenames=None,
-                 gwt=False):
-        """
-        Package constructor
-        """
-        # set default unit number if one is not specified
-        if unitnumber is None:
-            unitnumber = ModflowMnw2.defaultunit()
-
-        # set filenames
-        if filenames is None:
-            filenames = [None, None]
-        elif isinstance(filenames, str):
-            filenames = [filenames, None]
-        elif isinstance(filenames, list):
-            if len(filenames) < 2:
-                filenames.append(None)
-
-        # update external file information with cbc output, if necessary
-        if ipakcb is not None:
-            fname = filenames[1]
-            model.add_output_file(ipakcb, fname=fname,
-                                  package=ModflowMnw2.ftype())
-        else:
-            ipakcb = 0
-
-        # Fill namefile items
-        name = [ModflowMnw2.ftype()]
-        units = [unitnumber]
-        extra = ['']
-
-        # set package name
-        fname = [filenames[0]]
-
-        # Call ancestor's init to set self.parent, extension, name and
-        # unit number
-        Package.__init__(self, model, extension=extension, name=name,
-                         unit_number=units, extra=extra, filenames=fname)
-
-        self.url = 'mnw2.htm'
-        self.nper = self.parent.nrow_ncol_nlay_nper[-1]
-        # otherwise iterations from 0, nper won't run
-        self.nper = 1 if self.nper == 0 else self.nper
-        self.structured = self.parent.structured
-
-        # Dataset 0
-        self.heading = '# {} package for '.format(self.name[0]) + \
-                       ' {}, '.format(model.version_types[model.version]) + \
-                       'generated by Flopy.'
-        # Dataset 1
-        # maximum number of multi-node wells to be simulated
-        self.mnwmax = int(mnwmax)
-        self.nodtot = nodtot  # user-specified maximum number of nodes
-        self.ipakcb = ipakcb
-        self.mnwprnt = int(mnwprnt)  # verbosity flag
-        self.aux = aux  # list of optional auxiliary parameters
-
-        # Datasets 2-4 are contained in node_data and stress_period_data
-        # tables and/or in Mnw objects
-        self.node_data = self.get_empty_node_data(0, aux_names=aux)
-
-        if node_data is not None:
-            self.node_data = self.get_empty_node_data(len(node_data),
-                                                      aux_names=aux)
-            names = [n for n in node_data.dtype.names if
-                     n in self.node_data.dtype.names]
-            for n in names:
-                # recarray of Mnw properties by node
-                self.node_data[n] = node_data[n]
-            self.nodtot = len(self.node_data)
-            self._sort_node_data()
-            # self.node_data.sort(order=['wellid', 'k'])
-
-            # Python 3.5.0 produces a segmentation fault when trying to
-            # sort BR MNW wells
-            # self.node_data.sort(order='wellid', axis=0)
-        self.mnw = mnw  # dict or list of Mnw objects
-
-        self.stress_period_data = MfList(
-            self,
-            {0: self.get_empty_stress_period_data(
-                0, aux_names=aux, structured=self.structured)},
-            dtype=self.get_default_spd_dtype(structured=self.structured))
-        if stress_period_data is not None:
-            for per, data in stress_period_data.items():
-                spd = ModflowMnw2.get_empty_stress_period_data(len(data),
-                                                               aux_names=aux)
-                names = [n for n in data.dtype.names if n in spd.dtype.names]
-                for n in names:
-                    spd[n] = data[n]
-                spd.sort(order='wellid')
-                self.stress_period_data[per] = spd
-
-        self.itmp = itmp
-        self.gwt = gwt
-
-        if mnw is None:
-            self.make_mnw_objects()
-        elif node_data is None and mnw is not None:
-            if isinstance(mnw, list):
-                self.mnw = {mnwobj.wellid: mnwobj for mnwobj in mnw}
-            elif isinstance(mnw, Mnw):
-                self.mnw = {mnw.wellid: mnw}
-            self.make_node_data(self.mnw)
-            self.make_stress_period_data(self.mnw)
-
-        if stress_period_data is not None:
-            if 'k' not in stress_period_data[
-                    list(stress_period_data.keys())[0]].dtype.names:
-                self._add_kij_to_stress_period_data()
-
-        self.parent.add_package(self)
-
-    def _add_kij_to_stress_period_data(self):
-        for per in self.stress_period_data.data.keys():
-            for d in ['k', 'i', 'j']:
-                self.stress_period_data[per][d] = [
-                    self.mnw[wellid].__dict__[d][0]
-                    for wellid in self.stress_period_data[per].wellid]
-
-    def _sort_node_data(self):
-        node_data = self.node_data
-        node_data_list = []
-        wells = sorted(np.unique(node_data['wellid']).tolist())
-        for wellid in wells:
-            nd = node_data[node_data['wellid'] == wellid]
-            nd = Mnw.sort_node_data(nd)
-            node_data_list.append(nd)
-        node_data = np.concatenate(node_data_list, axis=0)
-        self.node_data = node_data.view(np.recarray)
-
-    @staticmethod
-    def get_empty_node_data(maxnodes=0, aux_names=None, structured=True,
-                            default_value=0):
-        """
-        get an empty recarray that corresponds to dtype
-
-        Parameters
-        ----------
-        maxnodes : int
-            Total number of nodes to be simulated (default is 0)
-        aux_names : list
-            List of aux name strings (default is None)
-        structured : bool
-            Boolean indicating if a structured (True) or unstructured
-            (False) model (default is True).
-        default_value : float
-            Default value for float variables (default is 0).
-
-        Returns
-        -------
-        r : np.recarray
-            Recarray of default dtype of shape maxnode
-        """
-        dtype = ModflowMnw2.get_default_node_dtype(structured=structured)
-        if aux_names is not None:
-            dtype = Package.add_to_dtype(dtype, aux_names, np.float32)
-        return create_empty_recarray(maxnodes, dtype,
-                                     default_value=default_value)
-
-    @staticmethod
-    def get_default_node_dtype(structured=True):
-        """
-        Get default dtype for node data
-
-        Parameters
-        ----------
-        structured : bool
-            Boolean indicating if a structured (True) or unstructured
-            (False) model (default is True).
-
-        Returns
-        -------
-        dtype : np.dtype
-            node data dtype
-
-        """
-        if structured:
-            return np.dtype([('k', np.int),
-                             ('i', np.int),
-                             ('j', np.int),
-                             ('ztop', np.float32),
-                             ('zbotm', np.float32),
-                             ('wellid', np.object),
-                             ('losstype', np.object),
-                             ('pumploc', np.int),
-                             ('qlimit', np.int),
-                             ('ppflag', np.int),
-                             ('pumpcap', np.int),
-                             ('rw', np.float32),
-                             ('rskin', np.float32),
-                             ('kskin', np.float32),
-                             ('B', np.float32),
-                             ('C', np.float32),
-                             ('P', np.float32),
-                             ('cwc', np.float32),
-                             ('pp', np.float32),
-                             ('pumplay', np.int),
-                             ('pumprow', np.int),
-                             ('pumpcol', np.int),
-                             ('zpump', np.float32),
-                             ('hlim', np.float32),
-                             ('qcut', np.int),
-                             ('qfrcmn', np.float32),
-                             ('qfrcmx', np.float32),
-                             ('hlift', np.float32),
-                             ('liftq0', np.float32),
-                             ('liftqmax', np.float32),
-                             ('hwtol', np.float32),
-                             ('liftn', np.float32),
-                             ('qn', np.float32)])
-        else:
-            msg = 'get_default_node_dtype: unstructured model not supported'
-            raise NotImplementedError(msg)
-
-    @staticmethod
-    def get_empty_stress_period_data(itmp=0, aux_names=None, structured=True,
-                                     default_value=0):
-        """
-        Get an empty stress period data recarray
-
-        Parameters
-        ----------
-        itmp : int
-            Number of entries in this stress period (default is 0).
-        aux_names : list
-            List of aux names (default is None).
-        structured : bool
-            Boolean indicating if a structured (True) or unstructured
-            (False) model (default is True).
-        default_value : float
-            Default value for float variables (default is 0).
-
-        Returns
-        -------
-        r : np.recarray
-            Recarray of default dtype of shape itmp
-
-        """
-        dtype = ModflowMnw2.get_default_spd_dtype(structured=structured)
-        if aux_names is not None:
-            dtype = Package.add_to_dtype(dtype, aux_names, np.float32)
-        return create_empty_recarray(itmp, dtype, default_value=default_value)
-
-    @staticmethod
-    def get_default_spd_dtype(structured=True):
-        """
-        Get default dtype for stress period data
-
-        Parameters
-        ----------
-        structured : bool
-            Boolean indicating if a structured (True) or unstructured
-            (False) model (default is True).
-
-        Returns
-        -------
-        dtype : np.dtype
-            stress period data dtype
-
-        """
-        if structured:
-            return np.dtype([('k', np.int),
-                             ('i', np.int),
-                             ('j', np.int),
-                             ('wellid', np.object),
-                             ('qdes', np.float32),
-                             ('capmult', np.int),
-                             ('cprime', np.float32),
-                             ('hlim', np.float32),
-                             ('qcut', np.int),
-                             ('qfrcmn', np.float32),
-                             ('qfrcmx', np.float32)])
-        else:
-            msg = 'get_default_spd_dtype: unstructured model not supported'
-            raise NotImplementedError(msg)
-
-    @staticmethod
-    def load(f, model, nper=None, gwt=False, nsol=1, ext_unit_dict=None):
-        """
-        Load an existing MNW2 package input file.
-
-        Parameters
-        ----------
-        f : filename or file handle
-            File to load.
-        model : model object
-            The model object (of type :class:`flopy.modflow.mf.Modflow`)
-            to which this package will be added.
-        nper : int
-            Number of periods
-        gwt : bool
-            Flag indicating whether GW transport process is active
-        nsol : int
-        ext_unit_dict : dictionary, optional
-            If the arrays in the file are specified using EXTERNAL,
-            or older style array control records, then `f` should be a
-            file handle. In this case ext_unit_dict is required, which
-            can be constructed using the function
-            :class:`flopy.utils.mfreadnam.parsenamefile`.
-
-        Returns
-        -------
-        mnw2 : ModflowMnw2 object
-
-        """
-
-        if model.verbose:
-            sys.stdout.write('loading mnw2 package file...\n')
-
-        structured = model.structured
-        if nper is None:
-            nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper()
-            # otherwise iterations from 0, nper won't run
-            nper = 1 if nper == 0 else nper
-
-        openfile = not hasattr(f, 'read')
-        if openfile:
-            filename = f
-            f = open(filename, 'r')
-
-        # dataset 0 (header)
-        while True:
-            line = get_next_line(f)
-            if line[0] != '#':
-                break
-        # dataset 1
-        mnwmax, nodtot, ipakcb, mnwprint, option = _parse_1(line)
-        # dataset 2
-        node_data = ModflowMnw2.get_empty_node_data(0)
-        mnw = {}
-        for i in range(mnwmax):
-            # create a Mnw object by parsing dataset 2
-            mnwobj = _parse_2(f)
-            # populate stress period data table for each well object
-            # this is filled below under dataset 4
-            mnwobj.stress_period_data = Mnw.get_empty_stress_period_data(
-                nper, aux_names=option)
-            mnw[mnwobj.wellid] = mnwobj
-            # master table with all node data
-            node_data = np.append(node_data, mnwobj.node_data).view(
-                np.recarray)
-
-        # stress period data table for package (flopy convention)
-        stress_period_data = {}
-        itmp = []
-        for per in range(0, nper):
-            # dataset 3
-            itmp_per = int(line_parse(get_next_line(f))[0])
-            # dataset 4
-            # dict might be better here to only load submitted values
-            if itmp_per > 0:
-                current_4 = ModflowMnw2.get_empty_stress_period_data(
-                    itmp_per, aux_names=option)
-                for i in range(itmp_per):
-                    wellid, qdes, capmult, cprime, xyz = _parse_4a(
-                        get_next_line(f),
-                        mnw,
-                        gwt=gwt)
-                    hlim, qcut, qfrcmn, qfrcmx = 0, 0, 0, 0
-                    if mnw[wellid].qlimit < 0:
-                        hlim, qcut, qfrcmn, qfrcmx = _parse_4b(
-                            get_next_line(f))
-                    # update package stress period data table
-                    ndw = node_data[node_data.wellid == wellid]
-                    kij = [ndw.k[0], ndw.i[0], ndw.j[0]]
-                    current_4[i] = tuple(kij + [wellid, qdes, capmult,
-                                                 cprime, hlim, qcut,
-                                                 qfrcmn, qfrcmx] + xyz)
-                    # update well stress period data table
-                    mnw[wellid].stress_period_data[per] = tuple(
-                        kij + [per] + [qdes, capmult, cprime,
-                                       hlim, qcut, qfrcmn, qfrcmx] + xyz)
-                stress_period_data[per] = current_4
-            elif itmp_per == 0:  # no active mnws this stress period
-                pass
-            else:
-                # itmp_per < 0: copy pumping rates from the previous
-                # stress period for all wells
-                for wellid in mnw:
-                    mnw[wellid].stress_period_data[per] = \
-                        mnw[wellid].stress_period_data[per - 1]
-            itmp.append(itmp_per)
-
-        if openfile:
-            f.close()
-
-        # determine specified unit number
-        unitnumber = None
-        filenames = [None, None]
-        if ext_unit_dict is not None:
-            for key, value in ext_unit_dict.items():
-                if value.filetype == ModflowMnw2.ftype():
-                    unitnumber = key
-                    filenames[0] = os.path.basename(value.filename)
-
-                if ipakcb > 0:
-                    if key == ipakcb:
-                        filenames[1] = os.path.basename(value.filename)
-                        model.add_pop_key_list(key)
-
-        return ModflowMnw2(model, mnwmax=mnwmax, nodtot=nodtot,
-                           ipakcb=ipakcb, mnwprnt=mnwprint, aux=option,
-                           node_data=node_data, mnw=mnw,
-                           stress_period_data=stress_period_data, itmp=itmp,
-                           unitnumber=unitnumber, filenames=filenames)
-
-    def check(self, f=None, verbose=True, level=1, checktype=None):
-        """
-        Check mnw2 package data for common errors.
-
-        Parameters
-        ----------
-        f : str or file handle
-            String defining file name or file handle for summary file
-            of check method output. If a string is passed a file handle
-            is created. If f is None, check method does not write
-            results to a summary file. (default is None)
-        verbose : bool
-            Boolean flag used to determine if check method results are
-            written to the screen
-        level : int
-            Check method analysis level. If level=0, summary checks are
-            performed. If level=1, full checks are performed.
-
-        Returns
-        -------
-        chk : check object
-
-        Examples
-        --------
-
-        >>> import flopy
-        >>> m = flopy.modflow.Modflow.load('model.nam')
-        >>> m.mnw2.check()
-        """
-        chk = self._get_check(f, verbose, level, checktype)
-
-        # itmp
-        if self.itmp[0] < 0:
-            chk._add_to_summary(
-                type='Error', value=self.itmp[0],
-                desc='Itmp must be >= 0 for first stress period.')
-        invalid_itmp = np.array(self.itmp) > self.mnwmax
-        if np.any(invalid_itmp):
-            for v in np.array(self.itmp)[invalid_itmp]:
-                chk._add_to_summary(type='Error', value=v,
-                                    desc='Itmp value greater than MNWMAX')
-
-        chk.summarize()
-        return chk
-
-    def get_allnode_data(self):
-        """
-        Get a version of the node_data array that has all MNW2 nodes
-        listed explicitly. For example, MNWs with open intervals
-        encompassing multiple layers would have a row entry for each
-        layer. Ztop and zbotm values indicate the top and bottom
-        elevations of the node (these are the same as the layer top and
-        bottom if the node fully penetrates that layer).
-
-        Returns
-        -------
-        allnode_data : np.recarray
-            Numpy record array of same form as node_data, except each
-            row represents only one node.
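-
-        For example (hypothetical), a node_data row with ztop=10. and
-        zbotm=0. that spans two layers is expanded to two rows, one per
-        layer, with ztop/zbotm clipped to the layer boundaries:
-
-        >>> allnode_data = m.mnw2.get_allnode_data()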
-
-        """
-        from numpy.lib.recfunctions import stack_arrays
-
-        nd = []
-        for i in range(len(self.node_data)):
-            r = self.node_data[i]
-            if r['ztop'] - r['zbotm'] > 0:
-                startK = get_layer(self.parent.dis, r['i'], r['j'],
-                                   r['ztop'])
-                endK = get_layer(self.parent.dis, r['i'], r['j'],
-                                 r['zbotm'])
-                if startK == endK:
-                    r = r.copy()
-                    r['k'] = startK
-                    nd.append(r)
-                else:
-                    for k in np.arange(startK, endK + 1):
-                        rk = r.copy()
-                        rk['k'] = k
-                        if k > startK:
-                            loc = (k - 1, rk['i'], rk['j'])
-                            rk['ztop'] = self.parent.dis.botm[loc]
-                        if k < endK:
-                            loc = (k, rk['i'], rk['j'])
-                            rk['zbotm'] = self.parent.dis.botm[loc]
-                        nd.append(rk)
-            else:
-                nd.append(r)
-        return stack_arrays(nd, usemask=False).view(np.recarray)
-
-    def make_mnw_objects(self):
-        """
-        Make an Mnw object for each well in node_data and populate the
-        mnw attribute (a dict of Mnw objects keyed by wellid).
-
-        Returns
-        -------
-        None
-
-        """
-        node_data = self.node_data
-        stress_period_data = self.stress_period_data
-        self.mnw = {}
-        mnws = np.unique(node_data['wellid'])
-        for wellid in mnws:
-            nd = node_data[node_data.wellid == wellid]
-            nnodes = Mnw.get_nnodes(nd)
-            # if tops and bottoms are specified, flip nnodes
-            # maxtop = np.max(nd.ztop)
-            # minbot = np.min(nd.zbotm)
-            # if maxtop - minbot > 0 and nnodes > 0:
-            #     nnodes *= -1
-            # reshape stress period data to well
-            mnwspd = Mnw.get_empty_stress_period_data(self.nper,
-                                                      aux_names=self.aux)
-            for per, itmp in enumerate(self.itmp):
-                inds = stress_period_data[per].wellid == wellid
-                if itmp > 0 and np.any(inds):
-                    names = [n for n in
-                             stress_period_data[per][inds].dtype.names if
-                             n in mnwspd.dtype.names]
-                    mnwspd[per]['per'] = per
-                    for n in names:
-                        mnwspd[per][n] = stress_period_data[per][inds][n][0]
-                elif itmp == 0:
-                    continue
-                elif itmp < 0:
-                    mnwspd[per] = mnwspd[per - 1]
-
-            self.mnw[wellid] = Mnw(wellid,
-                                   nnodes=nnodes, nper=self.nper,
-                                   node_data=nd, stress_period_data=mnwspd,
-                                   mnwpackage=self)
-
-    def make_node_data(self, mnwobjs):
-        """
-        Make node_data recarray from Mnw objects
-
-        Parameters
-        ----------
-        mnwobjs : list, dict, or Mnw object
-            Mnw objects to build the master node_data table from
-
-        Returns
-        -------
-        None
-
-        """
-        if isinstance(mnwobjs, dict):
-            mnwobjs = list(mnwobjs.values())
-        elif isinstance(mnwobjs, Mnw):
-            mnwobjs = [mnwobjs]
-
-        mnwobj_node_data = []
-        for mnwobj in mnwobjs:
-            for rec in mnwobj.node_data:
-                mnwobj_node_data.append(rec)
-        node_data = ModflowMnw2.get_empty_node_data(len(mnwobj_node_data))
-
-        for ix, node in enumerate(mnwobj_node_data):
-            for jx, name in enumerate(node_data.dtype.names):
-                node_data[name][ix] = node[jx]
-
-        self.node_data = node_data
-
-    def make_stress_period_data(self, mnwobjs):
-        """
-        Make stress_period_data recarray from Mnw objects
-
-        Parameters
-        ----------
-        mnwobjs : list, dict, or Mnw object
-            Mnw objects to build the master stress period data from
-
-        Returns
-        -------
-        None
-
-        """
-        if isinstance(mnwobjs, dict):
-            mnwobjs = list(mnwobjs.values())
-        elif isinstance(mnwobjs, Mnw):
-            mnwobjs = [mnwobjs]
-        stress_period_data = {}
-        for per, itmp in enumerate(self.itmp):
-            if itmp > 0:
-                stress_period_data[per] = \
-                    ModflowMnw2.get_empty_stress_period_data(
-                        itmp, aux_names=self.aux)
-                i = 0
-                for mnw in mnwobjs:
-                    if per in mnw.stress_period_data.per:
-                        i += 1
-                        if i > itmp:
-                            raise ItmpError(itmp, i)
-                        names = [n for n in mnw.stress_period_data.dtype.names
-                                 if n in stress_period_data[per].dtype.names]
-                        stress_period_data[per]['wellid'][i - 1] = mnw.wellid
-                        for n in names:
-                            stress_period_data[per][n][i - 1] = \
-                                mnw.stress_period_data[n][per]
-                stress_period_data[per].sort(order='wellid')
-                if i < itmp:
-                    raise ItmpError(itmp, i)
-            elif itmp == 0:
-                continue
-            else:  # itmp < 0
-                stress_period_data[per] = stress_period_data[per - 1]
-        self.stress_period_data = MfList(self,
-                                         stress_period_data,
-                                         dtype=stress_period_data[0].dtype)
-
-    def export(self, f, **kwargs):
-        """
-        Export MNW2 data
-
-        Parameters
-        ----------
-        f : str
-            filename
-        kwargs : keyword arguments
-
-        Returns
-        -------
-        e : export object
-
-        """
-        # A better strategy would be to build a single 4-D MfList
-        # (currently the stress period data array has everything in
-        # layer 0)
-        self.node_data_MfList = MfList(self, self.get_allnode_data(),
-                                       dtype=self.node_data.dtype)
-        # make some modifications to ensure proper export
-        # avoid duplicate entries for qfrc
-        wellids = np.unique(self.node_data.wellid)
-        todrop = ['hlim', 'qcut', 'qfrcmn', 'qfrcmx']
-        # move duplicate fields from node_data to stress_period_data
-        for wellid in wellids:
-            wellnd = self.node_data.wellid == wellid
-            if np.max(self.node_data.qlimit[wellnd]) > 0:
-                for per in self.stress_period_data.data.keys():
-                    for col in todrop:
-                        inds = self.stress_period_data[per].wellid == wellid
-                        self.stress_period_data[per][col][inds] = \
-                            self.node_data[wellnd][col]
-        self.node_data_MfList = self.node_data_MfList.drop(todrop)
-        '''
-        todrop = {'qfrcmx', 'qfrcmn'}
-        names = list(set(self.stress_period_data.dtype.names).difference(todrop))
-        dtype = np.dtype([(k, d) for k, d in
-                          self.stress_period_data.dtype.descr
-                          if k not in todrop])
-        spd = {}
-        for k, v in self.stress_period_data.data.items():
-            newarr = np.array(np.zeros_like(self.stress_period_data[k][names]),
-                              dtype=dtype).view(np.recarray)
-            for n in dtype.names:
-                newarr[n] = self.stress_period_data[k][n]
-            spd[k] = newarr
-        self.stress_period_data = MfList(self, spd, dtype=dtype)
-        '''
-
-        return super(ModflowMnw2, self).export(f, **kwargs)
-
-    def _write_1(self, f_mnw):
-        """
-        Write dataset 1 (header line) of the MNW2 input file.
-
-        Parameters
-        ----------
-        f_mnw : file object
-            File object for MNW2 input file
-
-        Returns
-        -------
-        None
-
-        """
-        f_mnw.write('{:.0f} '.format(self.mnwmax))
-        if self.mnwmax < 0:
-            f_mnw.write('{:.0f} '.format(self.nodtot))
-        f_mnw.write('{:.0f} {:.0f}'.format(self.ipakcb, self.mnwprnt))
-        if len(self.aux) > 0:
-            for abc in self.aux:
-                f_mnw.write(' aux {}'.format(abc))
-        f_mnw.write('\n')
-
-    def write_file(self, filename=None, float_format=' {:15.7E}',
-                   use_tables=True):
-        """
-        Write the package file.
-
-        Parameters
-        ----------
-        filename : str
-            Filename to write to (default is None, in which case the
-            package file path is used)
-        float_format : str
-            python format statement for floats (default is ' {:15.7E}')
-        use_tables : bool
-            If True, rebuild the Mnw objects from the node_data and
-            stress_period_data tables before writing (default is True)
-
-        Returns
-        -------
-        None
-
-        """
-
-        if use_tables:
-            # update mnw objects from node and stress_period_data tables
-            self.make_mnw_objects()
-
-        if filename is not None:
-            self.fn_path = filename
-
-        f_mnw = open(self.fn_path, 'w')
-
-        # dataset 0 (header)
-        f_mnw.write('{0}\n'.format(self.heading))
-
-        # dataset 1
-        self._write_1(f_mnw)
-
-        # dataset 2
-        # need a method that assigns attributes from table to objects!
-        # call make_mnw_objects??
-        # (table is definitive then)
-        if use_tables:
-            # preserve any order
-            mnws = np.unique(self.node_data.wellid).tolist()
-        else:
-            mnws = self.mnw.values()
-        for k in mnws:
-            self.mnw[k]._write_2(f_mnw, float_format=float_format)
-
-        # dataset 3
-        for per in range(self.nper):
-            f_mnw.write('{:.0f} Stress Period {:.0f}\n'.format(
-                self.itmp[per], per + 1))
-            if self.itmp[per] > 0:
-
-                for n in range(self.itmp[per]):
-                    # dataset 4
-                    wellid = self.stress_period_data[per].wellid[n]
-                    qdes = self.stress_period_data[per].qdes[n]
-                    fmt = '{} ' + float_format
-                    f_mnw.write(fmt.format(wellid, qdes))
-                    if self.mnw[wellid].pumpcap > 0:
-                        fmt = ' ' + float_format
-                        f_mnw.write(fmt.format(
-                            self.stress_period_data[per].capmult[n]))
-                    if qdes > 0 and self.gwt:
-                        fmt = ' ' + float_format
-                        f_mnw.write(fmt.format(
-                            self.stress_period_data[per].cprime[n]))
-                    if len(self.aux) > 0:
-                        for var in self.aux:
-                            fmt = ' ' + float_format
-                            f_mnw.write(fmt.format(
-                                self.stress_period_data[per][var][n]))
-                    f_mnw.write('\n')
-                    if self.mnw[wellid].qlimit < 0:
-                        hlim, qcut = \
-                            self.stress_period_data[per][['hlim',
-                                                          'qcut']][n]
-                        fmt = float_format + ' {:.0f}'
-                        f_mnw.write(fmt.format(hlim, qcut))
-                        if qcut != 0:
-                            fmt = ' {0} {0}'.format(float_format)
-                            f_mnw.write(fmt.format(
-                                *self.stress_period_data[per][['qfrcmn',
-                                                               'qfrcmx']][n]))
-                        f_mnw.write('\n')
-        f_mnw.close()
-
-    @staticmethod
-    def ftype():
-        return 'MNW2'
-
-    @staticmethod
-    def defaultunit():
-        return 34
-
-
-def _parse_1(line):
-    """
-    Parse dataset 1 (header line) of an MNW2 input file.
-
-    Parameters
-    ----------
-    line : str
-        Dataset 1 line.
-
-    Returns
-    -------
-    mnwmax : int
-    nodtot : int or None
-    ipakcb : int
-    mnwprint : int
-    option : list of str
-        aux names
-
-    """
-    line = line_parse(line)
-    mnwmax = pop_item(line, int)
-    nodtot = None
-    if mnwmax < 0:
-        nodtot = pop_item(line, int)
-    ipakcb = pop_item(line, int)
-    mnwprint = pop_item(line, int)
-    option = []  # aux names
-    if len(line) > 0:
-        option += [line[i] for i in np.arange(1, len(line)) if
-                   'aux' in line[i - 1].lower()]
-    return mnwmax, nodtot, ipakcb, mnwprint, option
-
-
-def _parse_2(f):
-    """
-    Parse dataset 2 of an MNW2 input file (one well).
-
-    Parameters
-    ----------
-    f : file handle
-        Open MNW2 input file, positioned at the start of dataset 2 for
-        the well.
-
-    Returns
-    -------
-    mnw : Mnw object
-
-    """
-    # dataset 2a
-    line = line_parse(get_next_line(f))
-    if len(line) > 2:
-        warnings.warn('MNW2: {}\n'.format(line) +
-                      'Extra items in Dataset 2a!' +
-                      'Check for WELLIDs with space ' +
-                      'but not enclosed in quotes.')
-    wellid = pop_item(line).lower()
-    nnodes = pop_item(line, int)
-    # dataset 2b
-    line = line_parse(get_next_line(f))
-    losstype = pop_item(line)
-    pumploc = pop_item(line, int)
-    qlimit = pop_item(line, int)
-    ppflag = pop_item(line, int)
-    pumpcap = pop_item(line, int)
-
-    # dataset 2c
-    names = ['ztop', 'zbotm', 'k', 'i', 'j', 'rw', 'rskin', 'kskin', 'B',
-             'C', 'P', 'cwc', 'pp']
-    # dataset 2d; dict of lists for each variable
-    d2d = {n: [] for n in names}
-    # set default values of 0 for all 2c items
-    d2dw = dict(
-        zip(['rw', 'rskin', 'kskin', 'B', 'C', 'P', 'cwc'], [0] * 7))
-    if losstype.lower() != 'none':
-        # update d2dw items
-        # dict of values for well
-        d2dw.update(_parse_2c(get_next_line(f), losstype))
-        for k, v in d2dw.items():
-            if v > 0:
-                d2d[k].append(v)
-    # dataset 2d
-    pp = 1  # partial penetration flag
-    for i in range(np.abs(nnodes)):
-        line = line_parse(get_next_line(f))
-        if nnodes > 0:
-            d2d['k'].append(pop_item(line, int) - 1)
-            d2d['i'].append(pop_item(line, int) - 1)
-            d2d['j'].append(pop_item(line, int) - 1)
-        elif nnodes < 0:
-            d2d['ztop'].append(pop_item(line, float))
-            d2d['zbotm'].append(pop_item(line, float))
-            d2d['i'].append(pop_item(line, int) - 1)
-            d2d['j'].append(pop_item(line, int) - 1)
-        d2di = _parse_2c(line, losstype, rw=d2dw['rw'], rskin=d2dw['rskin'],
-                         kskin=d2dw['kskin'],
-                         B=d2dw['B'], C=d2dw['C'], P=d2dw['P'],
-                         cwc=d2dw['cwc'])
-        # append only the returned items
-        for k, v in d2di.items():
-            d2d[k].append(v)
-        if ppflag > 0 and nnodes > 0:
-            d2d['pp'].append(pop_item(line, float))
-
-    # dataset 2e
-    pumplay = None
-    pumprow = None
-    pumpcol = None
-    zpump = None
-    if pumploc != 0:
-        line = line_parse(get_next_line(f))
-        if pumploc > 0:
-            pumplay = pop_item(line, int)
-            pumprow = pop_item(line, int)
-            pumpcol = pop_item(line, int)
-        else:
-            zpump = pop_item(line, float)
-    # dataset 2f
-    hlim = None
-    qcut = None
-    qfrcmx = None
-    qfrcmn = None
-    if qlimit > 0:
-        # Only specify dataset 2f if the value of Qlimit in dataset 2b
-        # is positive. Do not enter fractions as percentages.
-        line = line_parse(get_next_line(f))
-        hlim = pop_item(line, float)
-        qcut = pop_item(line, int)
-        if qcut != 0:
-            qfrcmn = pop_item(line, float)
-            qfrcmx = pop_item(line, float)
-    # dataset 2g
-    hlift = None
-    liftq0 = None
-    liftqmax = None
-    hwtol = None
-    if pumpcap > 0:
-        # The number of additional data points on the curve (and lines
-        # in dataset 2h) must correspond to the value of PUMPCAP for
-        # this well (where PUMPCAP <= 25).
-        line = line_parse(get_next_line(f))
-        hlift = pop_item(line, float)
-        liftq0 = pop_item(line, float)
-        liftqmax = pop_item(line, float)
-        hwtol = pop_item(line, float)
-    # dataset 2h
-    liftn = None
-    qn = None
-    if pumpcap > 0:
-        # Enter data in order of decreasing lift (that is, start with
-        # the point corresponding to the highest value of total dynamic
-        # head) and increasing discharge. The discharge value for the
-        # last data point in the sequence must be less than the value
-        # of LIFTqmax.
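-        # as a sketch (hypothetical values), PUMPCAP = 2 might be
-        # followed by two dataset 2h lines such as:
-        #     95.0  200.0
-        #     90.0  500.0
-        # note that the loop below keeps only the last (liftn, qn)
-        # pair that is read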
-        # pumpcap is an int giving the number of dataset 2h entries
-        for i in range(pumpcap):
-            line = line_parse(get_next_line(f))
-            liftn = pop_item(line, float)
-            qn = pop_item(line, float)
-
-    return Mnw(wellid,
-               nnodes=nnodes,
-               losstype=losstype, pumploc=pumploc, qlimit=qlimit,
-               ppflag=ppflag, pumpcap=pumpcap,
-               k=d2d['k'], i=d2d['i'], j=d2d['j'], ztop=d2d['ztop'],
-               zbotm=d2d['zbotm'],
-               rw=d2d['rw'], rskin=d2d['rskin'], kskin=d2d['kskin'],
-               B=d2d['B'], C=d2d['C'], P=d2d['P'], cwc=d2d['cwc'],
-               pp=d2d['pp'],
-               pumplay=pumplay, pumprow=pumprow, pumpcol=pumpcol,
-               zpump=zpump,
-               hlim=hlim, qcut=qcut, qfrcmn=qfrcmn, qfrcmx=qfrcmx,
-               hlift=hlift, liftq0=liftq0, liftqmax=liftqmax, hwtol=hwtol,
-               liftn=liftn, qn=qn)
-
-
-def _parse_2c(line, losstype, rw=-1, rskin=-1, kskin=-1, B=-1, C=-1, P=-1,
-              cwc=-1):
-    """
-    Parse an MNW2 dataset 2c or 2d line.
-
-    Parameters
-    ----------
-    line : str or list
-        Line (or parsed line) to read.
-    losstype : str
-        MNW2 well-loss model flag.
-    rw : float
-    rskin : float
-    kskin : float
-    B : float
-    C : float
-    P : float
-    cwc : float
-        Values already defined for the entire well (dataset 2c); a
-        negative value indicates the variable is read by node here.
-
-    Returns
-    -------
-    nd : dict
-        Dict of the dataset 2c/2d items that were read.
-
-    """
-    if not isinstance(line, list):
-        line = line_parse(line)
-    nd = {}  # dict of dataset 2c/2d items
-    if losstype.lower() != 'specifycwc':
-        if rw < 0:
-            nd['rw'] = pop_item(line, float)
-        if losstype.lower() == 'skin':
-            if rskin < 0:
-                nd['rskin'] = pop_item(line, float)
-            if kskin < 0:
-                nd['kskin'] = pop_item(line, float)
-        elif losstype.lower() == 'general':
-            if B < 0:
-                nd['B'] = pop_item(line, float)
-            if C < 0:
-                nd['C'] = pop_item(line, float)
-            if P < 0:
-                nd['P'] = pop_item(line, float)
-    else:
-        if cwc < 0:
-            nd['cwc'] = pop_item(line, float)
-    return nd
-
-
-def _parse_4a(line, mnw, gwt=False):
-    """
-    Parse an MNW2 dataset 4a line.
-
-    Parameters
-    ----------
-    line : str
-        Dataset 4a line.
-    mnw : dict
-        Dict of Mnw objects, keyed by wellid.
-    gwt : bool
-        Flag indicating whether GW transport process is active.
-
-    Returns
-    -------
-    wellid : str
-    qdes : float
-    capmult : int
-    cprime : float
-    xyz : list
-        Any remaining (auxiliary) items on the line.
-
-    """
-    capmult = 0
-    cprime = 0
-    line = line_parse(line)
-    wellid = pop_item(line).lower()
-    pumpcap = mnw[wellid].pumpcap
-    qdes = pop_item(line, float)
-    if pumpcap > 0:
-        capmult = pop_item(line, int)
-    if qdes > 0 and gwt:
-        cprime = pop_item(line, float)
-    xyz = line
-    return wellid, qdes, capmult, cprime, xyz
-
-
-def _parse_4b(line):
-    """
-    Parse an MNW2 dataset 4b line.
-
-    Parameters
-    ----------
-    line : str
-        Dataset 4b line.
-
-    Returns
-    -------
-    hlim : float
-    qcut : int
-    qfrcmn : float
-    qfrcmx : float
-
-    """
-    qfrcmn = 0
-    qfrcmx = 0
-    line = line_parse(line)
-    hlim = pop_item(line, float)
-    qcut = pop_item(line, int)
-    if qcut != 0:
-        qfrcmn = pop_item(line, float)
-        qfrcmx = pop_item(line, float)
-    return hlim, qcut, qfrcmn, qfrcmx
-
-
-class ItmpError(Exception):
-    def __init__(self, itmp, nactivewells):
-        self.itmp = itmp
-        self.nactivewells = nactivewells
-
-    def __str__(self):
-        s = '\n\nItmp value of {} '.format(self.itmp) + \
-            'is positive but does not equal the number of active wells ' + \
-            'specified ({}). '.format(self.nactivewells) + \
-            'See MNW2 package documentation for details.'
-        return s
+import os
+import sys
+import warnings
+
+import numpy as np
+from .mfdis import get_layer
+from ..utils import check
+from ..utils.flopy_io import line_parse, pop_item, get_next_line
+from ..utils import MfList
+from ..utils.recarray_utils import create_empty_recarray
+
+from ..pakbase import Package
+
+
+class Mnw(object):
+    """
+    Multi-Node Well object class
+
+    Parameters
+    ----------
+    wellid : str or int
+        is the name of the well. This is a unique alphanumeric
+        identification label for each well. The text string is limited
+        to 20 alphanumeric characters. If the name of the well includes
+        spaces, then enclose the name in quotes. Flopy converts wellid
+        string to lower case.
+    nnodes : int
+        is the number of cells (nodes) associated with this well.
+ NNODES normally is > 0, but for the case of a vertical borehole, + setting NNODES < 0 will allow the user to specify the elevations of + the tops and bottoms of well screens or open intervals (rather than + grid layer numbers), and the absolute value of NNODES equals the + number of open intervals (or well screens) to be specified in dataset + 2d. If this option is used, then the model will compute the layers in + which the open intervals occur, the lengths of the open intervals, + and the relative vertical position of the open interval within a model + layer (for example, see figure 14 and related discussion). + losstype : str + is a character flag to determine the user-specified model for well loss + (equation 2). Available options (that is, place one of the following + approved words in this field) are: + NONE there are no well corrections and the head in the well is + assumed to equal the head in the cell. This option (hWELL = hn) + is only valid for a single-node well (NNODES = 1). (This is + equivalent to using the original WEL Package of MODFLOW, + but specifying the single-node well within the MNW2 Package + enables the use of constraints.) + THIEM this option allows for only the cell-to-well correction at the + well based on the Thiem (1906) equation; head in the well is + determined from equation 2 as (hWELL = hn + AQn), and the model + computes A on the basis of the user-specified well radius (Rw) + and previously defined values of cell transmissivity and grid + spacing. Coefficients B and C in equation 2 are automatically + set = 0.0. User must define Rw in dataset 2c or 2d. + SKIN this option allows for formation damage or skin corrections at + the well. hWELL = hn + AQn + BQn (from equation 2), where A is + determined by the model from the value of Rw, and B is + determined by the model from Rskin and Kskin. User must define + Rw, Rskin, and Kskin in dataset 2c or 2d. + GENERAL head loss is defined with coefficients A, B, and C and power + exponent P (hWELL = hn + AQn + BQn + CQnP). A is determined by + the model from the value of Rw. User must define Rw, B, C, and + P in dataset 2c or 2d. A value of P = 2.0 is suggested if no + other data are available (the model allows 1.0 <= P <= 3.5). + Entering a value of C = 0 will result in a "linear" model in + which the value of B is entered directly (rather than entering + properties of the skin, as with the SKIN option). + SPECIFYcwc the user specifies an effective conductance value + (equivalent to the combined effects of the A, B, and C + well-loss coefficients expressed in equation 15) between the + well and the cell representing the aquifer, CWC. User must + define CWC in dataset 2c or 2d. If there are multiple screens + within the grid cell or if partial penetration corrections are + to be made, then the effective value of CWC for the node may + be further adjusted automatically by MNW2. + pumploc : int + is an integer flag pertaining to the location along the borehole of + the pump intake (if any). If PUMPLOC = 0, then either there is no pump + or the intake location (or discharge point for an injection well) is + assumed to occur above the first active node associated with the multi- + node well (that is, the node closest to the land surface or to the + wellhead). If PUMPLOC > 0, then the cell in which the intake (or + outflow) is located will be specified in dataset 2e as a LAY-ROW-COL + grid location. 
For a vertical well only, specifying PUMPLOC < 0, will + enable the option to define the vertical position of the pump intake + (or outflow) as an elevation in dataset 2e (for the given spatial grid + location [ROW-COL] defined for this well in 2d). + qlimit : int + is an integer flag that indicates whether the water level (head) in + the well will be used to constrain the pumping rate. If Qlimit = 0, + then there are no constraints for this well. If Qlimit > 0, then + pumpage will be limited (constrained) by the water level in the well, + and relevant parameters are constant in time and defined below in + dataset 2f. If Qlimit < 0, then pumpage will be limited (constrained) + by the water level in the well, and relevant parameters can vary with + time and are defined for every stress period in dataset 4b. + ppflag : int + is an integer flag that determines whether the calculated head in the + well will be corrected for the effect of partial penetration of the + well screen in the cell. If PPFLAG = 0, then the head in the well will + not be adjusted for the effects of partial penetration. If PPFLAG > 0, + then the head in the well will be adjusted for the effects of partial + penetration if the section of well containing the well screen is + vertical (as indicated by identical row-column locations in the grid). + If NNODES < 0 (that is, the open intervals of the well are defined by + top and bottom elevations), then the model will automatically calculate + the fraction of penetration for each node and the relative vertical + position of the well screen. If NNODES > 0, then the fraction of + penetration for each node must be defined in dataset 2d (see below) + and the well screen will be assumed to be centered vertically within + the thickness of the cell (except if the well is located in the + uppermost model layer that is under unconfined conditions, in which + case the bottom of the well screen will be assumed to be aligned with + the bottom boundary of the cell and the assumed length of well screen + will be based on the initial head in that cell). + pumpcap : int + is an integer flag and value that determines whether the discharge of + a pumping (withdrawal) well (Q < 0.0) will be adjusted for changes in + the lift (or total dynamic head) with time. If PUMPCAP = 0, then the + discharge from the well will not be adjusted on the basis of changes + in lift. If PUMPCAP > 0 for a withdrawal well, then the discharge from + the well will be adjusted on the basis of the lift, as calculated from + the most recent water level in the well. In this case, data describing + the head-capacity relation for the pump must be listed in datasets 2g + and 2h, and the use of that relation can be switched on or off for + each stress period using a flag in dataset 4a. The number of entries + (lines) in dataset 2h corresponds to the value of PUMPCAP. If PUMPCAP + does not equal 0, it must be set to an integer value of between 1 and + 25, inclusive. + rw : float + radius of the well (losstype == 'THIEM', 'SKIN', or 'GENERAL') + rskin : float + radius to the outer limit of the skin (losstype == 'SKIN') + kskin : float + hydraulic conductivity of the skin + B : float + coefficient of the well-loss eqn. (eqn. 2 in MNW2 documentation) + (losstype == 'GENERAL') + C : float + coefficient of the well-loss eqn. (eqn. 2 in MNW2 documentation) + (losstype == 'GENERAL') + P : float + coefficient of the well-loss eqn. (eqn. 2 in MNW2 documentation) + (losstype == 'GENERAL') + cwc : float + cell-to-well conductance. 
+ (losstype == 'SPECIFYcwc') + pp : float + fraction of partial penetration for the cell. Only specify if + PFLAG > 0 and NNODES > 0. + k : int + layer index of well (zero-based) + i : int + row index of well (zero-based) + j : int + column index of well (zero-based) + ztop : float + top elevation of open intervals of vertical well. + zbotm : float + bottom elevation of open intervals of vertical well. + node_data : numpy record array + table containing MNW data by node. A blank node_data template can be + created via the ModflowMnw2.get_empty_mnw_data() static method. + + Note: Variables in dataset 2d (e.g. rw) can be entered as a single + value for the entire well (above), or in node_data (or dataset 2d) by + node. Variables not in dataset 2d (such as pumplay) can be included + in node data for convenience (to allow construction of MNW2 package + from a table), but are only written to MNW2 as a single variable. + When writing non-dataset 2d variables to MNW2 input, the first value + for the well will be used. + + Other variables (e.g. hlim) can be entered here as + constant for all stress periods, or by stress period below in stress_period_data. + See MNW2 input instructions for more details. + + Columns are: + k : int + layer index of well (zero-based) + i : int + row index of well (zero-based) + j : int + column index of well (zero-based) + ztop : float + top elevation of open intervals of vertical well. + zbotm : float + bottom elevation of open intervals of vertical well. + wellid : str + losstype : str + pumploc : int + qlimit : int + ppflag : int + pumpcap : int + rw : float + rskin : float + kskin : float + B : float + C : float + P : float + cwc : float + pp : float + pumplay : int + pumprow : int + pumpcol : int + zpump : float + hlim : float + qcut : int + qfrcmn : float + qfrcmx : float + hlift : float + liftq0 : float + liftqmax : float + hwtol : float + liftn : float + qn : float + + stress_period_data : numpy record array + table containing MNW pumping data for all stress periods (dataset 4 in + the MNW2 input instructions). A blank stress_period_data template can + be created via the Mnw.get_empty_stress_period_data() static method. + Columns are: + per : int + stress period + qdes : float + is the actual (or maximum desired, if constraints are to be + applied) volumetric pumping rate (negative for withdrawal or + positive for injection) at the well (L3/T). Qdes should be + set to 0 for nonpumping wells. If constraints are applied, + then the calculated volumetric withdrawal or injection rate + may be adjusted to range from 0 to Qdes and is not allowed + to switch directions between withdrawal and injection + conditions during any stress period. When PUMPCAP > 0, in the + first stress period in which Qdes is specified with a negative + value, Qdes represents the maximum operating discharge for the + pump; in subsequent stress periods, any different negative + values of Qdes are ignored, although values are subject to + adjustment for CapMult. If Qdes >= 0.0, then pump-capacity + adjustments are not applied. + capmult : int + is a flag and multiplier for implementing head-capacity + relations during a given stress period. Only specify if + PUMPCAP > 0 for this well. If CapMult <= 0, then + head-capacity relations are ignored for this stress period. + If CapMult = 1.0, then head-capacity relations defined + in datasets 2g and 2h are used. 
If CapMult equals any other + positive value (for example, 0.6 or 1.1), then head-capacity + relations are used but adjusted and shifted by multiplying + the discharge value indicated by the head-capacity curve by + the value of CapMult. + cprime : float + is the concentration in the injected fluid. Only specify if + Qdes > 0 and GWT process is active. + hlim : float + qcut : int + qfrcmn : float + qfrcmx : float + Note: If auxiliary variables are also being used, additional columns + for these must be included. + pumplay : int + pumprow : int + pumpcol : int + PUMPLAY, PUMPROW, and PUMPCOL are the layer, row, and column numbers, + respectively, of the cell (node) in this multi-node well where the + pump intake (or outflow) is located. The location defined in dataset + 2e should correspond with one of the nodes listed in 2d for this + multi-node well. These variables are only read if PUMPLOC > 0 in 2b. + zpump : float + is the elevation of the pump intake (or discharge pipe location for an + injection well). Zpump is read only if PUMPLOC < 0; in this case, + the model assumes that the borehole is vertical and will compute the + layer of the grid in which the pump intake is located. + hlim : float + is the limiting water level (head) in the well, which is a minimum for + discharging wells and a maximum for injection wells. For example, in a + discharging well, when hWELL falls below hlim, the flow from the well + is constrained. + qcut : int + is an integer flag that indicates how pumping limits Qfrcmn and + Qfrcmx will be specified. If pumping limits are to be specified as a + rate (L3/T), then set QCUT > 0; if pumping limits are to be specified + as a fraction of the specified pumping rate (Qdes), then set QCUT < 0. + If there is not a minimum pumping rate below which the pump becomes + inactive, then set QCUT = 0. + qfrcmn : float + is the minimum pumping rate or fraction of original pumping rate + (a choice that depends on QCUT) that a well must exceed to remain + active during a stress period. The absolute value of Qfrcmn must be + less than the absolute value of Qfrcmx (defined next). Only specify + if QCUT != 0. + qfrcmx : float + is the minimum pumping rate or fraction of original pumping rate that + must be exceeded to reactivate a well that had been shut off based on + Qfrcmn during a stress period. The absolute value of Qfrcmx must be + greater than the absolute value of Qfrcmn. Only specify if QCUT != 0. + hlift : float + is the reference head (or elevation) corresponding to the discharge + point for the well. This is typically at or above the land surface, + and can be increased to account for additional head loss due to + friction in pipes. + liftq0 : float + is the value of lift (total dynamic head) that exceeds the capacity of + the pump. If the calculated lift equals or exceeds this value, then + the pump is shut off and discharge from the well ceases. + liftqmax : float + is the value of lift (total dynamic head) corresponding to the maximum + pumping (discharge) rate for the pump. If the calculated lift is less + than or equal to LIFTqmax, then the pump will operate at its design + capacity, assumed to equal the user-specified value of Qdes + (in dataset 4a). LIFTqmax will be associated with the value of Qdes in + the first stress period in which Qdes for the well is less than 0.0. 
+ hwtol : float + is a minimum absolute value of change in the computed water level in + the well allowed between successive iterations; if the value of hWELL + changes from one iteration to the next by a value smaller than this + tolerance, then the value of discharge computed from the head capacity + curves will be locked for the remainder of that time step. It is + recommended that HWtol be set equal to a value approximately one or + two orders of magnitude larger than the value of HCLOSE, but if the + solution fails to converge, then this may have to be adjusted. + liftn : float + is a value of lift (total dynamic head) that corresponds to a known + value of discharge (Qn) for the given pump, specified as the second + value in this line. + qn : float + is the value of discharge corresponding to the height of lift + (total dynamic head) specified previously on this line. Sign + (positive or negative) is ignored. + mnwpackage : ModflowMnw2 instance + package that mnw is attached to + + Returns + ------- + None + + """ + by_node_variables = ['k', 'i', 'j', 'ztop', 'zbotm', 'rw', 'rskin', + 'kskin', 'B', 'C', 'P', 'cwc', 'pp'] + + def __init__(self, wellid, + nnodes=1, nper=1, + losstype="skin", pumploc=0, qlimit=0, ppflag=0, pumpcap=0, + rw=1, rskin=2, kskin=10, + B=None, C=0, P=2., cwc=None, pp=1, + k=0, i=0, j=0, ztop=0, zbotm=0, + node_data=None, stress_period_data=None, + pumplay=0, pumprow=0, pumpcol=0, zpump=None, + hlim=None, qcut=None, qfrcmn=None, qfrcmx=None, + hlift=None, liftq0=None, liftqmax=None, hwtol=None, + liftn=None, qn=None, mnwpackage=None): + """ + Class constructor + """ + + self.nper = nper + self.mnwpackage = mnwpackage # associated ModflowMnw2 instance + self.aux = None if mnwpackage is None else mnwpackage.aux + + # dataset 2a + if isinstance(wellid, str): + wellid = wellid.lower() + self.wellid = wellid + self.nnodes = nnodes + # dataset 2b + self.losstype = losstype.lower() + self.pumploc = pumploc + self.qlimit = qlimit + self.ppflag = ppflag + self.pumpcap = pumpcap + # dataset 2c (can be entered by node) + self.rw = rw + self.rskin = rskin + self.kskin = kskin + self.B = B + self.C = C + self.P = P + self.cwc = cwc + self.pp = pp + # dataset 2d (entered by node) + # indices should be lists (for iteration over nodes) + self.k = k + self.i = i + self.j = j + self.ztop = ztop + self.zbotm = zbotm + for v in self.by_node_variables: + if not isinstance(self.__dict__[v], list): + self.__dict__[v] = [self.__dict__[v]] + # dataset 2e + self.pumplay = pumplay + self.pumprow = pumprow + self.pumpcol = pumpcol + self.zpump = zpump + # dataset 2f + self.hlim = hlim + self.qcut = qcut + self.qfrcmn = qfrcmn + self.qfrcmx = qfrcmx + # dataset 2g + self.hlift = hlift + self.liftq0 = liftq0 + self.liftqmax = liftqmax + self.hwtol = hwtol + # dataset 2h + self.liftn = liftn + self.qn = qn + + # dataset 4 + + # accept stress period data (pumping rates) from structured array + # does this need to be Mflist? 
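+        # (a plain recarray is used here for the per-well table; the
+        # package-level stress_period_data on ModflowMnw2 is wrapped
+        # in an MfList)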
+        self.stress_period_data = self.get_empty_stress_period_data(nper)
+        if stress_period_data is not None:
+            for n in stress_period_data.dtype.names:
+                self.stress_period_data[n] = stress_period_data[n]
+
+        # accept node data from structured array
+        self.node_data = ModflowMnw2.get_empty_node_data(np.abs(nnodes),
+                                                         aux_names=self.aux)
+        if node_data is not None:
+            for n in node_data.dtype.names:
+                self.node_data[n] = node_data[n]
+                # convert string values to lower case
+                for idx, v in enumerate(self.node_data[n]):
+                    if isinstance(v, str):
+                        self.node_data[n][idx] = v.lower()
+
+        # build recarray of node data from MNW2 input file
+        if node_data is None:
+            self.make_node_data()
+        else:
+            self._set_attributes_from_node_data()
+
+        for n in ['k', 'i', 'j']:
+            if len(self.__dict__[n]) > 0:
+                # need to set for each period
+                self.stress_period_data[n] = [self.__dict__[n][0]]
+
+    def make_node_data(self):
+        """
+        Make the node data array from variables entered individually.
+
+        Returns
+        -------
+        None
+
+        """
+        nnodes = self.nnodes
+        node_data = ModflowMnw2.get_empty_node_data(np.abs(nnodes),
+                                                    aux_names=self.aux)
+
+        names = Mnw.get_item2_names(self)
+        for n in names:
+            node_data[n] = self.__dict__[n]
+        self.node_data = node_data
+
+    @staticmethod
+    def get_empty_stress_period_data(nper=0, aux_names=None, structured=True,
+                                     default_value=0):
+        """
+        Get an empty stress_period_data recarray with the default dtype
+
+        Parameters
+        ----------
+        nper : int
+            Number of stress periods (default is 0)
+        aux_names : list
+            List of aux name strings (default is None)
+        structured : bool
+            Boolean indicating if a structured (True) or unstructured (False)
+            model (default is True).
+        default_value : float
+            Default value for float variables (default is 0).
+
+        Returns
+        -------
+        ra : np.recarray
+            Recarray with one row per stress period
+
+        """
+        dtype = Mnw.get_default_spd_dtype(structured=structured)
+        if aux_names is not None:
+            dtype = Package.add_to_dtype(dtype, aux_names, np.float32)
+        return create_empty_recarray(nper, dtype, default_value=default_value)
+
+    @staticmethod
+    def get_default_spd_dtype(structured=True):
+        """
+        Get the default stress period data dtype
+
+        Parameters
+        ----------
+        structured : bool
+            Boolean that defines if a structured (True) or unstructured (False)
+            dtype will be created (default is True). Not implemented for
+            unstructured.
+
+        Returns
+        -------
+        dtype : np.dtype
+
+        """
+        if structured:
+            return np.dtype([('k', np.int),
+                             ('i', np.int),
+                             ('j', np.int),
+                             ('per', np.int),
+                             ('qdes', np.float32),
+                             ('capmult', np.int),
+                             ('cprime', np.float32),
+                             ('hlim', np.float32),
+                             ('qcut', np.int),
+                             ('qfrcmn', np.float32),
+                             ('qfrcmx', np.float32)])
+        else:
+            msg = 'Mnw2: get_default_spd_dtype not implemented for ' + \
+                  'unstructured grids'
+            raise NotImplementedError(msg)
+
+    @staticmethod
+    def get_item2_names(mnw2obj=None, node_data=None):
+        """
+        Get the names of the dataset 2 variables that apply to a well,
+        based on its loss type and option flags.
+
+        Parameters
+        ----------
+        mnw2obj : Mnw object
+            Mnw object (default is None)
+        node_data : numpy record array
+            Record array of node data for the well (default is None)
+
+        Returns
+        -------
+        names : list
+            List of dtype names.
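+
+        Examples
+        --------
+        A minimal sketch; the well name, loss type, and radius below are
+        arbitrary placeholder values, not values from a real model:
+
+        >>> from flopy.modflow.mfmnw2 import Mnw
+        >>> mnw = Mnw('well1', losstype='thiem', rw=0.5)
+        >>> names = Mnw.get_item2_names(mnw)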
+
+        """
+
+        if node_data is not None:
+            nnodes = Mnw.get_nnodes(node_data)
+            losstype = node_data.losstype[0].lower()
+            ppflag = node_data.ppflag[0]
+            pumploc = node_data.pumploc[0]
+            qlimit = node_data.qlimit[0]
+            pumpcap = node_data.pumpcap[0]
+            qcut = node_data.qcut[0]
+        else:
+            # get names based on mnw2obj attribute values
+            nnodes = mnw2obj.nnodes
+            losstype = mnw2obj.losstype.lower()
+            ppflag = mnw2obj.ppflag
+            pumploc = mnw2obj.pumploc
+            qlimit = mnw2obj.qlimit
+            pumpcap = mnw2obj.pumpcap
+            qcut = mnw2obj.qcut
+
+        names = ['i', 'j']
+        if nnodes > 0:
+            names += ['k']
+        if nnodes < 0:
+            names += ['ztop', 'zbotm']
+        names += ['wellid', 'losstype', 'pumploc', 'qlimit', 'ppflag',
+                  'pumpcap']
+        if losstype.lower() == 'thiem':
+            names += ['rw']
+        elif losstype.lower() == 'skin':
+            names += ['rw', 'rskin', 'kskin']
+        elif losstype.lower() == 'general':
+            names += ['rw', 'B', 'C', 'P']
+        elif losstype.lower() == 'specifycwc':
+            names += ['cwc']
+        if ppflag > 0 and nnodes > 0:
+            names += ['pp']
+        if pumploc != 0:
+            if pumploc > 0:
+                names += ['pumplay', 'pumprow', 'pumpcol']
+            if pumploc < 0:
+                names += ['zpump']
+        if qlimit > 0:
+            names += ['hlim', 'qcut']
+            if qcut != 0:
+                names += ['qfrcmn', 'qfrcmx']
+        if pumpcap > 0:
+            names += ['hlift', 'liftq0', 'liftqmax', 'hwtol']
+            names += ['liftn', 'qn']
+        return names
+
+    @staticmethod
+    def get_nnodes(node_data):
+        """
+        Get the number of MNW2 nodes.
+
+        Parameters
+        ----------
+        node_data : numpy record array
+            Record array of node data for a single well.
+
+        Returns
+        -------
+        nnodes : int
+            Number of MNW2 nodes; negative if the open intervals are
+            defined by ztop and zbotm elevations (dataset 2d format 2).
+
+        """
+        nnodes = len(node_data)
+        # check if ztop and zbotm were entered,
+        # flip nnodes for format 2
+        if np.sum(node_data.ztop - node_data.zbotm) > 0:
+            nnodes *= -1
+        return nnodes
+
+    @staticmethod
+    def sort_node_data(node_data):
+        """Sort node data by layer, or by ztop in descending order."""
+        # sort by layer (layer input option)
+        if np.any(np.diff(node_data['k']) < 0):
+            node_data.sort(order=['k'])
+
+        # reverse sort by ztop if it's specified and not sorted correctly
+        if np.any(np.diff(node_data['ztop']) > 0):
+            node_data = np.sort(node_data, order=['ztop'])[::-1]
+        return node_data
+
+    def check(self, f=None, verbose=True, level=1, checktype=None):
+        """
+        Check mnw object for common errors.
+
+        Parameters
+        ----------
+        f : str or file handle
+            String defining file name or file handle for summary file
+            of check method output. If a string is passed a file handle
+            is created. If f is None, check method does not write
+            results to a summary file. (default is None)
+        verbose : bool
+            Boolean flag used to determine if check method results are
+            written to the screen
+        level : int
+            Check method analysis level. If level=0, summary checks are
+            performed. If level=1, full checks are performed.
+
+        Returns
+        -------
+        chk : flopy.utils.check object
+
+        """
+        chk = self._get_check(f, verbose, level, checktype)
+        if self.losstype.lower() not in ['none', 'thiem', 'skin', 'general',
+                                         'specifycwc']:
+            chk._add_to_summary(type='Error', k=self.k, i=self.i, j=self.j,
+                                value=self.losstype, desc='Invalid losstype.')
+
+        chk.summarize()
+        return chk
+
+    def _get_check(self, f, verbose, level, checktype):
+        if checktype is not None:
+            return checktype(self, f=f, verbose=verbose, level=level)
+        else:
+            return check(self, f=f, verbose=verbose, level=level)
+
+    def _set_attributes_from_node_data(self):
+        """
+        Populates the Mnw object attributes with values from node_data table.
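+
+        By-node variables (e.g. rw) are stored as lists with one entry per
+        node; all other variables are taken from the first node record.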
+ """ + names = Mnw.get_item2_names(node_data=self.node_data) + for n in names: + # assign by node variables as lists if they are being included + if n in self.by_node_variables: # and len(np.unique(self.node_data[n])) > 1: + self.__dict__[n] = list(self.node_data[n]) + else: + self.__dict__[n] = self.node_data[n][0] + + def _write_2(self, f_mnw, float_format=' {:15.7E}', indent=12): + """ + Write out dataset 2 for MNW. + + Parameters + ---------- + f_mnw : package file handle + file handle for MNW2 input file + float_format : str + python format statement for floats (default is ' {:15.7E}'). + indent : int + number of spaces to indent line (default is 12). + + Returns + ------- + None + + """ + # enforce sorting of node data + self.node_data = Mnw.sort_node_data(self.node_data) + + # update object attributes with values from node_data + self._set_attributes_from_node_data() + + indent = ' ' * indent + # dataset 2a + fmt = '{} {:.0f}\n' + f_mnw.write(fmt.format(self.wellid, self.nnodes)) + # dataset 2b + fmt = indent + '{} {:.0f} {:.0f} {:.0f} {:.0f}\n' + f_mnw.write(fmt.format(self.losstype, + self.pumploc, + self.qlimit, + self.ppflag, + self.pumpcap)) + + # dataset 2c + def _assign_by_node_var(var): + """Assign negative number if variable is entered by node.""" + if len(np.unique(var)) > 1: + return -1 + return var[0] + + if self.losstype.lower() != 'none': + if self.losstype.lower() != 'specifycwc': + fmt = indent + float_format + ' ' + f_mnw.write(fmt.format(_assign_by_node_var(self.rw))) + if self.losstype.lower() == 'skin': + fmt = '{0} {0}'.format(float_format) + f_mnw.write(fmt.format(_assign_by_node_var(self.rskin), + _assign_by_node_var(self.kskin))) + elif self.losstype.lower() == 'general': + fmt = '{0} {0} {0}'.format(float_format) + f_mnw.write(fmt.format(_assign_by_node_var(self.B), + _assign_by_node_var(self.C), + _assign_by_node_var(self.P))) + else: + fmt = indent + float_format + f_mnw.write(fmt.format(_assign_by_node_var(self.cwc))) + f_mnw.write('\n') + # dataset 2d + if self.nnodes > 0: + def _getloc(n): + """Output for dataset 2d1.""" + return indent + '{:.0f} {:.0f} {:.0f}'.format(self.k[n] + 1, + self.i[n] + 1, + self.j[n] + 1) + elif self.nnodes < 0: + def _getloc(n): + """Output for dataset 2d2.""" + fmt = indent + '{0} {0} '.format( + float_format) + '{:.0f} {:.0f}' + return fmt.format(self.node_data.ztop[n], + self.node_data.zbotm[n], + self.node_data.i[n] + 1, + self.node_data.j[n] + 1) + for n in range(np.abs(self.nnodes)): + f_mnw.write(_getloc(n)) + for var in ['rw', 'rskin', 'kskin', 'B', 'C', 'P', 'cwc', 'pp']: + val = self.__dict__[var] + if val is None: + continue + # only write variables by node if they are unique lists > length 1 + if len(np.unique(val)) > 1: + # if isinstance(val, list) or val < 0: + fmt = ' ' + float_format + f_mnw.write(fmt.format(self.node_data[var][n])) + f_mnw.write('\n') + # dataset 2e + if self.pumploc != 0: + if self.pumploc > 0: + f_mnw.write( + indent + '{:.0f} {:.0f} {:.0f}\n'.format(self.pumplay, + self.pumprow, + self.pumpcol)) + elif self.pumploc < 0: + fmt = indent + '{}\n'.format(float_format) + f_mnw.write(fmt.format(self.zpump)) + # dataset 2f + if self.qlimit > 0: + fmt = indent + '{} '.format(float_format) + '{:.0f}' + f_mnw.write(fmt.format(self.hlim, self.qcut)) + if self.qcut != 0: + fmt = ' {0} {0}'.format(float_format) + f_mnw.write(fmt.format(self.qfrcmn, self.qfrcmx)) + f_mnw.write('\n') + # dataset 2g + if self.pumpcap > 0: + fmt = indent + '{0} {0} {0} {0}\n'.format(float_format) + f_mnw.write( + 
fmt.format(self.hlift, self.liftq0, self.liftqmax, self.hwtol)) + # dataset 2h + if self.pumpcap > 0: + fmt = indent + '{0} {0}\n'.format(float_format) + f_mnw.write(fmt.format(self.liftn, self.qn)) + + +class ModflowMnw2(Package): + """ + Multi-Node Well 2 Package Class + + Parameters + ---------- + model : model object + The model object (of type :class:'flopy.modflow.mf.Modflow') to which + this package will be added. + mnwmax : int + The absolute value of MNWMAX is the maximum number of multi-node wells + (MNW) to be simulated. If MNWMAX is a negative number, NODTOT is read. + nodtot : int + Maximum number of nodes. + The code automatically estimates the maximum number of nodes (NODTOT) + as required for allocation of arrays. However, if a large number of + horizontal wells are being simulated, or possibly for other reasons, + this default estimate proves to be inadequate, a new input option has + been added to allow the user to directly specify a value for NODTOT. + If this is a desired option, then it can be implemented by specifying + a negative value for "MNWMAX"--the first value listed in Record 1 + (Line 1) of the MNW2 input data file. If this is done, then the code + will assume that the very next value on that line will be the desired + value of "NODTOT". The model will then reset "MNWMAX" to its absolute + value. The value of "ipakcb" will become the third value on that + line, etc. + ipakcb : int + is a flag and a unit number: + if ipakcb > 0, then it is the unit number to which MNW cell-by-cell + flow terms will be recorded whenever cell-by-cell budget data are + written to a file (as determined by the outputcontrol options of + MODFLOW). + if ipakcb = 0, then MNW cell-by-cell flow terms will not be printed + or recorded. + if ipakcb < 0, then well injection or withdrawal rates and water + levels in the well and its multiple cells will be printed in + the main MODFLOW listing (output) file whenever cell-by-cell + budget data are written to a file (as determined by the output + control options of MODFLOW). + mnwprnt : integer + Flag controlling the level of detail of information about multi-node + wells to be written to the main MODFLOW listing (output) file. + If MNWPRNT = 0, then only basic well information will be printed in + the main MODFLOW output file; increasing the value of MNWPRNT yields + more information, up to a maximum level of detail corresponding + with MNWPRNT = 2. (default is 0) + aux : list of strings + (listed as "OPTION" in MNW2 input instructions) + is an optional list of character values in the style of "AUXILIARY abc" + or "AUX abc" where "abc" is the name of an auxiliary parameter to be + read for each multi-node well as part of dataset 4a. Up to 20 + parameters can be specified, each of which must be preceded by + "AUXILIARY" or "AUX." These parameters will not be used by the MNW2 + Package, but they will be available for use by other packages. + (default is None) + node_data : numpy record array + master table describing multi-node wells in package. Same format as + node_data tables for each Mnw object. See Mnw class documentation for + more information. + mnw : list or dict of Mnw objects + Can be supplied instead of node_data and stress_period_data tables + (in which case the tables are constructed from the Mnw objects). + Otherwise the a dict of Mnw objects (keyed by wellid) is constructed + from the tables. 
+    stress_period_data : dict of numpy record arrays
+        master dictionary of record arrays (keyed by stress period) containing
+        transient input for multi-node wells. Format is the same as stress
+        period data for individual Mnw objects, except the 'per' column is
+        replaced by 'wellid' (containing wellid for each MNW). See Mnw class
+        documentation for more information.
+    itmp : list of ints
+        is an integer value for reusing or reading multi-node well data; it
+        can change each stress period. ITMP must be >= 0 for the first stress
+        period of a simulation.
+        if ITMP > 0, then ITMP is the total number of active multi-node wells
+            simulated during the stress period, and only wells listed in
+            dataset 4a will be active during the stress period. Characteristics
+            of each well are defined in datasets 2 and 4.
+        if ITMP = 0, then no multi-node wells are active for the stress period
+            and the following dataset is skipped.
+        if ITMP < 0, then the same number of wells and well information will
+            be reused from the previous stress period and dataset 4 is skipped.
+    extension : string
+        Filename extension (default is 'mnw2')
+    unitnumber : int
+        File unit number (default is None).
+    filenames : str or list of str
+        Filenames to use for the package and the output files. If
+        filenames=None the package name will be created using the model name
+        and package extension and the cbc output name will be created using
+        the model name and .cbc extension (for example, modflowtest.cbc),
+        if ipakcb is a number greater than zero. If a single string is passed
+        the package will be set to the string and cbc output names will be
+        created using the model name and .cbc extension, if ipakcb is a
+        number greater than zero. To define the names for all package files
+        (input and output) the length of the list of strings should be 2.
+        Default is None.
+    gwt : boolean
+        Flag indicating whether the GW transport process is active
+
+    Attributes
+    ----------
+
+    Methods
+    -------
+
+    See Also
+    --------
+
+    Notes
+    -----
+
+    Examples
+    --------
+
+    >>> import flopy
+    >>> ml = flopy.modflow.Modflow()
+    >>> mnw2 = flopy.modflow.ModflowMnw2(ml, ...)
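+
+    A fuller sketch of building the package from tables; the grid, well
+    location, and pumping rate below are arbitrary placeholder values,
+    not values from a real model:
+
+    >>> m = flopy.modflow.Modflow()
+    >>> dis = flopy.modflow.ModflowDis(m, nlay=3, nrow=10, ncol=10, nper=1)
+    >>> node_data = flopy.modflow.ModflowMnw2.get_empty_node_data(2)
+    >>> node_data['wellid'] = 'well1'
+    >>> node_data['losstype'] = 'thiem'
+    >>> node_data['rw'] = 0.5
+    >>> node_data['k'] = [0, 1]
+    >>> node_data['i'] = 4
+    >>> node_data['j'] = 4
+    >>> spd = flopy.modflow.ModflowMnw2.get_empty_stress_period_data(1)
+    >>> spd['wellid'] = 'well1'
+    >>> spd['qdes'] = -100.
+    >>> mnw2 = flopy.modflow.ModflowMnw2(m, mnwmax=1, node_data=node_data,
+    ...                                  stress_period_data={0: spd},
+    ...                                  itmp=[1])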
+ + """ + + def __init__(self, model, mnwmax=0, nodtot=None, ipakcb=0, mnwprnt=0, + aux=[], + node_data=None, mnw=None, stress_period_data=None, itmp=[], + extension='mnw2', unitnumber=None, filenames=None, + gwt=False): + """ + Package constructor + """ + # set default unit number of one is not specified + if unitnumber is None: + unitnumber = ModflowMnw2.defaultunit() + + # set filenames + if filenames is None: + filenames = [None, None] + elif isinstance(filenames, str): + filenames = [filenames, None] + elif isinstance(filenames, list): + if len(filenames) < 2: + filenames.append(None) + + # update external file information with cbc output, if necessary + if ipakcb is not None: + fname = filenames[1] + model.add_output_file(ipakcb, fname=fname, + package=ModflowMnw2.ftype()) + else: + ipakcb = 0 + + # Fill namefile items + name = [ModflowMnw2.ftype()] + units = [unitnumber] + extra = [''] + + # set package name + fname = [filenames[0]] + + # Call ancestor's init to set self.parent, extension, name and unit number + Package.__init__(self, model, extension=extension, name=name, + unit_number=units, extra=extra, filenames=fname) + + self.url = 'mnw2.htm' + self.nper = self.parent.nrow_ncol_nlay_nper[-1] + self.nper = 1 if self.nper == 0 else self.nper # otherwise iterations from 0, nper won't run + self.structured = self.parent.structured + + # Dataset 0 + self.heading = '# {} package for '.format(self.name[0]) + \ + ' {}, '.format(model.version_types[model.version]) + \ + 'generated by Flopy.' + # Dataset 1 + # maximum number of multi-node wells to be simulated + self.mnwmax = int(mnwmax) + self.nodtot = nodtot # user-specified maximum number of nodes + self.ipakcb = ipakcb + self.mnwprnt = int(mnwprnt) # -verbosity flag + self.aux = aux # -list of optional auxiliary parameters + + # Datasets 2-4 are contained in node_data and stress_period_data tables + # and/or in Mnw objects + self.node_data = self.get_empty_node_data(0, aux_names=aux) + + if node_data is not None: + self.node_data = self.get_empty_node_data(len(node_data), + aux_names=aux) + names = [n for n in node_data.dtype.names if + n in self.node_data.dtype.names] + for n in names: + self.node_data[n] = node_data[ + n] # recarray of Mnw properties by node + self.nodtot = len(self.node_data) + self._sort_node_data() + # self.node_data.sort(order=['wellid', 'k']) + + # Python 3.5.0 produces a segmentation fault when trying to sort BR MNW wells + # self.node_data.sort(order='wellid', axis=0) + self.mnw = mnw # dict or list of Mnw objects + + self.stress_period_data = MfList(self, + {0: self.get_empty_stress_period_data( + 0, + aux_names=aux, + structured=self.structured)}, + dtype=self.get_default_spd_dtype( + structured=self.structured)) + if stress_period_data is not None: + for per, data in stress_period_data.items(): + spd = ModflowMnw2.get_empty_stress_period_data(len(data), + aux_names=aux) + names = [n for n in data.dtype.names if n in spd.dtype.names] + for n in names: + spd[n] = data[n] + spd.sort(order='wellid') + self.stress_period_data[per] = spd + + self.itmp = itmp + self.gwt = gwt + + if mnw is None: + self.make_mnw_objects() + elif node_data is None and mnw is not None: + if isinstance(mnw, list): + self.mnw = {mnwobj.wellid: mnwobj for mnwobj in mnw} + elif isinstance(mnw, Mnw): + self.mnw = {mnw.wellid: mnw} + self.make_node_data(self.mnw) + self.make_stress_period_data(self.mnw) + + if stress_period_data is not None: + if 'k' not in stress_period_data[ + list(stress_period_data.keys())[0]].dtype.names: + 
self._add_kij_to_stress_period_data() + + self.parent.add_package(self) + + def _add_kij_to_stress_period_data(self): + for per in self.stress_period_data.data.keys(): + for d in ['k', 'i', 'j']: + self.stress_period_data[per][d] = [ + self.mnw[wellid].__dict__[d][0] + for wellid in self.stress_period_data[per].wellid] + + def _sort_node_data(self): + + node_data = self.node_data + node_data_list = [] + wells = sorted(np.unique(node_data['wellid']).tolist()) + for wellid in wells: + nd = node_data[node_data['wellid'] == wellid] + nd = Mnw.sort_node_data(nd) + node_data_list.append(nd) + node_data = np.concatenate(node_data_list, axis=0) + self.node_data = node_data.view(np.recarray) + + @staticmethod + def get_empty_node_data(maxnodes=0, aux_names=None, structured=True, + default_value=0): + """ + get an empty recarray that corresponds to dtype + + Parameters + ---------- + maxnodes : int + Total number of nodes to be simulated (default is 0) + aux_names : list + List of aux name strings (default is None) + structured : bool + Boolean indicating if a structured (True) or unstructured (False) + model (default is True). + default_value : float + Default value for float variables (default is 0). + + Returns + ------- + r : np.recarray + Recarray of default dtype of shape maxnode + """ + dtype = ModflowMnw2.get_default_node_dtype(structured=structured) + if aux_names is not None: + dtype = Package.add_to_dtype(dtype, aux_names, np.float32) + return create_empty_recarray(maxnodes, dtype, + default_value=default_value) + + @staticmethod + def get_default_node_dtype(structured=True): + """ + Get default dtype for node data + + Parameters + ---------- + structured : bool + Boolean indicating if a structured (True) or unstructured (False) + model (default is True). + + Returns + ------- + dtype : np.dtype + node data dtype + + """ + if structured: + return np.dtype([('k', np.int), + ('i', np.int), + ('j', np.int), + ('ztop', np.float32), + ('zbotm', np.float32), + ('wellid', np.object), + ('losstype', np.object), + ('pumploc', np.int), + ('qlimit', np.int), + ('ppflag', np.int), + ('pumpcap', np.int), + ('rw', np.float32), + ('rskin', np.float32), + ('kskin', np.float32), + ('B', np.float32), + ('C', np.float32), + ('P', np.float32), + ('cwc', np.float32), + ('pp', np.float32), + ('pumplay', np.int), + ('pumprow', np.int), + ('pumpcol', np.int), + ('zpump', np.float32), + ('hlim', np.float32), + ('qcut', np.int), + ('qfrcmn', np.float32), + ('qfrcmx', np.float32), + ('hlift', np.float32), + ('liftq0', np.float32), + ('liftqmax', np.float32), + ('hwtol', np.float32), + ('liftn', np.float32), + ('qn', np.float32)]) + else: + msg = 'get_default_node_dtype: unstructured model not supported' + raise NotImplementedError(msg) + + @staticmethod + def get_empty_stress_period_data(itmp=0, aux_names=None, structured=True, + default_value=0): + """ + Get an empty stress period data recarray + + Parameters + ---------- + itmp : int + Number of entries in this stress period (default is 0). + aux_names : list + List of aux names (default is None). + structured : bool + Boolean indicating if a structured (True) or unstructured (False) + model (default is True). + default_value : float + Default value for float variables (default is 0). 
+
+        Returns
+        -------
+        r : np.recarray
+            Recarray of default dtype of shape itmp
+
+        """
+        dtype = ModflowMnw2.get_default_spd_dtype(structured=structured)
+        if aux_names is not None:
+            dtype = Package.add_to_dtype(dtype, aux_names, np.float32)
+        return create_empty_recarray(itmp, dtype, default_value=default_value)
+
+    @staticmethod
+    def get_default_spd_dtype(structured=True):
+        """
+        Get default dtype for stress period data
+
+        Parameters
+        ----------
+        structured : bool
+            Boolean indicating if a structured (True) or unstructured (False)
+            model (default is True).
+
+        Returns
+        -------
+        dtype : np.dtype
+            stress period data dtype
+
+        """
+        if structured:
+            return np.dtype([('k', np.int),
+                             ('i', np.int),
+                             ('j', np.int),
+                             ('wellid', np.object),
+                             ('qdes', np.float32),
+                             ('capmult', np.int),
+                             ('cprime', np.float32),
+                             ('hlim', np.float32),
+                             ('qcut', np.int),
+                             ('qfrcmn', np.float32),
+                             ('qfrcmx', np.float32)])
+        else:
+            msg = 'get_default_spd_dtype: unstructured model not supported'
+            raise NotImplementedError(msg)
+
+    @staticmethod
+    def load(f, model, nper=None, gwt=False, nsol=1, ext_unit_dict=None):
+        """
+        Load an existing MNW2 package file and return a ModflowMnw2 object.
+
+        Parameters
+        ----------
+        f : filename or file handle
+            File to load.
+        model : model object
+            The model object (of type :class:`flopy.modflow.mf.Modflow`) to
+            which this package will be added.
+        nper : int
+            Number of stress periods
+        gwt : bool
+            Flag indicating whether the GW transport process is active
+        nsol : int
+            Number of solutes (not currently used)
+        ext_unit_dict : dictionary, optional
+            If the arrays in the file are specified using EXTERNAL,
+            or older style array control records, then `f` should be a file
+            handle. In this case ext_unit_dict is required, which can be
+            constructed using the function
+            :class:`flopy.utils.mfreadnam.parsenamefile`.
+
+        Returns
+        -------
+        mnw2 : ModflowMnw2 object
+
+        """
+
+        if model.verbose:
+            sys.stdout.write('loading mnw2 package file...\n')
+
+        structured = model.structured
+        if nper is None:
+            nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper()
+            # ensure that at least one stress period is read
+            nper = 1 if nper == 0 else nper
+
+        openfile = not hasattr(f, 'read')
+        if openfile:
+            filename = f
+            f = open(filename, 'r')
+
+        # dataset 0 (header)
+        while True:
+            line = get_next_line(f)
+            if line[0] != '#':
+                break
+        # dataset 1
+        mnwmax, nodtot, ipakcb, mnwprint, option = _parse_1(line)
+        # dataset 2
+        node_data = ModflowMnw2.get_empty_node_data(0)
+        mnw = {}
+        for i in range(mnwmax):
+            # create a Mnw object by parsing dataset 2
+            mnwobj = _parse_2(f)
+            # populate stress period data table for each well object
+            # this is filled below under dataset 4
+            mnwobj.stress_period_data = \
+                Mnw.get_empty_stress_period_data(nper, aux_names=option)
+            mnw[mnwobj.wellid] = mnwobj
+            # master table with all node data
+            node_data = np.append(node_data, mnwobj.node_data).view(
+                np.recarray)
+
+        # stress period data table for package (flopy convention)
+        stress_period_data = {}
+        itmp = []
+        for per in range(0, nper):
+            # dataset 3
+            itmp_per = int(line_parse(get_next_line(f))[0])
+            # dataset 4
+            # dict might be better here to only load submitted values
+            if itmp_per > 0:
+                current_4 = ModflowMnw2.get_empty_stress_period_data(
+                    itmp_per, aux_names=option)
+                for i in range(itmp_per):
+                    wellid, qdes, capmult, cprime, xyz = _parse_4a(
+                        get_next_line(f),
+                        mnw,
+                        gwt=gwt)
+                    hlim, qcut, qfrcmn, qfrcmx = 0, 0, 0, 0
+                    if mnw[wellid].qlimit < 0:
+                        hlim, qcut, qfrcmn, qfrcmx = _parse_4b(
+                            get_next_line(f))
+                    # update package stress period data table
+                    ndw = node_data[node_data.wellid == wellid]
+                    kij = [ndw.k[0], ndw.i[0], ndw.j[0]]
+                    current_4[i] = tuple(kij + [wellid, qdes, capmult, cprime,
+                                                hlim, qcut, qfrcmn,
+                                                qfrcmx] + xyz)
+                    # update well stress period data table
+                    mnw[wellid].stress_period_data[per] = tuple(
+                        kij + [per] + [qdes, capmult, cprime,
+                                       hlim, qcut, qfrcmn, qfrcmx] + xyz)
+                stress_period_data[per] = current_4
+            elif itmp_per == 0:  # no active mnws this stress period
+                pass
+            else:
+                # itmp_per < 0: reuse the well information from the
+                # previous stress period for every well
+                for wellid in mnw:
+                    mnw[wellid].stress_period_data[per] = \
+                        mnw[wellid].stress_period_data[per - 1]
+            itmp.append(itmp_per)
+
+        if openfile:
+            f.close()
+
+        # determine specified unit number
+        unitnumber = None
+        filenames = [None, None]
+        if ext_unit_dict is not None:
+            for key, value in ext_unit_dict.items():
+                if value.filetype == ModflowMnw2.ftype():
+                    unitnumber = key
+                    filenames[0] = os.path.basename(value.filename)
+
+                if ipakcb > 0:
+                    if key == ipakcb:
+                        filenames[1] = os.path.basename(value.filename)
+                        model.add_pop_key_list(key)
+
+        return ModflowMnw2(model, mnwmax=mnwmax, nodtot=nodtot, ipakcb=ipakcb,
+                           mnwprnt=mnwprint, aux=option,
+                           node_data=node_data, mnw=mnw,
+                           stress_period_data=stress_period_data, itmp=itmp,
+                           unitnumber=unitnumber, filenames=filenames)
+
+    def check(self, f=None, verbose=True, level=1, checktype=None):
+        """
+        Check mnw2 package data for common errors.
+
+        Parameters
+        ----------
+        f : str or file handle
+            String defining file name or file handle for summary file
+            of check method output. If a string is passed a file handle
+            is created. If f is None, check method does not write
+            results to a summary file. (default is None)
+        verbose : bool
+            Boolean flag used to determine if check method results are
+            written to the screen
+        level : int
+            Check method analysis level. If level=0, summary checks are
+            performed. If level=1, full checks are performed.
+
+        Returns
+        -------
+        chk : check object
+
+        Examples
+        --------
+
+        >>> import flopy
+        >>> m = flopy.modflow.Modflow.load('model.nam')
+        >>> m.mnw2.check()
+        """
+        chk = self._get_check(f, verbose, level, checktype)
+
+        # itmp
+        if self.itmp[0] < 0:
+            chk._add_to_summary(type='Error', value=self.itmp[0],
+                                desc='Itmp must be >= 0 for first stress period.')
+        invalid_itmp = np.array(self.itmp) > self.mnwmax
+        if np.any(invalid_itmp):
+            for v in np.array(self.itmp)[invalid_itmp]:
+                chk._add_to_summary(type='Error', value=v,
+                                    desc='Itmp value greater than MNWMAX')
+
+        chk.summarize()
+        return chk
+
+    def get_allnode_data(self):
+        """
+        Get a version of the node_data array that has all MNW2 nodes listed
+        explicitly. For example, MNWs with open intervals encompassing
+        multiple layers would have a row entry for each layer. Ztop and zbotm
+        values indicate the top and bottom elevations of the node (these are
+        the same as the layer top and bottom if the node fully penetrates
+        that layer).
+
+        Returns
+        -------
+        allnode_data : np.recarray
+            Numpy record array of same form as node_data, except each row
+            represents only one node.
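+
+        Examples
+        --------
+        A minimal sketch (assumes ``m`` is a Modflow model with DIS and
+        MNW2 packages already attached):
+
+        >>> allnode_data = m.mnw2.get_allnode_data()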
+ + """ + from numpy.lib.recfunctions import stack_arrays + + nd = [] + for i in range(len(self.node_data)): + r = self.node_data[i] + if r['ztop'] - r['zbotm'] > 0: + startK = get_layer(self.parent.dis, r['i'], r['j'], r['ztop']) + endK = get_layer(self.parent.dis, r['i'], r['j'], r['zbotm']) + if startK == endK: + r = r.copy() + r['k'] = startK + nd.append(r) + else: + for k in np.arange(startK, endK + 1): + rk = r.copy() + rk['k'] = k + if k > startK: + loc = (k - 1, rk['i'], rk['j']) + rk['ztop'] = self.parent.dis.botm[loc] + if k < endK: + loc = (k, rk['i'], rk['j']) + rk['zbotm'] = self.parent.dis.botm[loc] + nd.append(rk) + else: + nd.append(r) + return stack_arrays(nd, usemask=False).view(np.recarray) + + def make_mnw_objects(self): + """ + Make a Mnw object + + Returns + ------- + None + + """ + node_data = self.node_data + stress_period_data = self.stress_period_data + self.mnw = {} + mnws = np.unique(node_data['wellid']) + for wellid in mnws: + nd = node_data[node_data.wellid == wellid] + nnodes = Mnw.get_nnodes(nd) + # if tops and bottoms are specified, flip nnodes + # maxtop = np.max(nd.ztop) + # minbot = np.min(nd.zbotm) + # if maxtop - minbot > 0 and nnodes > 0: + # nnodes *= -1 + # reshape stress period data to well + mnwspd = Mnw.get_empty_stress_period_data(self.nper, + aux_names=self.aux) + for per, itmp in enumerate(self.itmp): + inds = stress_period_data[per].wellid == wellid + if itmp > 0 and np.any(inds): + names = [n for n in + stress_period_data[per][inds].dtype.names if + n in mnwspd.dtype.names] + mnwspd[per]['per'] = per + for n in names: + mnwspd[per][n] = stress_period_data[per][inds][n][0] + elif itmp == 0: + continue + elif itmp < 0: + mnwspd[per] = mnwspd[per - 1] + + self.mnw[wellid] = Mnw(wellid, + nnodes=nnodes, nper=self.nper, + node_data=nd, stress_period_data=mnwspd, + mnwpackage=self) + + def make_node_data(self, mnwobjs): + """ + Make node_data recarray from Mnw objects + + Parameters + ---------- + mnwobjs : Mnw object + + Returns + ------- + None + + """ + if isinstance(mnwobjs, dict): + mnwobjs = list(mnwobjs.values()) + elif isinstance(mnwobjs, Mnw): + mnwobjs = [mnwobjs] + + mnwobj_node_data = [] + for mnwobj in mnwobjs: + for rec in mnwobj.node_data: + mnwobj_node_data.append(rec) + node_data = ModflowMnw2.get_empty_node_data(len(mnwobj_node_data)) + + for ix, node in enumerate(mnwobj_node_data): + for jx, name in enumerate(node_data.dtype.names): + node_data[name][ix] = node[jx] + + self.node_data = node_data + + def make_stress_period_data(self, mnwobjs): + """ + Make stress_period_data recarray from Mnw objects + + Parameters + ---------- + mnwobjs : Mnw object + + Returns + ------- + None + + """ + if isinstance(mnwobjs, dict): + mnwobjs = list(mnwobjs.values()) + elif isinstance(mnwobjs, Mnw): + mnwobjs = [mnwobjs] + stress_period_data = {} + for per, itmp in enumerate(self.itmp): + if itmp > 0: + stress_period_data[ + per] = ModflowMnw2.get_empty_stress_period_data(itmp, + aux_names=self.aux) + i = 0 + for mnw in mnwobjs: + if per in mnw.stress_period_data.per: + i += 1 + if i > itmp: + raise ItmpError(itmp, i) + names = [n for n in mnw.stress_period_data.dtype.names + if n in stress_period_data[per].dtype.names] + stress_period_data[per]['wellid'][i - 1] = mnw.wellid + for n in names: + stress_period_data[per][n][i - 1] = \ + mnw.stress_period_data[n][per] + stress_period_data[per].sort(order='wellid') + if i < itmp: + raise ItmpError(itmp, i) + elif itmp == 0: + continue + else: # itmp < 0 + stress_period_data[per] = 
stress_period_data[per - 1] + self.stress_period_data = MfList(self, + stress_period_data, + dtype=stress_period_data[0].dtype) + + def export(self, f, **kwargs): + """ + Export MNW2 data + + Parameters + ---------- + f : file + kwargs + + Returns + ------- + e : export object + + + """ + # A better strategy would be to build a single 4-D MfList + # (currently the stress period data array has everything in layer 0) + self.node_data_MfList = MfList(self, self.get_allnode_data(), + dtype=self.node_data.dtype) + # make some modifications to ensure proper export + # avoid duplicate entries for qfrc + wellids = np.unique(self.node_data.wellid) + todrop = ['hlim', 'qcut', 'qfrcmn', 'qfrcmx'] + # move duplicate fields from node_data to stress_period_data + for wellid in wellids: + wellnd = self.node_data.wellid == wellid + if np.max(self.node_data.qlimit[wellnd]) > 0: + for per in self.stress_period_data.data.keys(): + for col in todrop: + inds = self.stress_period_data[per].wellid == wellid + self.stress_period_data[per][col][inds] = \ + self.node_data[wellnd][col] + self.node_data_MfList = self.node_data_MfList.drop(todrop) + ''' + todrop = {'qfrcmx', 'qfrcmn'} + names = list(set(self.stress_period_data.dtype.names).difference(todrop)) + dtype = np.dtype([(k, d) for k, d in self.stress_period_data.dtype.descr if k not in todrop]) + spd = {} + for k, v in self.stress_period_data.data.items(): + newarr = np.array(np.zeros_like(self.stress_period_data[k][names]), + dtype=dtype).view(np.recarray) + for n in dtype.names: + newarr[n] = self.stress_period_data[k][n] + spd[k] = newarr + self.stress_period_data = MfList(self, spd, dtype=dtype) + ''' + + return super(ModflowMnw2, self).export(f, **kwargs) + + def _write_1(self, f_mnw): + """ + + Parameters + ---------- + f_mnw : file object + File object for MNW2 input file + + + Returns + ------- + None + + """ + f_mnw.write('{:.0f} '.format(self.mnwmax)) + if self.mnwmax < 0: + f_mnw.write('{:.0f} '.format(self.nodtot)) + f_mnw.write('{:.0f} {:.0f}'.format(self.ipakcb, self.mnwprnt)) + if len(self.aux) > 0: + for abc in self.aux: + f_mnw.write(' aux {}'.format(abc)) + f_mnw.write('\n') + + def write_file(self, filename=None, float_format=' {:15.7E}', + use_tables=True): + """ + Write the package file. + + Parameters + ---------- + filename : str + float_format + use_tables + + Returns + ------- + None + + """ + + if use_tables: + # update mnw objects from node and stress_period_data tables + self.make_mnw_objects() + + if filename is not None: + self.fn_path = filename + + f_mnw = open(self.fn_path, 'w') + + # dataset 0 (header) + f_mnw.write('{0}\n'.format(self.heading)) + + # dataset 1 + self._write_1(f_mnw) + + # dataset 2 + # need a method that assigns attributes from table to objects! + # call make_mnw_objects?? 
(table is definitive then)
+        if use_tables:
+            # unique wellids, sorted (np.unique sorts the result)
+            mnws = np.unique(self.node_data.wellid).tolist()
+        else:
+            mnws = list(self.mnw.keys())
+        for k in mnws:
+            self.mnw[k]._write_2(f_mnw, float_format=float_format)
+
+        # dataset 3
+        for per in range(self.nper):
+            f_mnw.write('{:.0f} Stress Period {:.0f}\n'.format(self.itmp[per],
+                                                               per + 1))
+            if self.itmp[per] > 0:
+
+                for n in range(self.itmp[per]):
+                    # dataset 4a
+                    wellid = self.stress_period_data[per].wellid[n]
+                    qdes = self.stress_period_data[per].qdes[n]
+                    fmt = '{} ' + float_format
+                    f_mnw.write(fmt.format(wellid, qdes))
+                    if self.mnw[wellid].pumpcap > 0:
+                        fmt = ' ' + float_format
+                        f_mnw.write(fmt.format(
+                            self.stress_period_data[per].capmult[n]))
+                    if qdes > 0 and self.gwt:
+                        f_mnw.write(fmt.format(
+                            self.stress_period_data[per].cprime[n]))
+                    if len(self.aux) > 0:
+                        for var in self.aux:
+                            fmt = ' ' + float_format
+                            f_mnw.write(fmt.format(
+                                self.stress_period_data[per][var][n]))
+                    f_mnw.write('\n')
+                    if self.mnw[wellid].qlimit < 0:
+                        # dataset 4b
+                        hlim, qcut = \
+                            self.stress_period_data[per][['hlim', 'qcut']][n]
+                        fmt = float_format + ' {:.0f}'
+                        f_mnw.write(fmt.format(hlim, qcut))
+                        if qcut != 0:
+                            fmt = ' {0} {0}'.format(float_format)
+                            f_mnw.write(fmt.format(*self.stress_period_data[
+                                per][['qfrcmn', 'qfrcmx']][n]))
+                        f_mnw.write('\n')
+        f_mnw.close()
+
+    @staticmethod
+    def ftype():
+        return 'MNW2'
+
+    @staticmethod
+    def defaultunit():
+        return 34
+
+
+def _parse_1(line):
+    """
+    Parse dataset 1 of the MNW2 package input file.
+
+    Parameters
+    ----------
+    line : str
+        Dataset 1 line from the MNW2 input file.
+
+    Returns
+    -------
+    mnwmax, nodtot, ipakcb, mnwprint, option
+
+    """
+    line = line_parse(line)
+    mnwmax = pop_item(line, int)
+    nodtot = None
+    if mnwmax < 0:
+        nodtot = pop_item(line, int)
+    ipakcb = pop_item(line, int)
+    mnwprint = pop_item(line, int)
+    option = []  # aux names
+    if len(line) > 0:
+        option += [line[i] for i in np.arange(1, len(line)) if
+                   'aux' in line[i - 1].lower()]
+    return mnwmax, nodtot, ipakcb, mnwprint, option
+
+
+def _parse_2(f):
+    """
+    Parse dataset 2 of the MNW2 package input file and return a Mnw object.
+
+    Parameters
+    ----------
+    f : file handle
+        Open file handle positioned at the start of dataset 2 for a well.
+
+    Returns
+    -------
+    mnw : Mnw object
+
+    """
+    # dataset 2a
+    line = line_parse(get_next_line(f))
+    if len(line) > 2:
+        warnings.warn('MNW2: {}\n'.format(line) +
+                      'Extra items in Dataset 2a!'
+ + 'Check for WELLIDs with space ' + + 'but not enclosed in quotes.') + wellid = pop_item(line).lower() + nnodes = pop_item(line, int) + # dataset 2b + line = line_parse(get_next_line(f)) + losstype = pop_item(line) + pumploc = pop_item(line, int) + qlimit = pop_item(line, int) + ppflag = pop_item(line, int) + pumpcap = pop_item(line, int) + + # dataset 2c + names = ['ztop', 'zbotm', 'k', 'i', 'j', 'rw', 'rskin', 'kskin', 'B', 'C', + 'P', 'cwc', 'pp'] + d2d = {n: [] for n in names} # dataset 2d; dict of lists for each variable + # set default values of 0 for all 2c items + d2dw = dict( + zip(['rw', 'rskin', 'kskin', 'B', 'C', 'P', 'cwc'], [0] * 7)) + if losstype.lower() != 'none': + # update d2dw items + d2dw.update( + _parse_2c(get_next_line(f), losstype)) # dict of values for well + for k, v in d2dw.items(): + if v > 0: + d2d[k].append(v) + # dataset 2d + pp = 1 # partial penetration flag + for i in range(np.abs(nnodes)): + line = line_parse(get_next_line(f)) + if nnodes > 0: + d2d['k'].append(pop_item(line, int) - 1) + d2d['i'].append(pop_item(line, int) - 1) + d2d['j'].append(pop_item(line, int) - 1) + elif nnodes < 0: + d2d['ztop'].append(pop_item(line, float)) + d2d['zbotm'].append(pop_item(line, float)) + d2d['i'].append(pop_item(line, int) - 1) + d2d['j'].append(pop_item(line, int) - 1) + d2di = _parse_2c(line, losstype, rw=d2dw['rw'], rskin=d2dw['rskin'], + kskin=d2dw['kskin'], + B=d2dw['B'], C=d2dw['C'], P=d2dw['P'], + cwc=d2dw['cwc']) + # append only the returned items + for k, v in d2di.items(): + d2d[k].append(v) + if ppflag > 0 and nnodes > 0: + d2d['pp'].append(pop_item(line, float)) + + # dataset 2e + pumplay = None + pumprow = None + pumpcol = None + zpump = None + if pumploc != 0: + line = line_parse(get_next_line(f)) + if pumploc > 0: + pumplay = pop_item(line, int) + pumprow = pop_item(line, int) + pumpcol = pop_item(line, int) + else: + zpump = pop_item(line, float) + # dataset 2f + hlim = None + qcut = None + qfrcmx = None + qfrcmn = None + if qlimit > 0: + # Only specify dataset 2f if the value of Qlimit in dataset 2b is positive. + # Do not enter fractions as percentages. + line = line_parse(get_next_line(f)) + hlim = pop_item(line, float) + qcut = pop_item(line, int) + if qcut != 0: + qfrcmn = pop_item(line, float) + qfrcmx = pop_item(line, float) + # dataset 2g + hlift = None + liftq0 = None + liftqmax = None + hwtol = None + if pumpcap > 0: + # The number of additional data points on the curve (and lines in dataset 2h) + # must correspond to the value of PUMPCAP for this well (where PUMPCAP <= 25). + line = line_parse(get_next_line(f)) + hlift = pop_item(line, float) + liftq0 = pop_item(line, float) + liftqmax = pop_item(line, float) + hwtol = pop_item(line, float) + # dataset 2h + liftn = None + qn = None + if pumpcap > 0: + # Enter data in order of decreasing lift + # (that is, start with the point corresponding + # to the highest value of total dynamic head) and increasing discharge. + # The discharge value for the last data point in the sequence + # must be less than the value of LIFTqmax. 
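+        # PUMPCAP gives the number of additional (LIFTn, Qn) pairs to read,
+        # one pair per line of dataset 2h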
+ for i in range(pumpcap): + # pumpcap is an int (the number of dataset 2h records), so iterate + # range(pumpcap); note that only the last (liftn, qn) pair read is retained + line = line_parse(get_next_line(f)) + liftn = pop_item(line, float) + qn = pop_item(line, float) + + return Mnw(wellid, + nnodes=nnodes, + losstype=losstype, pumploc=pumploc, qlimit=qlimit, + ppflag=ppflag, pumpcap=pumpcap, + k=d2d['k'], i=d2d['i'], j=d2d['j'], ztop=d2d['ztop'], + zbotm=d2d['zbotm'], + rw=d2d['rw'], rskin=d2d['rskin'], kskin=d2d['kskin'], + B=d2d['B'], C=d2d['C'], P=d2d['P'], cwc=d2d['cwc'], + pp=d2d['pp'], + pumplay=pumplay, pumprow=pumprow, pumpcol=pumpcol, zpump=zpump, + hlim=hlim, qcut=qcut, qfrcmn=qfrcmn, qfrcmx=qfrcmx, + hlift=hlift, liftq0=liftq0, liftqmax=liftqmax, hwtol=hwtol, + liftn=liftn, qn=qn) + + +def _parse_2c(line, losstype, rw=-1, rskin=-1, kskin=-1, B=-1, C=-1, P=-1, + cwc=-1): + """ + Parse the loss-type-dependent items of a dataset 2c or 2d record; + items passed in with non-negative values are not read again. + + Parameters + ---------- + line : str or list + losstype : str + rw + rskin + kskin + B + C + P + cwc + + Returns + ------- + nd : dict of the dataset 2c/2d items read from the line + + """ + if not isinstance(line, list): + line = line_parse(line) + nd = {} # dict of dataset 2c/2d items + if losstype.lower() != 'specifycwc': + if rw < 0: + nd['rw'] = pop_item(line, float) + if losstype.lower() == 'skin': + if rskin < 0: + nd['rskin'] = pop_item(line, float) + if kskin < 0: + nd['kskin'] = pop_item(line, float) + elif losstype.lower() == 'general': + if B < 0: + nd['B'] = pop_item(line, float) + if C < 0: + nd['C'] = pop_item(line, float) + if P < 0: + nd['P'] = pop_item(line, float) + else: + if cwc < 0: + nd['cwc'] = pop_item(line, float) + return nd + + +def _parse_4a(line, mnw, gwt=False): + """ + Parse a dataset 4a record. + + Parameters + ---------- + line + mnw + gwt + + Returns + ------- + wellid, qdes, capmult, cprime, xyz + + """ + capmult = 0 + cprime = 0 + line = line_parse(line) + wellid = pop_item(line).lower() + pumpcap = mnw[wellid].pumpcap + qdes = pop_item(line, float) + if pumpcap > 0: + capmult = pop_item(line, int) + if qdes > 0 and gwt: + cprime = pop_item(line, float) + xyz = line + return wellid, qdes, capmult, cprime, xyz + + +def _parse_4b(line): + """ + Parse a dataset 4b record. + + Parameters + ---------- + line + + Returns + ------- + hlim, qcut, qfrcmn, qfrcmx + + """ + qfrcmn = 0 + qfrcmx = 0 + line = line_parse(line) + hlim = pop_item(line, float) + qcut = pop_item(line, int) + if qcut != 0: + qfrcmn = pop_item(line, float) + qfrcmx = pop_item(line, float) + return hlim, qcut, qfrcmn, qfrcmx + + +class ItmpError(Exception): + def __init__(self, itmp, nactivewells): + self.itmp = itmp + self.nactivewells = nactivewells + + def __str__(self): + s = '\n\nItmp value of {} '.format(self.itmp) + \ + 'is positive but does not equal the number of active wells ' + \ + 'specified ({}). '.format(self.nactivewells) + \ + 'See MNW2 package documentation for details.' + return s diff --git a/flopy/modflow/mfmnwi.py b/flopy/modflow/mfmnwi.py index 84ea1e9dfa..9973119e1c 100644 --- a/flopy/modflow/mfmnwi.py +++ b/flopy/modflow/mfmnwi.py @@ -1,344 +1,344 @@ -import sys - -from ..utils.flopy_io import line_parse, pop_item -from ..pakbase import Package - - -class ModflowMnwi(Package): - """ - 'Multi-Node Well Information Package Class' - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to which - this package will be added.
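A minimal sketch of how these dataset parsers consume a record, using the same line_parse and pop_item helpers this module imports; the sample dataset 2c record is hypothetical:

from flopy.utils.flopy_io import line_parse, pop_item

# hypothetical dataset 2c record for a SKIN loss-type well: Rw Rskin Kskin
tokens = line_parse('0.25  0.50  45.0')
rw = pop_item(tokens, float)     # 0.25
rskin = pop_item(tokens, float)  # 0.50
kskin = pop_item(tokens, float)  # 45.0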
- wel1flag : integer - Flag indicating output to be written for each MNW node at the end of - each stress period - qsumflag :integer - Flag indicating output to be written for each multi-node well - byndflag :integer - Flag indicating output to be written for each MNW node - mnwobs :integer - Number of multi-node wells for which detailed flow, head, and solute - data to be saved - wellid_unit_qndflag_qhbflag_concflag : list of lists - Containing wells and related information to be output - (length : [MNWOBS][4or5]) - extension : string - Filename extension (default is 'mnwi') - unitnumber : int - File unit number (default is None). - filenames : str or list of str - Filenames to use for the package and the output files. If - filenames=None the package name will be created using the model name - and package extension and the output names will be created using - the model name and output extensions. Default is None. - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - - Examples - -------- - - >>> import flopy - >>> ml = flopy.modflow.Modflow() - >>> ghb = flopy.modflow.ModflowMnwi(ml, ...) - - """ - - def __init__(self, model, wel1flag=None, qsumflag=None, byndflag=None, - mnwobs=1, wellid_unit_qndflag_qhbflag_concflag=None, - extension='mnwi', unitnumber=None, filenames=None): - # set default unit number of one is not specified - if unitnumber is None: - unitnumber = ModflowMnwi.defaultunit() - - # determine the number of unique unit numbers in dataset 3 - unique_units = [] - if wellid_unit_qndflag_qhbflag_concflag is not None: - for t in wellid_unit_qndflag_qhbflag_concflag: - iu = int(t[1]) - if iu not in unique_units: - unique_units.append(iu) - - # set filenames - nfn = 4 + len(unique_units) - if filenames is None: - filenames = [None for x in range(nfn)] - elif isinstance(filenames, str): - filenames = [filenames] + [None for x in range(nfn)] - elif isinstance(filenames, list): - if len(filenames) < nfn: - n = nfn - len(filenames) + 1 - filenames = filenames + [None for x in range(n)] - - # update external file information with unit_pc output, if necessary - if wel1flag is not None: - fname = filenames[1] - model.add_output_file(wel1flag, fname=fname, - extension='wel1', - binflag=False, - package=ModflowMnwi.ftype()) - else: - wel1flag = 0 - - # update external file information with unit_ts output, if necessary - if qsumflag is not None: - fname = filenames[2] - model.add_output_file(qsumflag, fname=fname, - extension='qsum', - binflag=False, - package=ModflowMnwi.ftype()) - else: - qsumflag = 0 - - # update external file information with ipunit output, if necessary - if byndflag is not None: - fname = filenames[3] - model.add_output_file(byndflag, fname=fname, - extension='bynd', - binflag=False, - package=ModflowMnwi.ftype()) - else: - byndflag = 0 - - idx = 4 - for iu in unique_units: - fname = filenames[idx] - model.add_output_file(iu, fname=fname, - extension='{:04d}.mnwobs'.format(iu), - binflag=False, - package=ModflowMnwi.ftype()) - idx += 1 - - name = [ModflowMnwi.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - self.url = 'mnwi.htm' - self.heading = '# {} package for '.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' 
- # integer flag indicating output to be written for each MNW node at - # the end of each stress period - self.wel1flag = wel1flag - # integer flag indicating output to be written for each multi-node well - self.qsumflag = qsumflag - # integer flag indicating output to be written for each MNW node - self.byndflag = byndflag - # number of multi-node wells for which detailed flow, head, and solute - # data to be saved - self.mnwobs = mnwobs - # list of lists containing wells and related information to be - # output (length = [MNWOBS][4or5]) - self.wellid_unit_qndflag_qhbflag_concflag = wellid_unit_qndflag_qhbflag_concflag - - # -input format checks: - assert self.wel1flag >= 0, 'WEL1flag must be greater than or equal to zero.' - assert self.qsumflag >= 0, 'QSUMflag must be greater than or equal to zero.' - assert self.byndflag >= 0, 'BYNDflag must be greater than or equal to zero.' - - if len(self.wellid_unit_qndflag_qhbflag_concflag) != self.mnwobs: - print('WARNING: number of listed well ids to be ' + - 'monitored does not match MNWOBS.') - - self.parent.add_package(self) - - @staticmethod - def load(f, model, nper=None, gwt=False, nsol=1, ext_unit_dict=None): - - if model.verbose: - sys.stdout.write('loading mnw2 package file...\n') - - structured = model.structured - if nper is None: - nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper() - # otherwise iterations from 0, nper won't run - nper = 1 if nper == 0 else nper - - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - - # dataset 1 - line = line_parse(next(f)) - wel1flag, qsumflag, byndflag = map(int, line) - if wel1flag > 0: - model.add_pop_key_list(wel1flag) - if qsumflag > 0: - model.add_pop_key_list(qsumflag) - if byndflag > 0: - model.add_pop_key_list(byndflag) - - - # dataset 2 - unique_units = [] - mnwobs = pop_item(line_parse(next(f)), int) - wellid_unit_qndflag_qhbflag_concflag = [] - if mnwobs > 0: - for i in range(mnwobs): - # dataset 3 - line = line_parse(next(f)) - wellid = pop_item(line, str) - unit = pop_item(line, int) - qndflag = pop_item(line, int) - qbhflag = pop_item(line, int) - tmp = [wellid, unit, qndflag, qbhflag] - if gwt and len(line) > 0: - tmp.append(pop_item(line, int)) - wellid_unit_qndflag_qhbflag_concflag.append(tmp) - if unit not in unique_units: - unique_units.append(unit) - - if openfile: - f.close() - - for unit in unique_units: - model.add_pop_key_list(unit) - - # determine specified unit number - nfn = 4 + len(unique_units) - unitnumber = None - filenames = [None for x in range(nfn)] - if ext_unit_dict is not None: - unitnumber, filenames[0] = \ - model.get_ext_dict_attr(ext_unit_dict, - filetype=ModflowMnwi.ftype()) - if wel1flag > 0: - iu, filenames[1] = \ - model.get_ext_dict_attr(ext_unit_dict, unit=wel1flag) - if qsumflag > 0: - iu, filenames[2] = \ - model.get_ext_dict_attr(ext_unit_dict, unit=qsumflag) - if byndflag > 0: - iu, filenames[3] = \ - model.get_ext_dict_attr(ext_unit_dict, unit=byndflag) - idx = 4 - for unit in unique_units: - iu, filenames[idx] = \ - model.get_ext_dict_attr(ext_unit_dict, unit=unit) - idx += 1 - - - return ModflowMnwi(model, wel1flag=wel1flag, qsumflag=qsumflag, - byndflag=byndflag, mnwobs=mnwobs, - wellid_unit_qndflag_qhbflag_concflag=wellid_unit_qndflag_qhbflag_concflag, - extension='mnwi', unitnumber=unitnumber, - filenames=filenames) - - def check(self, f=None, verbose=True, level=1, checktype=None): - """ - Check mnwi package data for common errors. 
- - Parameters - ---------- - f : str or file handle - String defining file name or file handle for summary file - of check method output. If a string is passed a file handle - is created. If f is None, check method does not write - results to a summary file. (default is None) - verbose : bool - Boolean flag used to determine if check method results are - written to the screen - level : int - Check method analysis level. If level=0, summary checks are - performed. If level=1, full checks are performed. - - Returns - ------- - None - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow.load('model.nam') - >>> m.mnwi.check() - """ - chk = self._get_check(f, verbose, level, checktype) - if "MNW2" not in self.parent.get_package_list(): - desc = '\r MNWI package present without MNW2 package.' - chk._add_to_summary(type='Warning', value=0, - desc=desc) - - chk.summarize() - return chk - - def write_file(self): - """ - Write the package file. - - Returns - ------- - None - - """ - - # -open file for writing - f = open(self.fn_path, 'w') - - # header not supported - # # -write header - # f.write('{}\n'.format(self.heading)) - - # dataset 1 - WEL1flag QSUMflag SYNDflag - line = '{:10d}'.format(self.wel1flag) - line += '{:10d}'.format(self.qsumflag) - line += '{:10d}\n'.format(self.byndflag) - f.write(line) - - # dataset 2 - MNWOBS - f.write('{:10d}\n'.format(self.mnwobs)) - - # dataset 3 - WELLID UNIT QNDflag QBHflag {CONCflag} - # (Repeat MNWOBS times) - nitems = len(self.wellid_unit_qndflag_qhbflag_concflag[0]) - for i, t in enumerate(self.wellid_unit_qndflag_qhbflag_concflag): - wellid = t[0] - unit = t[1] - qndflag = t[2] - qhbflag = t[3] - assert qndflag >= 0, 'QNDflag must be greater than or equal to zero.' - assert qhbflag >= 0, 'QHBflag must be greater than or equal to zero.' - line = '{:20s} '.format(wellid) - line += '{:5d} '.format(unit) - line += '{:5d} '.format(qndflag) - line += '{:5d} '.format(qhbflag) - if nitems == 5: - concflag = t[4] - assert 0 <= concflag <= 3, \ - 'CONCflag must be an integer between 0 and 3.' - assert isinstance(concflag, int), \ - 'CONCflag must be an integer between 0 and 3.' - line += '{:5d} '.format(concflag) - line += '\n' - f.write(line) - - f.close() - - @staticmethod - def ftype(): - return 'MNWI' - - @staticmethod - def defaultunit(): - return 58 +import sys + +from ..utils.flopy_io import line_parse, pop_item +from ..pakbase import Package + + +class ModflowMnwi(Package): + """ + 'Multi-Node Well Information Package Class' + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to which + this package will be added. + wel1flag : integer + Flag indicating output to be written for each MNW node at the end of + each stress period + qsumflag :integer + Flag indicating output to be written for each multi-node well + byndflag :integer + Flag indicating output to be written for each MNW node + mnwobs :integer + Number of multi-node wells for which detailed flow, head, and solute + data to be saved + wellid_unit_qndflag_qhbflag_concflag : list of lists + Containing wells and related information to be output + (length : [MNWOBS][4or5]) + extension : string + Filename extension (default is 'mnwi') + unitnumber : int + File unit number (default is None). + filenames : str or list of str + Filenames to use for the package and the output files. 
If + filenames=None the package name will be created using the model name + and package extension and the output names will be created using + the model name and output extensions. Default is None. + + Attributes + ---------- + + Methods + ------- + + See Also + -------- + + Notes + ----- + + Examples + -------- + + >>> import flopy + >>> ml = flopy.modflow.Modflow() + >>> mnwi = flopy.modflow.ModflowMnwi(ml, ...) + + """ + + def __init__(self, model, wel1flag=None, qsumflag=None, byndflag=None, + mnwobs=1, wellid_unit_qndflag_qhbflag_concflag=None, + extension='mnwi', unitnumber=None, filenames=None): + # set default unit number if one is not specified + if unitnumber is None: + unitnumber = ModflowMnwi.defaultunit() + + # determine the number of unique unit numbers in dataset 3 + unique_units = [] + if wellid_unit_qndflag_qhbflag_concflag is not None: + for t in wellid_unit_qndflag_qhbflag_concflag: + iu = int(t[1]) + if iu not in unique_units: + unique_units.append(iu) + + # set filenames + nfn = 4 + len(unique_units) + if filenames is None: + filenames = [None for x in range(nfn)] + elif isinstance(filenames, str): + filenames = [filenames] + [None for x in range(nfn)] + elif isinstance(filenames, list): + if len(filenames) < nfn: + n = nfn - len(filenames) + 1 + filenames = filenames + [None for x in range(n)] + + # update external file information with wel1 output, if necessary + if wel1flag is not None: + fname = filenames[1] + model.add_output_file(wel1flag, fname=fname, + extension='wel1', + binflag=False, + package=ModflowMnwi.ftype()) + else: + wel1flag = 0 + + # update external file information with qsum output, if necessary + if qsumflag is not None: + fname = filenames[2] + model.add_output_file(qsumflag, fname=fname, + extension='qsum', + binflag=False, + package=ModflowMnwi.ftype()) + else: + qsumflag = 0 + + # update external file information with bynd output, if necessary + if byndflag is not None: + fname = filenames[3] + model.add_output_file(byndflag, fname=fname, + extension='bynd', + binflag=False, + package=ModflowMnwi.ftype()) + else: + byndflag = 0 + + idx = 4 + for iu in unique_units: + fname = filenames[idx] + model.add_output_file(iu, fname=fname, + extension='{:04d}.mnwobs'.format(iu), + binflag=False, + package=ModflowMnwi.ftype()) + idx += 1 + + name = [ModflowMnwi.ftype()] + units = [unitnumber] + extra = [''] + + # set package name + fname = [filenames[0]] + + # Call ancestor's init to set self.parent, extension, name and unit number + Package.__init__(self, model, extension=extension, name=name, + unit_number=units, extra=extra, filenames=fname) + + self.url = 'mnwi.htm' + self.heading = '# {} package for '.format(self.name[0]) + \ + ' {}, '.format(model.version_types[model.version]) + \ + 'generated by Flopy.' + # integer flag indicating output to be written for each MNW node at + # the end of each stress period + self.wel1flag = wel1flag + # integer flag indicating output to be written for each multi-node well + self.qsumflag = qsumflag + # integer flag indicating output to be written for each MNW node + self.byndflag = byndflag + # number of multi-node wells for which detailed flow, head, and solute + # data are to be saved + self.mnwobs = mnwobs + # list of lists containing wells and related information to be + # output (length = [MNWOBS][4or5]) + self.wellid_unit_qndflag_qhbflag_concflag = wellid_unit_qndflag_qhbflag_concflag + + # -input format checks: + assert self.wel1flag >= 0, 'WEL1flag must be greater than or equal to zero.'
+ assert self.qsumflag >= 0, 'QSUMflag must be greater than or equal to zero.' + assert self.byndflag >= 0, 'BYNDflag must be greater than or equal to zero.' + + if len(self.wellid_unit_qndflag_qhbflag_concflag) != self.mnwobs: + print('WARNING: number of listed well ids to be ' + + 'monitored does not match MNWOBS.') + + self.parent.add_package(self) + + @staticmethod + def load(f, model, nper=None, gwt=False, nsol=1, ext_unit_dict=None): + + if model.verbose: + sys.stdout.write('loading mnw2 package file...\n') + + structured = model.structured + if nper is None: + nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper() + # otherwise iterations from 0, nper won't run + nper = 1 if nper == 0 else nper + + openfile = not hasattr(f, 'read') + if openfile: + filename = f + f = open(filename, 'r') + + # dataset 1 + line = line_parse(next(f)) + wel1flag, qsumflag, byndflag = map(int, line) + if wel1flag > 0: + model.add_pop_key_list(wel1flag) + if qsumflag > 0: + model.add_pop_key_list(qsumflag) + if byndflag > 0: + model.add_pop_key_list(byndflag) + + + # dataset 2 + unique_units = [] + mnwobs = pop_item(line_parse(next(f)), int) + wellid_unit_qndflag_qhbflag_concflag = [] + if mnwobs > 0: + for i in range(mnwobs): + # dataset 3 + line = line_parse(next(f)) + wellid = pop_item(line, str) + unit = pop_item(line, int) + qndflag = pop_item(line, int) + qbhflag = pop_item(line, int) + tmp = [wellid, unit, qndflag, qbhflag] + if gwt and len(line) > 0: + tmp.append(pop_item(line, int)) + wellid_unit_qndflag_qhbflag_concflag.append(tmp) + if unit not in unique_units: + unique_units.append(unit) + + if openfile: + f.close() + + for unit in unique_units: + model.add_pop_key_list(unit) + + # determine specified unit number + nfn = 4 + len(unique_units) + unitnumber = None + filenames = [None for x in range(nfn)] + if ext_unit_dict is not None: + unitnumber, filenames[0] = \ + model.get_ext_dict_attr(ext_unit_dict, + filetype=ModflowMnwi.ftype()) + if wel1flag > 0: + iu, filenames[1] = \ + model.get_ext_dict_attr(ext_unit_dict, unit=wel1flag) + if qsumflag > 0: + iu, filenames[2] = \ + model.get_ext_dict_attr(ext_unit_dict, unit=qsumflag) + if byndflag > 0: + iu, filenames[3] = \ + model.get_ext_dict_attr(ext_unit_dict, unit=byndflag) + idx = 4 + for unit in unique_units: + iu, filenames[idx] = \ + model.get_ext_dict_attr(ext_unit_dict, unit=unit) + idx += 1 + + + return ModflowMnwi(model, wel1flag=wel1flag, qsumflag=qsumflag, + byndflag=byndflag, mnwobs=mnwobs, + wellid_unit_qndflag_qhbflag_concflag=wellid_unit_qndflag_qhbflag_concflag, + extension='mnwi', unitnumber=unitnumber, + filenames=filenames) + + def check(self, f=None, verbose=True, level=1, checktype=None): + """ + Check mnwi package data for common errors. + + Parameters + ---------- + f : str or file handle + String defining file name or file handle for summary file + of check method output. If a string is passed a file handle + is created. If f is None, check method does not write + results to a summary file. (default is None) + verbose : bool + Boolean flag used to determine if check method results are + written to the screen + level : int + Check method analysis level. If level=0, summary checks are + performed. If level=1, full checks are performed. 
+ + Returns + ------- + None + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow.load('model.nam') + >>> m.mnwi.check() + """ + chk = self._get_check(f, verbose, level, checktype) + if "MNW2" not in self.parent.get_package_list(): + desc = '\r MNWI package present without MNW2 package.' + chk._add_to_summary(type='Warning', value=0, + desc=desc) + + chk.summarize() + return chk + + def write_file(self): + """ + Write the package file. + + Returns + ------- + None + + """ + + # -open file for writing + f = open(self.fn_path, 'w') + + # header not supported + # # -write header + # f.write('{}\n'.format(self.heading)) + + # dataset 1 - WEL1flag QSUMflag SYNDflag + line = '{:10d}'.format(self.wel1flag) + line += '{:10d}'.format(self.qsumflag) + line += '{:10d}\n'.format(self.byndflag) + f.write(line) + + # dataset 2 - MNWOBS + f.write('{:10d}\n'.format(self.mnwobs)) + + # dataset 3 - WELLID UNIT QNDflag QBHflag {CONCflag} + # (Repeat MNWOBS times) + nitems = len(self.wellid_unit_qndflag_qhbflag_concflag[0]) + for i, t in enumerate(self.wellid_unit_qndflag_qhbflag_concflag): + wellid = t[0] + unit = t[1] + qndflag = t[2] + qhbflag = t[3] + assert qndflag >= 0, 'QNDflag must be greater than or equal to zero.' + assert qhbflag >= 0, 'QHBflag must be greater than or equal to zero.' + line = '{:20s} '.format(wellid) + line += '{:5d} '.format(unit) + line += '{:5d} '.format(qndflag) + line += '{:5d} '.format(qhbflag) + if nitems == 5: + concflag = t[4] + assert 0 <= concflag <= 3, \ + 'CONCflag must be an integer between 0 and 3.' + assert isinstance(concflag, int), \ + 'CONCflag must be an integer between 0 and 3.' + line += '{:5d} '.format(concflag) + line += '\n' + f.write(line) + + f.close() + + @staticmethod + def ftype(): + return 'MNWI' + + @staticmethod + def defaultunit(): + return 58 diff --git a/flopy/modflow/mfnwt.py b/flopy/modflow/mfnwt.py index d608c815d8..3071593561 100644 --- a/flopy/modflow/mfnwt.py +++ b/flopy/modflow/mfnwt.py @@ -1,490 +1,490 @@ -""" -mfnwt module. Contains the ModflowNwt class. Note that the user can access -the ModflowNwt class as `flopy.modflow.ModflowNwt`. - -Additional information for this MODFLOW package can be found at the `Online -MODFLOW Guide -`_. - -""" - -import sys -from ..pakbase import Package - - -class ModflowNwt(Package): - """ - MODFLOW Nwt Package Class. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to which - this package will be added. - headtol : float - is the maximum head change between outer iterations for solution of - the nonlinear problem. (default is 1e-4). - fluxtol : float - is the maximum l2 norm for solution of the nonlinear problem. - (default is 500). - maxiterout : int - is the maximum number of iterations to be allowed for solution of the - outer (nonlinear) problem. (default is 100). - thickfact : float - is the portion of the cell thickness (length) used for smoothly - adjusting storage and conductance coefficients to zero. - (default is 1e-5). - linmeth : int - is a flag that determines which matrix solver will be used. - A value of 1 indicates GMRES will be used - A value of 2 indicates XMD will be used. - (default is 1). - iprnwt : int - is a flag that indicates whether additional information about solver - convergence will be printed to the main listing file. - (default is 0). 
- ibotav : int - is a flag that indicates whether corrections will be made to - groundwater head relative to the cell-bottom altitude if the cell is - surrounded by dewatered cells (integer). A value of 1 indicates that a - correction will be made and a value of 0 indicates no correction will - be made. (default is 0). - options : string - SPECIFIED indicates that the optional solver input values listed for - items 1 and 2 will be specified in the NWT input file by the user. - SIMPLE indicates that default solver input values will be defined that - work well for nearly linear models. This would be used for models that - do not include nonlinear stress packages, and models that are either - confined or consist of a single unconfined layer that is thick enough - to contain the water table within a single layer. - MODERATE indicates that default solver input values will be defined - that work well for moderately nonlinear models. This would be used for - models that include nonlinear stress packages, and models that consist - of one or more unconfined layers. The MODERATE option should be used - when the SIMPLE option does not result in successful convergence. - COMPLEX indicates that default solver input values will be defined - that work well for highly nonlinear models. This would be used for - models that include nonlinear stress packages, and models that consist - of one or more unconfined layers representing complex geology and sw/gw - interaction. The COMPLEX option should be used when the MODERATE option - does not result in successful convergence. (default is COMPLEX). - Continue : bool - if the model fails to converge during a time step then it will continue - to solve the following time step. (default is False). Note the capital - C on this option so that it doesn't conflict with a reserved Python - language word. - dbdtheta : float - is a coefficient used to reduce the weight applied to the head change - between nonlinear iterations. dbdtheta is used to control oscillations - in head. Values range between 0.0 and 1.0, and larger values increase - the weight (decrease under-relaxation) applied to the head change. - (default is 0.4). - dbdkappa : float - is a coefficient used to increase the weight applied to the head change - between nonlinear iterations. dbdkappa is used to control oscillations - in head. Values range between 0.0 and 1.0, and larger values increase - the weight applied to the head change. (default is 1.e-5). - dbdgamma : float - is a factor (used to weight the head change for the previous and - current iteration. Values range between 0.0 and 1.0, and greater values - apply more weight to the head change calculated during the current - iteration. (default is 0.) - momfact : float - is the momentum coefficient and ranges between 0.0 and 1.0. Greater - values apply more weight to the head change for the current iteration. - (default is 0.1). - backflag : int - is a flag used to specify whether residual control will be used. A - value of 1 indicates that residual control is active and a value of 0 - indicates residual control is inactive. (default is 1). - maxbackiter : int - is the maximum number of reductions (backtracks) in the head change - between nonlinear iterations (integer). A value between 10 and 50 - works well. (default is 50). - backtol : float - is the proportional decrease in the root-mean-squared error of the - groundwater-flow equation used to determine if residual control is - required at the end of a nonlinear iteration. (default is 1.1). 
- backreduce : float - is a reduction factor used for residual control that reduces the head - change between nonlinear iterations. Values should be between 0.0 and - 1.0, where smaller values result in smaller head-change values. - (default 0.7). - maxitinner : int - (GMRES) is the maximum number of iterations for the linear solution. - (default is 50). - ilumethod : int - (GMRES) is the index for selection of the method for incomplete - factorization (ILU) used as a preconditioner. (default is 2). - - ilumethod = 1 is ILU with drop tolerance and fill limit. Fill-in terms - less than drop tolerance times the diagonal are discarded. The number - of fill-in terms in each row of L and U is limited to the fill limit. - The fill-limit largest elements are kept in the L and U factors. - - ilumethod=2 is ILU(k) order k incomplete LU factorization. Fill-in - terms of higher order than k in the factorization are discarded. - levfill : int - (GMRES) is the fill limit for ILUMETHOD = 1 and is the level of fill - for ilumethod = 2. Recommended values: 5-10 for method 1, 0-2 for - method 2. (default is 5). - stoptol : float - (GMRES) is the tolerance for convergence of the linear solver. This is - the residual of the linear equations scaled by the norm of the root - mean squared error. Usually 1.e-8 to 1.e-12 works well. - (default is 1.e-10). - msdr : int - (GMRES) is the number of iterations between restarts of the GMRES - Solver. (default is 15). - iacl : int - (XMD) is a flag for the acceleration method: 0 is conjugate gradient, 1 is ORTHOMIN, - 2 is Bi-CGSTAB. (default is 2). - norder : int - (XMD) is a flag for the scheme of ordering the unknowns: 0 is original - ordering, 1 is RCM ordering, 2 is Minimum Degree ordering. - (default is 1). - level : int - (XMD) is the level of fill for incomplete LU factorization. - (default is 5). - north : int - (XMD) is the number of orthogonalization for the ORTHOMIN acceleration - scheme. A number between 4 and 10 is appropriate. Small values require - less storage but more iterations may be required. This number should - equal 2 for the other acceleration methods. (default is 7). - iredsys : int - (XMD) is a flag for reduced system preconditioning (integer): 0-do not - apply reduced system preconditioning, 1-apply reduced system - preconditioning. (default is 0) - rrctols : int - (XMD) is the residual reduction-convergence criteria. (default is 0.). - idroptol : int - (XMD) is a flag for using drop tolerance in the preconditioning: - 0-don't use drop tolerance, 1-use drop tolerance. (default is 1). - epsrn : float - (XMD) is the drop tolerance for preconditioning. (default is 1.e-4). - hclosexmd : float - (XMD) is the head closure criteria for inner (linear) iterations. - (default is 1.e-4). - mxiterxmd : int - (XMD) is the maximum number of iterations for the linear solution. - (default is 50). - extension : list string - Filename extension (default is 'nwt') - unitnumber : int - File unit number (default is None). - filenames : str or list of str - Filenames to use for the package. If filenames=None the package name - will be created using the model name and package extension. If a - single string is passed the package will be set to the string. - Default is None. 
- - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> nwt = flopy.modflow.ModflowNwt(m) - - """ - - def __init__(self, model, headtol=1E-2, fluxtol=500, maxiterout=100, \ - thickfact=1E-5, linmeth=1, iprnwt=0, ibotav=0, \ - options='COMPLEX', Continue=False, \ - dbdtheta=0.4, dbdkappa=1.e-5, dbdgamma=0., momfact=0.1, \ - backflag=1, maxbackiter=50, backtol=1.1, backreduce=0.70, \ - maxitinner=50, ilumethod=2, levfill=5, stoptol=1.e-10, - msdr=15, \ - iacl=2, norder=1, level=5, north=7, iredsys=0, rrctols=0.0, \ - idroptol=1, epsrn=1.e-4, hclosexmd=1e-4, mxiterxmd=50, \ - extension='nwt', unitnumber=None, filenames=None): - - if model.version != 'mfnwt': - err = 'Error: model version must be mfnwt to use NWT package' - raise Exception(err) - - # set default unit number of one is not specified - if unitnumber is None: - unitnumber = ModflowNwt.defaultunit() - - # set filenames - if filenames is None: - filenames = [None] - elif isinstance(filenames, str): - filenames = [filenames] - - # Fill namefile items - name = [ModflowNwt.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - self.heading = '# {} package for '.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' - self.url = 'nwt_newton_solver.htm' - self.headtol = headtol - self.fluxtol = fluxtol - self.maxiterout = maxiterout - self.thickfact = thickfact - self.linmeth = linmeth - self.iprnwt = iprnwt - self.ibotav = ibotav - if isinstance(options, list): - self.options = options - else: - self.options = [options.upper()] - if Continue: - self.options.append('CONTINUE') - self.dbdtheta = dbdtheta - self.dbdkappa = dbdkappa - self.dbdgamma = dbdgamma - self.momfact = momfact - self.backflag = backflag - self.maxbackiter = maxbackiter - self.backtol = backtol - self.backreduce = backreduce - self.maxitinner = maxitinner - self.ilumethod = ilumethod - self.levfill = levfill - self.stoptol = stoptol - self.msdr = msdr - self.iacl = iacl - self.norder = norder - self.level = level - self.north = north - self.iredsys = iredsys - self.rrctols = rrctols - self.idroptol = idroptol - self.epsrn = epsrn - self.hclosexmd = hclosexmd - self.mxiterxmd = mxiterxmd - self.parent.add_package(self) - - def write_file(self): - """ - Write the package file. 
- - Returns - ------- - None - - """ - # Open file for writing - f = open(self.fn_path, 'w') - f.write('%s\n' % self.heading) - f.write('{:10.3e}{:10.3e}{:10d}{:10.3e}{:10d}{:10d}{:10d}'.format( - self.headtol, self.fluxtol, self.maxiterout, self.thickfact, - self.linmeth, self.iprnwt, self.ibotav)) - isspecified = False - for option in self.options: - f.write('{0:>10s}'.format(option.upper())) - if option.lower() == 'specified': - isspecified = True - if isspecified: - f.write('{0:10.4g}'.format(self.dbdtheta)) - f.write('{0:10.4g}'.format(self.dbdkappa)) - f.write('{0:10.4g}'.format(self.dbdgamma)) - f.write('{0:10.4g}'.format(self.momfact)) - f.write('{0:10d}'.format(self.backflag)) - if self.backflag > 0: - f.write('{0:10d}'.format(self.maxbackiter)) - f.write('{0:10.4g}'.format(self.backtol)) - f.write('{0:10.4g}'.format(self.backreduce)) - f.write('\n') - if self.linmeth == 1: - f.write('{0:10d}'.format(self.maxitinner)) - f.write('{0:10d}'.format(self.ilumethod)) - f.write('{0:10d}'.format(self.levfill)) - f.write('{0:10.4g}'.format(self.stoptol)) - f.write('{0:10d}'.format(self.msdr)) - elif self.linmeth == 2: - f.write('{0:10d}'.format(self.iacl)) - f.write('{0:10d}'.format(self.norder)) - f.write('{0:10d}'.format(self.level)) - f.write('{0:10d}'.format(self.north)) - f.write('{0:10d}'.format(self.iredsys)) - f.write('{0:10.4g}'.format(self.rrctols)) - f.write('{0:10d}'.format(self.idroptol)) - f.write('{0:10.4g}'.format(self.epsrn)) - f.write('{0:10.4g}'.format(self.hclosexmd)) - f.write('{0:10d}'.format(self.mxiterxmd)) - - f.write('\n') - - f.close() - - @staticmethod - def load(f, model, ext_unit_dict=None): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. 
- - Returns - ------- - nwt : ModflowNwt object - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> nwt = flopy.modflow.ModflowPcg.load('test.nwt', m) - - """ - import collections - - if model.verbose: - sys.stdout.write('loading nwt package file...\n') - - if model.version != 'mfnwt': - msg = "Warning: model version was reset from " + \ - "'{}' to 'mfnwt' in order to load a NWT file".format( - model.version) - print(msg) - model.version = 'mfnwt' - - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - - # dataset 0 -- header - flines = [line.strip() for line in f.readlines() if - not line.strip().startswith('#')] - - if openfile: - f.close() - - line = flines.pop(0) - - # dataset 1 - ifrfm = True # model.free_format_input - - vars = (("headtol", float), ("fluxtol", float), ("maxiterout", int), - ("thickfact", float), ("linmeth", int), ("iprnwt", int), - ("ibotav", int), ("options", str), ("Continue", str)) - vars = collections.OrderedDict(vars) - kwargs = {} - if ifrfm: - t = line.split() - else: - t = [] - try: - for idx, (k, c) in enumerate(vars.items()): - t.append(line[idx * 10:(idx + 1) * 10]) - except: - if model.verbose: - print(' did not parse fixed format dataset 1') - try: - for i, (v, c) in enumerate(vars.items()): - kwargs[v] = c(t[i].strip()) - except: - if model.verbose: - print(' did not generate dataset 1 kwargs') - - if "Continue" in kwargs: - if 'CONTINUE' in kwargs["Continue"].upper(): - kwargs["Continue"] = True - else: - kwargs.pop("Continue") - - specdict = (('dbdtheta', float), ('dbdkappa', float), - ('dbdgamma', float), ('momfact', float), - ('backflag', int), ('maxbackiter', int), - ('backtol', float), ('backreduce', float)) - specdict = collections.OrderedDict(specdict) - ipos = len(kwargs) - if kwargs["options"].lower().strip() == "specified": - for (k, c) in specdict.items(): - if ifrfm: - kwargs[k] = c(t[ipos].strip()) - else: - kwargs[k] = c(line[ipos * 10:(ipos + 1) * 10].strip()) - if k == 'backflag': - if kwargs['backflag'] == 0: - break - ipos += 1 - # dataset 2 - try: - line = flines.pop(0) - except: - raise Exception( - 'Error: OPTIONS set to "Specified" but only one line in NWT file') - - lindict = {} - if kwargs['linmeth'] == 1: - lindict = (('maxitinner', int), ('ilumethod', int), - ('levfill', int), ('stoptol', float), - ('msdr', int)) - elif kwargs['linmeth'] == 2: - lindict = (('iacl', int), ('norder', int), ('level', int), - ('north', int), ('iredsys', int), - ('rrctols', float), - ('idroptol', int), ('epsrn', float), - ('hclosexmd', float), - ('mxiterxmd', int)) - lindict = collections.OrderedDict(lindict) - if ifrfm: - t = line.split() - else: - t = [] - for idx, (k, c) in enumerate(lindict.items()): - t.append(line[idx * 10:(idx + 1) * 10]) - for idx, (k, c) in enumerate(lindict.items()): - # forgive missing value for MXITERXMD (last value) - # (apparently NWT runs without it) - if len(t) > 0: - kwargs[k] = c(t.pop(0)) - - # determine specified unit number - # set package unit number - unitnumber = None - filenames = [None] - if ext_unit_dict is not None: - unitnumber, filenames[0] = \ - model.get_ext_dict_attr(ext_unit_dict, - filetype=ModflowNwt.ftype()) - kwargs['unitnumber'] = unitnumber - kwargs['filenames'] = filenames - - # create and return an instance of the nwt class - return ModflowNwt(model, **kwargs) - - @staticmethod - def ftype(): - return 'NWT' - - @staticmethod - def defaultunit(): - return 32 +""" +mfnwt module. Contains the ModflowNwt class. 
Note that the user can access +the ModflowNwt class as `flopy.modflow.ModflowNwt`. + +Additional information for this MODFLOW package can be found at the `Online +MODFLOW Guide +`_. + +""" + +import sys +from ..pakbase import Package + + +class ModflowNwt(Package): + """ + MODFLOW Nwt Package Class. + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to which + this package will be added. + headtol : float + is the maximum head change between outer iterations for solution of + the nonlinear problem. (default is 1e-4). + fluxtol : float + is the maximum l2 norm for solution of the nonlinear problem. + (default is 500). + maxiterout : int + is the maximum number of iterations to be allowed for solution of the + outer (nonlinear) problem. (default is 100). + thickfact : float + is the portion of the cell thickness (length) used for smoothly + adjusting storage and conductance coefficients to zero. + (default is 1e-5). + linmeth : int + is a flag that determines which matrix solver will be used. + A value of 1 indicates GMRES will be used + A value of 2 indicates XMD will be used. + (default is 1). + iprnwt : int + is a flag that indicates whether additional information about solver + convergence will be printed to the main listing file. + (default is 0). + ibotav : int + is a flag that indicates whether corrections will be made to + groundwater head relative to the cell-bottom altitude if the cell is + surrounded by dewatered cells (integer). A value of 1 indicates that a + correction will be made and a value of 0 indicates no correction will + be made. (default is 0). + options : string + SPECIFIED indicates that the optional solver input values listed for + items 1 and 2 will be specified in the NWT input file by the user. + SIMPLE indicates that default solver input values will be defined that + work well for nearly linear models. This would be used for models that + do not include nonlinear stress packages, and models that are either + confined or consist of a single unconfined layer that is thick enough + to contain the water table within a single layer. + MODERATE indicates that default solver input values will be defined + that work well for moderately nonlinear models. This would be used for + models that include nonlinear stress packages, and models that consist + of one or more unconfined layers. The MODERATE option should be used + when the SIMPLE option does not result in successful convergence. + COMPLEX indicates that default solver input values will be defined + that work well for highly nonlinear models. This would be used for + models that include nonlinear stress packages, and models that consist + of one or more unconfined layers representing complex geology and sw/gw + interaction. The COMPLEX option should be used when the MODERATE option + does not result in successful convergence. (default is COMPLEX). + Continue : bool + if the model fails to converge during a time step then it will continue + to solve the following time step. (default is False). Note the capital + C on this option so that it doesn't conflict with a reserved Python + language word. + dbdtheta : float + is a coefficient used to reduce the weight applied to the head change + between nonlinear iterations. dbdtheta is used to control oscillations + in head. Values range between 0.0 and 1.0, and larger values increase + the weight (decrease under-relaxation) applied to the head change. + (default is 0.4). 
+ dbdkappa : float + is a coefficient used to increase the weight applied to the head change + between nonlinear iterations. dbdkappa is used to control oscillations + in head. Values range between 0.0 and 1.0, and larger values increase + the weight applied to the head change. (default is 1.e-5). + dbdgamma : float + is a factor (used to weight the head change for the previous and + current iteration. Values range between 0.0 and 1.0, and greater values + apply more weight to the head change calculated during the current + iteration. (default is 0.) + momfact : float + is the momentum coefficient and ranges between 0.0 and 1.0. Greater + values apply more weight to the head change for the current iteration. + (default is 0.1). + backflag : int + is a flag used to specify whether residual control will be used. A + value of 1 indicates that residual control is active and a value of 0 + indicates residual control is inactive. (default is 1). + maxbackiter : int + is the maximum number of reductions (backtracks) in the head change + between nonlinear iterations (integer). A value between 10 and 50 + works well. (default is 50). + backtol : float + is the proportional decrease in the root-mean-squared error of the + groundwater-flow equation used to determine if residual control is + required at the end of a nonlinear iteration. (default is 1.1). + backreduce : float + is a reduction factor used for residual control that reduces the head + change between nonlinear iterations. Values should be between 0.0 and + 1.0, where smaller values result in smaller head-change values. + (default 0.7). + maxitinner : int + (GMRES) is the maximum number of iterations for the linear solution. + (default is 50). + ilumethod : int + (GMRES) is the index for selection of the method for incomplete + factorization (ILU) used as a preconditioner. (default is 2). + + ilumethod = 1 is ILU with drop tolerance and fill limit. Fill-in terms + less than drop tolerance times the diagonal are discarded. The number + of fill-in terms in each row of L and U is limited to the fill limit. + The fill-limit largest elements are kept in the L and U factors. + + ilumethod=2 is ILU(k) order k incomplete LU factorization. Fill-in + terms of higher order than k in the factorization are discarded. + levfill : int + (GMRES) is the fill limit for ILUMETHOD = 1 and is the level of fill + for ilumethod = 2. Recommended values: 5-10 for method 1, 0-2 for + method 2. (default is 5). + stoptol : float + (GMRES) is the tolerance for convergence of the linear solver. This is + the residual of the linear equations scaled by the norm of the root + mean squared error. Usually 1.e-8 to 1.e-12 works well. + (default is 1.e-10). + msdr : int + (GMRES) is the number of iterations between restarts of the GMRES + Solver. (default is 15). + iacl : int + (XMD) is a flag for the acceleration method: 0 is conjugate gradient, 1 is ORTHOMIN, + 2 is Bi-CGSTAB. (default is 2). + norder : int + (XMD) is a flag for the scheme of ordering the unknowns: 0 is original + ordering, 1 is RCM ordering, 2 is Minimum Degree ordering. + (default is 1). + level : int + (XMD) is the level of fill for incomplete LU factorization. + (default is 5). + north : int + (XMD) is the number of orthogonalization for the ORTHOMIN acceleration + scheme. A number between 4 and 10 is appropriate. Small values require + less storage but more iterations may be required. This number should + equal 2 for the other acceleration methods. (default is 7). 
+ iredsys : int + (XMD) is a flag for reduced system preconditioning (integer): 0-do not + apply reduced system preconditioning, 1-apply reduced system + preconditioning. (default is 0) + rrctols : float + (XMD) is the residual reduction-convergence criteria. (default is 0.). + idroptol : int + (XMD) is a flag for using drop tolerance in the preconditioning: + 0-don't use drop tolerance, 1-use drop tolerance. (default is 1). + epsrn : float + (XMD) is the drop tolerance for preconditioning. (default is 1.e-4). + hclosexmd : float + (XMD) is the head closure criteria for inner (linear) iterations. + (default is 1.e-4). + mxiterxmd : int + (XMD) is the maximum number of iterations for the linear solution. + (default is 50). + extension : str + Filename extension (default is 'nwt') + unitnumber : int + File unit number (default is None). + filenames : str or list of str + Filenames to use for the package. If filenames=None the package name + will be created using the model name and package extension. If a + single string is passed the package will be set to the string. + Default is None. + + Attributes + ---------- + + Methods + ------- + + See Also + -------- + + Notes + ----- + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> nwt = flopy.modflow.ModflowNwt(m) + + """ + + def __init__(self, model, headtol=1E-2, fluxtol=500, maxiterout=100, \ + thickfact=1E-5, linmeth=1, iprnwt=0, ibotav=0, \ + options='COMPLEX', Continue=False, \ + dbdtheta=0.4, dbdkappa=1.e-5, dbdgamma=0., momfact=0.1, \ + backflag=1, maxbackiter=50, backtol=1.1, backreduce=0.70, \ + maxitinner=50, ilumethod=2, levfill=5, stoptol=1.e-10, + msdr=15, \ + iacl=2, norder=1, level=5, north=7, iredsys=0, rrctols=0.0, \ + idroptol=1, epsrn=1.e-4, hclosexmd=1e-4, mxiterxmd=50, \ + extension='nwt', unitnumber=None, filenames=None): + + if model.version != 'mfnwt': + err = 'Error: model version must be mfnwt to use NWT package' + raise Exception(err) + + # set default unit number if one is not specified + if unitnumber is None: + unitnumber = ModflowNwt.defaultunit() + + # set filenames + if filenames is None: + filenames = [None] + elif isinstance(filenames, str): + filenames = [filenames] + + # Fill namefile items + name = [ModflowNwt.ftype()] + units = [unitnumber] + extra = [''] + + # set package name + fname = [filenames[0]] + + # Call ancestor's init to set self.parent, extension, name and unit number + Package.__init__(self, model, extension=extension, name=name, + unit_number=units, extra=extra, filenames=fname) + + self.heading = '# {} package for '.format(self.name[0]) + \ + ' {}, '.format(model.version_types[model.version]) + \ + 'generated by Flopy.'
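+        # the attributes stored below mirror the NWT input structure:
+        # dataset 1 (headtol through backreduce) and dataset 2 for either
+        # GMRES (linmeth=1: maxitinner through msdr) or XMD (linmeth=2:
+        # iacl through mxiterxmd)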
+ self.url = 'nwt_newton_solver.htm' + self.headtol = headtol + self.fluxtol = fluxtol + self.maxiterout = maxiterout + self.thickfact = thickfact + self.linmeth = linmeth + self.iprnwt = iprnwt + self.ibotav = ibotav + if isinstance(options, list): + self.options = options + else: + self.options = [options.upper()] + if Continue: + self.options.append('CONTINUE') + self.dbdtheta = dbdtheta + self.dbdkappa = dbdkappa + self.dbdgamma = dbdgamma + self.momfact = momfact + self.backflag = backflag + self.maxbackiter = maxbackiter + self.backtol = backtol + self.backreduce = backreduce + self.maxitinner = maxitinner + self.ilumethod = ilumethod + self.levfill = levfill + self.stoptol = stoptol + self.msdr = msdr + self.iacl = iacl + self.norder = norder + self.level = level + self.north = north + self.iredsys = iredsys + self.rrctols = rrctols + self.idroptol = idroptol + self.epsrn = epsrn + self.hclosexmd = hclosexmd + self.mxiterxmd = mxiterxmd + self.parent.add_package(self) + + def write_file(self): + """ + Write the package file. + + Returns + ------- + None + + """ + # Open file for writing + f = open(self.fn_path, 'w') + f.write('%s\n' % self.heading) + f.write('{:10.3e}{:10.3e}{:10d}{:10.3e}{:10d}{:10d}{:10d}'.format( + self.headtol, self.fluxtol, self.maxiterout, self.thickfact, + self.linmeth, self.iprnwt, self.ibotav)) + isspecified = False + for option in self.options: + f.write('{0:>10s}'.format(option.upper())) + if option.lower() == 'specified': + isspecified = True + if isspecified: + f.write('{0:10.4g}'.format(self.dbdtheta)) + f.write('{0:10.4g}'.format(self.dbdkappa)) + f.write('{0:10.4g}'.format(self.dbdgamma)) + f.write('{0:10.4g}'.format(self.momfact)) + f.write('{0:10d}'.format(self.backflag)) + if self.backflag > 0: + f.write('{0:10d}'.format(self.maxbackiter)) + f.write('{0:10.4g}'.format(self.backtol)) + f.write('{0:10.4g}'.format(self.backreduce)) + f.write('\n') + if self.linmeth == 1: + f.write('{0:10d}'.format(self.maxitinner)) + f.write('{0:10d}'.format(self.ilumethod)) + f.write('{0:10d}'.format(self.levfill)) + f.write('{0:10.4g}'.format(self.stoptol)) + f.write('{0:10d}'.format(self.msdr)) + elif self.linmeth == 2: + f.write('{0:10d}'.format(self.iacl)) + f.write('{0:10d}'.format(self.norder)) + f.write('{0:10d}'.format(self.level)) + f.write('{0:10d}'.format(self.north)) + f.write('{0:10d}'.format(self.iredsys)) + f.write('{0:10.4g}'.format(self.rrctols)) + f.write('{0:10d}'.format(self.idroptol)) + f.write('{0:10.4g}'.format(self.epsrn)) + f.write('{0:10.4g}'.format(self.hclosexmd)) + f.write('{0:10d}'.format(self.mxiterxmd)) + + f.write('\n') + + f.close() + + @staticmethod + def load(f, model, ext_unit_dict=None): + """ + Load an existing package. + + Parameters + ---------- + f : filename or file handle + File to load. + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to + which this package will be added. + ext_unit_dict : dictionary, optional + If the arrays in the file are specified using EXTERNAL, + or older style array control records, then `f` should be a file + handle. In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. 
+ + Returns + ------- + nwt : ModflowNwt object + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> nwt = flopy.modflow.ModflowNwt.load('test.nwt', m) + + """ + import collections + + if model.verbose: + sys.stdout.write('loading nwt package file...\n') + + if model.version != 'mfnwt': + msg = "Warning: model version was reset from " + \ + "'{}' to 'mfnwt' in order to load a NWT file".format( + model.version) + print(msg) + model.version = 'mfnwt' + + openfile = not hasattr(f, 'read') + if openfile: + filename = f + f = open(filename, 'r') + + # dataset 0 -- header + flines = [line.strip() for line in f.readlines() if + not line.strip().startswith('#')] + + if openfile: + f.close() + + line = flines.pop(0) + + # dataset 1 + ifrfm = True # model.free_format_input + + vars = (("headtol", float), ("fluxtol", float), ("maxiterout", int), + ("thickfact", float), ("linmeth", int), ("iprnwt", int), + ("ibotav", int), ("options", str), ("Continue", str)) + vars = collections.OrderedDict(vars) + kwargs = {} + if ifrfm: + t = line.split() + else: + t = [] + try: + for idx, (k, c) in enumerate(vars.items()): + t.append(line[idx * 10:(idx + 1) * 10]) + except: + if model.verbose: + print(' did not parse fixed format dataset 1') + try: + for i, (v, c) in enumerate(vars.items()): + kwargs[v] = c(t[i].strip()) + except: + if model.verbose: + print(' did not generate dataset 1 kwargs') + + if "Continue" in kwargs: + if 'CONTINUE' in kwargs["Continue"].upper(): + kwargs["Continue"] = True + else: + kwargs.pop("Continue") + + specdict = (('dbdtheta', float), ('dbdkappa', float), + ('dbdgamma', float), ('momfact', float), + ('backflag', int), ('maxbackiter', int), + ('backtol', float), ('backreduce', float)) + specdict = collections.OrderedDict(specdict) + ipos = len(kwargs) + if kwargs["options"].lower().strip() == "specified": + for (k, c) in specdict.items(): + if ifrfm: + kwargs[k] = c(t[ipos].strip()) + else: + kwargs[k] = c(line[ipos * 10:(ipos + 1) * 10].strip()) + if k == 'backflag': + if kwargs['backflag'] == 0: + break + ipos += 1 + # dataset 2 + try: + line = flines.pop(0) + except: + raise Exception( + 'Error: OPTIONS set to "Specified" but only one line in NWT file') + + lindict = {} + if kwargs['linmeth'] == 1: + lindict = (('maxitinner', int), ('ilumethod', int), + ('levfill', int), ('stoptol', float), + ('msdr', int)) + elif kwargs['linmeth'] == 2: + lindict = (('iacl', int), ('norder', int), ('level', int), + ('north', int), ('iredsys', int), + ('rrctols', float), + ('idroptol', int), ('epsrn', float), + ('hclosexmd', float), + ('mxiterxmd', int)) + lindict = collections.OrderedDict(lindict) + if ifrfm: + t = line.split() + else: + t = [] + for idx, (k, c) in enumerate(lindict.items()): + t.append(line[idx * 10:(idx + 1) * 10]) + for idx, (k, c) in enumerate(lindict.items()): + # forgive missing value for MXITERXMD (last value) + # (apparently NWT runs without it) + if len(t) > 0: + kwargs[k] = c(t.pop(0)) + + # determine specified unit number + # set package unit number + unitnumber = None + filenames = [None] + if ext_unit_dict is not None: + unitnumber, filenames[0] = \ + model.get_ext_dict_attr(ext_unit_dict, + filetype=ModflowNwt.ftype()) + kwargs['unitnumber'] = unitnumber + kwargs['filenames'] = filenames + + # create and return an instance of the nwt class + return ModflowNwt(model, **kwargs) + + @staticmethod + def ftype(): + return 'NWT' + + @staticmethod + def defaultunit(): + return 32 diff --git a/flopy/modflow/mfoc.py b/flopy/modflow/mfoc.py
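A short usage sketch for the ModflowNwt class above; the model name and solver values are illustrative, and SPECIFIED is used so that the dataset 1 and 2 values are written explicitly by write_file():

import flopy

m = flopy.modflow.Modflow('nwt_demo', version='mfnwt')  # NWT requires mfnwt
nwt = flopy.modflow.ModflowNwt(m, options='SPECIFIED', linmeth=1,
                               maxitinner=50, ilumethod=2, levfill=5,
                               stoptol=1e-10, msdr=15)
nwt.write_file()  # writes datasets 1 and 2 using the logic shown above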
index 573d448959..2005f750c9 100644 --- a/flopy/modflow/mfoc.py +++ b/flopy/modflow/mfoc.py @@ -1,1066 +1,1066 @@ -""" -mfoc module. Contains the ModflowOc class. Note that the user can access -the ModflowOc class as `flopy.modflow.ModflowOc`. - -Additional information for this MODFLOW package can be found at the `Online -MODFLOW Guide -`_. - -""" -import os -import sys - -from ..pakbase import Package - - -class ModflowOc(Package): - """ - MODFLOW Output Control Package Class. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to which - this package will be added. - ihedfm : int - is a code for the format in which heads will be printed. - (default is 0). - iddnfm : int - is a code for the format in which drawdown will be printed. - (default is 0). - chedfm : string - is a character value that specifies the format for saving heads. - The format must contain 20 characters or less and must be a valid - Fortran format that is enclosed in parentheses. The format must be - enclosed in apostrophes if it contains one or more blanks or commas. - The optional word LABEL after the format is used to indicate that - each layer of output should be preceded with a line that defines the - output (simulation time, the layer being output, and so forth). If - there is no record specifying CHEDFM, then heads are written to a - binary (unformatted) file. Binary files are usually more compact than - text files, but they are not generally transportable among different - computer operating systems or different Fortran compilers. - (default is None) - cddnfm : string - is a character value that specifies the format for saving drawdown. - The format must contain 20 characters or less and must be a valid - Fortran format that is enclosed in parentheses. The format must be - enclosed in apostrophes if it contains one or more blanks or commas. - The optional word LABEL after the format is used to indicate that - each layer of output should be preceded with a line that defines the - output (simulation time, the layer being output, and so forth). If - there is no record specifying CDDNFM, then drawdowns are written to a - binary (unformatted) file. Binary files are usually more compact than - text files, but they are not generally transportable among different - computer operating systems or different Fortran compilers. - (default is None) - cboufm : string - is a character value that specifies the format for saving ibound. - The format must contain 20 characters or less and must be a valid - Fortran format that is enclosed in parentheses. The format must be - enclosed in apostrophes if it contains one or more blanks or commas. - The optional word LABEL after the format is used to indicate that - each layer of output should be preceded with a line that defines the - output (simulation time, the layer being output, and so forth). If - there is no record specifying CBOUFM, then ibounds are written to a - binary (unformatted) file. Binary files are usually more compact than - text files, but they are not generally transportable among different - computer operating systems or different Fortran compilers. - (default is None) - stress_period_data : dictionary of lists - Dictionary key is a tuple with the zero-based period and step - (IPEROC, ITSOC) for each print/save option list. If stress_period_data - is None, then heads are saved for the last time step of each stress - period. 
(default is None) - - The list can have any valid MODFLOW OC print/save option: - PRINT HEAD - PRINT DRAWDOWN - PRINT BUDGET - SAVE HEAD - SAVE DRAWDOWN - SAVE BUDGET - SAVE IBOUND - - The lists can also include (1) DDREFERENCE in the list to reset - drawdown reference to the period and step and (2) a list of layers - for PRINT HEAD, SAVE HEAD, PRINT DRAWDOWN, SAVE DRAWDOWN, and - SAVE IBOUND. - - stress_period_data = {(0,1):['save head']}) would save the head for - the second timestep in the first stress period. - - compact : boolean - Save results in compact budget form. (default is True). - extension : list of strings - (default is ['oc', 'hds', 'ddn', 'cbc', 'ibo']). - unitnumber : list of ints - (default is [14, 51, 52, 53, 0]). - filenames : str or list of str - Filenames to use for the package and the head, drawdown, budget (not - used), and ibound output files. If filenames=None the package name - will be created using the model name and package extension and the - output file names will be created using the model name and extensions. - If a single string is passed the package will be set to the string and - output names will be created using the model name and head, drawdown, - budget, and ibound extensions. To define the names for all package - files (input and output) the length of the list of strings should be 5. - Default is None. - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - The "words" method for specifying output control is the only option - available. Also, the "compact" budget should normally be used as it - produces files that are typically much smaller. The compact budget form is - also a requirement for using the MODPATH particle tracking program. - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> spd = {(0, 0): ['print head'], - ... (0, 1): [], - ... (0, 249): ['print head'], - ... (0, 250): [], - ... (0, 499): ['print head', 'save ibound'], - ... (0, 500): [], - ... (0, 749): ['print head', 'ddreference'], - ... (0, 750): [], - ... (0, 999): ['print head']} - >>> oc = flopy.modflow.ModflowOc(m, stress_period_data=spd, cboufm='(20i5)') - - """ - - def __init__(self, model, \ - ihedfm=0, iddnfm=0, chedfm=None, cddnfm=None, - cboufm=None, compact=True, - stress_period_data={(0, 0): ['save head']}, - extension=['oc', 'hds', 'ddn', 'cbc', 'ibo'], - unitnumber=None, filenames=None, label='LABEL', **kwargs): - - """ - Package constructor. 
- - """ - if unitnumber is None: - unitnumber = ModflowOc.defaultunit() - elif isinstance(unitnumber, list): - if len(unitnumber) < 5: - for idx in range(len(unitnumber), 6): - unitnumber.append(0) - self.label = label - # set filenames - if filenames is None: - filenames = [None, None, None, None, None] - elif isinstance(filenames, str): - filenames = [filenames, None, None, None, None] - elif isinstance(filenames, list): - if len(filenames) < 5: - for idx in range(len(filenames), 5): - filenames.append(None) - - # support structured and unstructured dis - dis = model.get_package('DIS') - if dis is None: - dis = model.get_package('DISU') - - if stress_period_data is None: - stress_period_data = { - (kper, dis.nstp.array[kper] - 1): ['save head'] for - kper in range(dis.nper)} - - # process kwargs - if 'save_every' in kwargs: - save_every = int(kwargs.pop('save_every')) - else: - save_every = None - if save_every is not None: - if 'save_types' in kwargs: - save_types = kwargs.pop('save_types') - if isinstance(save_types, str): - save_types = [save_types] - else: - save_types = ['save head', 'print budget'] - if 'save_start' in kwargs: - save_start = int(kwargs.pop('save_start')) - else: - save_start = 1 - stress_period_data = {} - for kper in range(dis.nper): - icnt = save_start - for kstp in range(dis.nstp[kper]): - if icnt == save_every: - stress_period_data[(kper, kstp)] = save_types - icnt = 0 - else: - stress_period_data[(kper, kstp)] = [] - icnt += 1 - - # set output unit numbers based on oc settings - self.savehead, self.saveddn, self.savebud, self.saveibnd = False, \ - False, \ - False, \ - False - for key, value in stress_period_data.items(): - tlist = list(value) - for t in tlist: - if 'save head' in t.lower(): - self.savehead = True - if unitnumber[1] == 0: - unitnumber[1] = 51 - if 'save drawdown' in t.lower(): - self.saveddn = True - if unitnumber[2] == 0: - unitnumber[2] = 52 - if 'save budget' in t.lower(): - self.savebud = True - if unitnumber[3] == 0 and filenames is None: - unitnumber[3] = 53 - if 'save ibound' in t.lower(): - self.saveibnd = True - if unitnumber[4] == 0: - unitnumber[4] = 54 - - # do not create head, ddn, or cbc output files if output is not - # specified in the oc stress_period_data - if not self.savehead: - unitnumber[1] = 0 - if not self.saveddn: - unitnumber[2] = 0 - if not self.savebud: - unitnumber[3] = 0 - if not self.saveibnd: - unitnumber[4] = 0 - - self.iuhead = unitnumber[1] - self.iuddn = unitnumber[2] - self.iubud = unitnumber[3] - self.iuibnd = unitnumber[4] - - # add output files - # head file - if self.savehead: - iu = unitnumber[1] - binflag = True - if chedfm is not None: - binflag = False - fname = filenames[1] - model.add_output_file(iu, fname=fname, extension=extension[1], - binflag=binflag) - # drawdown file - if self.saveddn: - iu = unitnumber[2] - binflag = True - if cddnfm is not None: - binflag = False - fname = filenames[2] - model.add_output_file(iu, fname=fname, extension=extension[2], - binflag=binflag) - # budget file - # Nothing is needed for the budget file - - # ibound file - ibouun = unitnumber[4] - if self.saveibnd: - iu = unitnumber[4] - binflag = True - if cboufm is not None: - binflag = False - fname = filenames[4] - model.add_output_file(iu, fname=fname, extension=extension[4], - binflag=binflag) - - name = [ModflowOc.ftype()] - extra = [''] - extension = [extension[0]] - unitnumber = unitnumber[0] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and 
unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=unitnumber, - extra=extra, filenames=fname) - - self.heading = '# {} package for '.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' - - self.url = 'oc.htm' - self.ihedfm = ihedfm - self.iddnfm = iddnfm - self.chedfm = chedfm - self.cddnfm = cddnfm - - self.ibouun = ibouun - self.cboufm = cboufm - - self.compact = compact - - self.stress_period_data = stress_period_data - - self.parent.add_package(self) - - def check(self, f=None, verbose=True, level=1, checktype=None): - """ - Check package data for common errors. - - Parameters - ---------- - f : str or file handle - String defining file name or file handle for summary file - of check method output. If a string is passed a file handle - is created. If f is None, check method does not write - results to a summary file. (default is None) - verbose : bool - Boolean flag used to determine if check method results are - written to the screen. - level : int - Check method analysis level. If level=0, summary checks are - performed. If level=1, full checks are performed. - - Returns - ------- - None - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow.load('model.nam') - >>> m.oc.check() - - """ - chk = self._get_check(f, verbose, level, checktype) - dis = self.parent.get_package('DIS') - if dis is None: - dis = self.parent.get_package('DISU') - if dis is None: - chk._add_to_summary('Error', package='OC', - desc='DIS package not available') - else: - # generate possible actions expected - expected_actions = [] - for first in ['PRINT', 'SAVE']: - for second in ['HEAD', 'DRAWDOWN', 'BUDGET', 'IBOUND']: - expected_actions.append([first, second]) - # remove exception - del expected_actions[expected_actions.index(['PRINT', 'IBOUND'])] - keys = list(self.stress_period_data.keys()) - for kper in range(dis.nper): - for kstp in range(dis.nstp[kper]): - kperkstp = (kper, kstp) - if kperkstp in keys: - del keys[keys.index(kperkstp)] - data = self.stress_period_data[kperkstp] - if not isinstance(data, list): - data = [data] - for action in data: - words = action.upper().split() - if len(words) < 2: - chk._add_to_summary( - 'Warning', package='OC', # value=kperkstp, - desc='action {!r} ignored; too few words' - .format(action)) - elif words[0:2] not in expected_actions: - chk._add_to_summary( - 'Warning', package='OC', # value=kperkstp, - desc='action {!r} ignored'.format(action)) - # TODO: check data list of layers for some actions - for kperkstp in keys: - # repeat as many times as remaining keys not used - chk._add_to_summary( - 'Warning', package='OC', # value=kperkstp, - desc='action(s) defined in OC stress_period_data ignored ' - 'as they are not part the stress periods defined by DIS') - chk.summarize() - return chk - - def write_file(self): - """ - Write the package file. 
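(The save_every, save_types, and save_start keywords consumed from **kwargs in the constructor above are easy to miss because they are not named parameters. A minimal sketch of the shortcut; a DIS package must already be attached, since nper and nstp are read from it:

>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> dis = flopy.modflow.ModflowDis(m, nper=2, perlen=[1., 1.], nstp=[4, 4])
>>> oc = flopy.modflow.ModflowOc(m, save_every=2,
...                              save_types=['save head', 'print budget'])

With the default save_start=1, every second time step of each stress period receives the save_types list and the remaining steps get empty option lists.)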
- - Returns - ------- - None - - """ - f_oc = open(self.fn_path, 'w') - f_oc.write('{}\n'.format(self.heading)) - - # write options - line = 'HEAD PRINT FORMAT {0:3.0f}\n'.format(self.ihedfm) - f_oc.write(line) - if self.chedfm is not None: - line = 'HEAD SAVE FORMAT {0:20s} {1}\n'.format(self.chedfm, - self.label) - f_oc.write(line) - if self.savehead: - line = 'HEAD SAVE UNIT {0:5.0f}\n'.format(self.iuhead) - f_oc.write(line) - - f_oc.write('DRAWDOWN PRINT FORMAT {0:3.0f}\n'.format(self.iddnfm)) - if self.cddnfm is not None: - line = 'DRAWDOWN SAVE FORMAT {0:20s} {1}\n'.format(self.cddnfm, - self.label) - f_oc.write(line) - if self.saveddn: - line = 'DRAWDOWN SAVE UNIT {0:5.0f}\n'.format(self.iuddn) - f_oc.write(line) - - if self.saveibnd: - if self.cboufm is not None: - line = 'IBOUND SAVE FORMAT {0:20s} {1}\n'.format(self.cboufm, - self.label) - f_oc.write(line) - line = 'IBOUND SAVE UNIT {0:5.0f}\n'.format(self.iuibnd) - f_oc.write(line) - - if self.compact: - f_oc.write('COMPACT BUDGET AUX\n') - - # add a line separator between header and stress - # period data - f_oc.write('\n') - - # write the transient sequence described by the data dict - nr, nc, nl, nper = self.parent.get_nrow_ncol_nlay_nper() - dis = self.parent.get_package('DIS') - if dis is None: - dis = self.parent.get_package('DISU') - nstp = dis.nstp - - keys = list(self.stress_period_data.keys()) - keys.sort() - - data = [] - ddnref = '' - lines = '' - for kper in range(nper): - for kstp in range(nstp[kper]): - kperkstp = (kper, kstp) - if kperkstp in keys: - data = self.stress_period_data[kperkstp] - if not isinstance(data, list): - data = [data] - lines = '' - if len(data) > 0: - for item in data: - if 'DDREFERENCE' in item.upper(): - ddnref = item.lower() - else: - lines += ' {}\n'.format(item) - if len(lines) > 0: - f_oc.write( - 'period {} step {} {}\n'.format(kper + 1, kstp + 1, - ddnref)) - f_oc.write(lines) - f_oc.write('\n') - ddnref = '' - lines = '' - - # close oc file - f_oc.close() - - def _set_singlebudgetunit(self, budgetunit): - if budgetunit is None: - budgetunit = self.parent.next_ext_unit() - self.iubud = budgetunit - - def _set_budgetunit(self): - iubud = [] - for i, pp in enumerate(self.parent.packagelist): - if hasattr(pp, 'ipakcb'): - if pp.ipakcb > 0: - iubud.append(pp.ipakcb) - if len(iubud) < 1: - iubud = None - elif len(iubud) == 1: - iubud = iubud[0] - self.iubud = iubud - - def get_budgetunit(self): - """ - Get the budget file unit number(s). - - Parameters - ---------- - None - - Returns - ------- - iubud : integer ot list of integers - Unit number or list of cell-by-cell budget output unit numbers. - None is returned if ipakcb is less than one for all packages. - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> dis = flopy.modflow.ModflowDis(m) - >>> bas = flopy.modflow.ModflowBas(m) - >>> lpf = flopy.modflow.ModflowLpf(m, ipakcb=100) - >>> wel_data = {0: [[0, 0, 0, -1000.]]} - >>> wel = flopy.modflow.ModflowWel(m, ipakcb=101, - ... stress_period_data=wel_data) - >>> spd = {(0, 0): ['save head', 'save budget']} - >>> oc = flopy.modflow.ModflowOc(m, stress_period_data=spd) - >>> oc.get_budgetunit() - [100, 101] - - """ - # set iubud by iterating through the packages - self._set_budgetunit() - return self.iubud - - def reset_budgetunit(self, budgetunit=None, fname=None): - """ - Reset the cell-by-cell budget unit (ipakcb) for every package that - can write cell-by-cell data when SAVE BUDGET is specified in the - OC file to the specified budgetunit. 
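(For orientation, the words-style text that write_file above emits for the default stress_period_data={(0, 0): ['save head']} with compact budgets looks roughly like the following sketch; the heading comment line is omitted and unit 51 is the auto-assigned head unit:

HEAD PRINT FORMAT   0
HEAD SAVE UNIT    51
DRAWDOWN PRINT FORMAT   0
COMPACT BUDGET AUX

period 1 step 1
 save head
)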
- - Parameters - ---------- - budgetunit : int, optional - Unit number for cell-by-cell output data. If budgetunit is None - then the next available external unit number is assigned. Default - is None - fname : string, optional - Filename to use for cell-by-cell output file. If fname=None the - cell-by-cell output file will be created using the model name and - a '.cbc' file extension. Default is None. - - Returns - ------- - None - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> dis = flopy.modflow.ModflowDis(m) - >>> bas = flopy.modflow.ModflowBas(m) - >>> lpf = flopy.modflow.ModflowLpf(m, ipakcb=100) - >>> wel_data = {0: [[0, 0, 0, -1000.]]} - >>> wel = flopy.modflow.ModflowWel(m, ipakcb=101, - ... stress_period_data=wel_data) - >>> spd = {(0, 0): ['save head', 'save budget']} - >>> oc = flopy.modflow.ModflowOc(m, stress_period_data=spd) - >>> oc.reset_budgetunit(budgetunit=1053, fname='test.cbc') - - """ - - # remove existing output file - for pp in self.parent.packagelist: - if hasattr(pp, 'ipakcb'): - if pp.ipakcb > 0: - self.parent.remove_output(unit=pp.ipakcb) - pp.ipakcb = 0 - - # set the unit number used for all cell-by-cell output - self._set_singlebudgetunit(budgetunit) - - # add output file - for pp in self.parent.packagelist: - if hasattr(pp, 'ipakcb'): - pp.ipakcb = self.iubud - self.parent.add_output_file(pp.ipakcb, fname=fname, - package=pp.name) - - return - - @staticmethod - def get_ocoutput_units(f, ext_unit_dict=None): - """ - Get head and drawdown units from a OC file. - - Parameters - ---------- - f : filename or file handle - File to load. - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - - Returns - ------- - ihedun : integer - Unit number of the head file. - fhead : str - File name of the head file. Is only defined if ext_unit_dict is - passed and the unit number is a valid key. - , headfilename, oc : ModflowOc object - ModflowOc object. - iddnun : integer - Unit number of the drawdown file. - fddn : str - File name of the drawdown file. Is only defined if ext_unit_dict is - passed and the unit number is a valid key. 
- - Examples - -------- - - >>> import flopy - >>> ihds, hf, iddn, df = flopy.modflow.ModflowOc.get_ocoutput_units('test.oc') - - """ - - # initialize - ihedun = 0 - iddnun = 0 - fhead = None - fddn = None - - numericformat = False - - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - - # read header - ipos = f.tell() - while True: - line = f.readline() - if line[0] == '#': - continue - elif line[0] == []: - continue - else: - lnlst = line.strip().split() - try: - ihedun, iddnun = int(lnlst[2]), int(lnlst[3]) - numericformat = True - except: - f.seek(ipos) - # exit so the remaining data can be read - # from the file based on numericformat - break - # read word formats - if not numericformat: - while True: - line = f.readline() - if len(line) < 1: - break - lnlst = line.strip().split() - if line[0] == '#': - continue - - # skip blank line in the OC file - if len(lnlst) < 1: - continue - - # dataset 1 values - elif ('HEAD' in lnlst[0].upper() and - 'SAVE' in lnlst[1].upper() and - 'UNIT' in lnlst[2].upper() - ): - ihedun = int(lnlst[3]) - elif ('DRAWDOWN' in lnlst[0].upper() and - 'SAVE' in lnlst[1].upper() and - 'UNIT' in lnlst[2].upper() - ): - iddnun = int(lnlst[3]) - # dataset 2 - elif 'PERIOD' in lnlst[0].upper(): - break - # - if ext_unit_dict is not None: - if ihedun in ext_unit_dict: - fhead = ext_unit_dict[ihedun] - if iddnun in ext_unit_dict: - fddn = ext_unit_dict[iddnun] - - if openfile: - f.close() - - # return - return ihedun, fhead, iddnun, fddn - - @staticmethod - def load(f, model, nper=None, nstp=None, nlay=None, ext_unit_dict=None): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. - nper : int - The number of stress periods. If nper is None, then nper will be - obtained from the model object. (default is None). - nstp : int or list of ints - Integer of list of integers containing the number of time steps - in each stress period. If nstp is None, then nstp will be obtained - from the DIS or DISU packages attached to the model object. The - length of nstp must be equal to nper. (default is None). - nlay : int - The number of model layers. If nlay is None, then nnlay will be - obtained from the model object. nlay only needs to be specified - if an empty model object is passed in and the oc file being loaded - is defined using numeric codes. (default is None). - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - - Returns - ------- - oc : ModflowOc object - ModflowOc object. 
- - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> oc = flopy.modflow.ModflowOc.load('test.oc', m) - - """ - - if model.verbose: - sys.stdout.write('loading oc package file...\n') - - # set nper - if nper is None or nlay is None: - nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper() - - if nper == 0 or nlay == 0: - msg = 'discretization package not defined for the model, ' + \ - 'nper and nlay must be provided to the .load() method' - raise ValueError(msg) - - - # set nstp - if nstp is None: - dis = model.get_package('DIS') - if dis is None: - dis = model.get_package('DISU') - if dis is None: - msg = 'discretization package not defined for the model, ' + \ - 'a nstp list must be provided to the .load() method' - raise ValueError(msg) - nstp = list(dis.nstp.array) - else: - if isinstance(nstp, (int, float)): - nstp = [int(nstp)] - - # validate the size of nstp - if len(nstp) != nper: - msg = 'nstp must be a list with {} entries, '.format(nper) + \ - 'provided nstp list has {} entries.'.format(len(nstp)) - raise IOError(msg) - - # initialize - ihedfm = 0 - iddnfm = 0 - ihedun = 0 - iddnun = 0 - ibouun = 0 - compact = False - chedfm = None - cddnfm = None - cboufm = None - - numericformat = False - - stress_period_data = {} - - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - else: - filename = os.path.basename(f.name) - - # read header - ipos = f.tell() - while True: - line = f.readline() - if line[0] == '#': - continue - elif line[0] == []: - continue - else: - lnlst = line.strip().split() - try: - ihedfm, iddnfm = int(lnlst[0]), int(lnlst[1]) - ihedun, iddnun = int(lnlst[2]), int(lnlst[3]) - numericformat = True - except: - f.seek(ipos) - # exit so the remaining data can be read - # from the file based on numericformat - break - # set pointer to current position in the OC file - ipos = f.tell() - - # process each line - lines = [] - if numericformat == True: - for iperoc in range(nper): - for itsoc in range(nstp[iperoc]): - line = f.readline() - lnlst = line.strip().split() - incode, ihddfl = int(lnlst[0]), int(lnlst[1]) - ibudfl, icbcfl = int(lnlst[2]), int(lnlst[3]) - # new print and save flags are needed if incode is not - # less than 0. 
- if incode >= 0: - lines = [] - # use print options from the last time step - else: - if len(lines) > 0: - stress_period_data[(iperoc, itsoc)] = list(lines) - continue - # set print and save budget flags - if ibudfl != 0: - lines.append('PRINT BUDGET') - if icbcfl != 0: - lines.append('SAVE BUDGET') - if incode == 0: - line = f.readline() - lnlst = line.strip().split() - hdpr, ddpr = int(lnlst[0]), int(lnlst[1]) - hdsv, ddsv = int(lnlst[2]), int(lnlst[3]) - if hdpr != 0: - lines.append('PRINT HEAD') - if ddpr != 0: - lines.append('PRINT DRAWDOWN') - if hdsv != 0: - lines.append('SAVE HEAD') - if ddsv != 0: - lines.append('SAVE DRAWDOWN') - elif incode > 0: - headprint = '' - headsave = '' - ddnprint = '' - ddnsave = '' - for k in range(nlay): - line = f.readline() - lnlst = line.strip().split() - hdpr, ddpr = int(lnlst[0]), int(lnlst[1]) - hdsv, ddsv = int(lnlst[2]), int(lnlst[3]) - if hdpr != 0: - headprint += ' {}'.format(k + 1) - if ddpr != 0: - ddnprint += ' {}'.format(k + 1) - if hdsv != 0: - headsave += ' {}'.format(k + 1) - if ddsv != 0: - ddnsave += ' {}'.format(k + 1) - if len(headprint) > 0: - lines.append('PRINT HEAD' + headprint) - if len(ddnprint) > 0: - lines.append('PRINT DRAWDOWN' + ddnprint) - if len(headsave) > 0: - lines.append('SAVE HEAD' + headsave) - if len(ddnsave) > 0: - lines.append('SAVE DRAWDOWN' + ddnsave) - stress_period_data[(iperoc, itsoc)] = list(lines) - else: - iperoc, itsoc = 0, 0 - while True: - line = f.readline() - if len(line) < 1: - break - lnlst = line.strip().split() - if line[0] == '#': - continue - - # added by JJS 12/12/14 to avoid error when there is a blank line in the OC file - if lnlst == []: - continue - # end add - - # dataset 1 values - elif ('HEAD' in lnlst[0].upper() and - 'PRINT' in lnlst[1].upper() and - 'FORMAT' in lnlst[2].upper() - ): - ihedfm = int(lnlst[3]) - elif ('HEAD' in lnlst[0].upper() and - 'SAVE' in lnlst[1].upper() and - 'FORMAT' in lnlst[2].upper() - ): - chedfm = lnlst[3] - elif ('HEAD' in lnlst[0].upper() and - 'SAVE' in lnlst[1].upper() and - 'UNIT' in lnlst[2].upper() - ): - ihedun = int(lnlst[3]) - elif ('DRAWDOWN' in lnlst[0].upper() and - 'PRINT' in lnlst[1].upper() and - 'FORMAT' in lnlst[2].upper() - ): - iddnfm = int(lnlst[3]) - elif ('DRAWDOWN' in lnlst[0].upper() and - 'SAVE' in lnlst[1].upper() and - 'FORMAT' in lnlst[2].upper() - ): - cddnfm = lnlst[3] - elif ('DRAWDOWN' in lnlst[0].upper() and - 'SAVE' in lnlst[1].upper() and - 'UNIT' in lnlst[2].upper() - ): - iddnun = int(lnlst[3]) - elif ('IBOUND' in lnlst[0].upper() and - 'SAVE' in lnlst[1].upper() and - 'FORMAT' in lnlst[2].upper() - ): - cboufm = lnlst[3] - elif ('IBOUND' in lnlst[0].upper() and - 'SAVE' in lnlst[1].upper() and - 'UNIT' in lnlst[2].upper() - ): - ibouun = int(lnlst[3]) - elif 'COMPACT' in lnlst[0].upper(): - compact = True - - # dataset 2 - elif 'PERIOD' in lnlst[0].upper(): - if len(lines) > 0: - if iperoc > 0: - # create period step tuple - kperkstp = (iperoc - 1, itsoc - 1) - # save data - stress_period_data[kperkstp] = lines - # reset lines - lines = [] - # turn off oc if required - if iperoc > 0: - if itsoc == nstp[iperoc - 1]: - iperoc1 = iperoc + 1 - itsoc1 = 1 - else: - iperoc1 = iperoc - itsoc1 = itsoc + 1 - else: - iperoc1, itsoc1 = iperoc, itsoc - # update iperoc and itsoc - iperoc = int(lnlst[1]) - itsoc = int(lnlst[3]) - # do not used data that exceeds nper - if iperoc > nper: - break - # add a empty list if necessary - iempty = False - if iperoc != iperoc1: - iempty = True - else: - if itsoc != itsoc1: - iempty = True 
- if iempty == True: - kperkstp = (iperoc1 - 1, itsoc1 - 1) - stress_period_data[kperkstp] = [] - # dataset 3 - elif 'PRINT' in lnlst[0].upper(): - lines.append( - '{} {}'.format(lnlst[0].lower(), lnlst[1].lower())) - elif 'SAVE' in lnlst[0].upper(): - lines.append( - '{} {}'.format(lnlst[0].lower(), lnlst[1].lower())) - else: - print('Error encountered in OC import.') - print('Creating default OC package.') - return ModflowOc(model) - - # store the last record in word - if len(lines) > 0: - # create period step tuple - kperkstp = (iperoc - 1, itsoc - 1) - # save data - stress_period_data[kperkstp] = lines - # add a empty list if necessary - iempty = False - if iperoc != iperoc1: - iempty = True - else: - if itsoc != itsoc1: - iempty = True - if iempty == True: - kperkstp = (iperoc1 - 1, itsoc1 - 1) - stress_period_data[kperkstp] = [] - - if openfile: - f.close() - - # reset unit numbers - unitnumber = [14, 0, 0, 0, 0] - if ext_unit_dict is not None: - for key, value in ext_unit_dict.items(): - if value.filetype == ModflowOc.ftype(): - unitnumber[0] = key - fname = os.path.basename(value.filename) - else: - fname = os.path.basename(filename) - - # initialize filenames list - filenames = [fname, None, None, None, None] - - # fill remainder of filenames list - if ihedun > 0: - unitnumber[1] = ihedun - try: - filenames[1] = os.path.basename(ext_unit_dict[ihedun].filename) - except: - if model.verbose: - print('head file name will be generated by flopy') - if iddnun > 0: - unitnumber[2] = iddnun - try: - filenames[2] = os.path.basename(ext_unit_dict[iddnun].filename) - except: - if model.verbose: - print('drawdown file name will be generated by flopy') - if ibouun > 0: - unitnumber[4] = ibouun - try: - filenames[4] = os.path.basename(ext_unit_dict[ibouun].filename) - except: - if model.verbose: - print('ibound file name will be generated by flopy') - if cboufm is None: - cboufm = True - - # add unit numbers to pop_key_list - for u in unitnumber: - model.add_pop_key_list(u) - - # create instance of oc class - oc = ModflowOc(model, ihedfm=ihedfm, iddnfm=iddnfm, - chedfm=chedfm, cddnfm=cddnfm, cboufm=cboufm, - compact=compact, - stress_period_data=stress_period_data, - unitnumber=unitnumber, filenames=filenames) - - return oc - - @staticmethod - def ftype(): - return 'OC' - - @staticmethod - def defaultunit(): - return [14, 0, 0, 0, 0] +""" +mfoc module. Contains the ModflowOc class. Note that the user can access +the ModflowOc class as `flopy.modflow.ModflowOc`. + +Additional information for this MODFLOW package can be found at the `Online +MODFLOW Guide +`_. + +""" +import os +import sys + +from ..pakbase import Package + + +class ModflowOc(Package): + """ + MODFLOW Output Control Package Class. + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to which + this package will be added. + ihedfm : int + is a code for the format in which heads will be printed. + (default is 0). + iddnfm : int + is a code for the format in which drawdown will be printed. + (default is 0). + chedfm : string + is a character value that specifies the format for saving heads. + The format must contain 20 characters or less and must be a valid + Fortran format that is enclosed in parentheses. The format must be + enclosed in apostrophes if it contains one or more blanks or commas. 
+ The optional word LABEL after the format is used to indicate that + each layer of output should be preceded with a line that defines the + output (simulation time, the layer being output, and so forth). If + there is no record specifying CHEDFM, then heads are written to a + binary (unformatted) file. Binary files are usually more compact than + text files, but they are not generally transportable among different + computer operating systems or different Fortran compilers. + (default is None) + cddnfm : string + is a character value that specifies the format for saving drawdown. + The format must contain 20 characters or less and must be a valid + Fortran format that is enclosed in parentheses. The format must be + enclosed in apostrophes if it contains one or more blanks or commas. + The optional word LABEL after the format is used to indicate that + each layer of output should be preceded with a line that defines the + output (simulation time, the layer being output, and so forth). If + there is no record specifying CDDNFM, then drawdowns are written to a + binary (unformatted) file. Binary files are usually more compact than + text files, but they are not generally transportable among different + computer operating systems or different Fortran compilers. + (default is None) + cboufm : string + is a character value that specifies the format for saving ibound. + The format must contain 20 characters or less and must be a valid + Fortran format that is enclosed in parentheses. The format must be + enclosed in apostrophes if it contains one or more blanks or commas. + The optional word LABEL after the format is used to indicate that + each layer of output should be preceded with a line that defines the + output (simulation time, the layer being output, and so forth). If + there is no record specifying CBOUFM, then ibounds are written to a + binary (unformatted) file. Binary files are usually more compact than + text files, but they are not generally transportable among different + computer operating systems or different Fortran compilers. + (default is None) + stress_period_data : dictionary of lists + Dictionary key is a tuple with the zero-based period and step + (IPEROC, ITSOC) for each print/save option list. If stress_period_data + is None, then heads are saved for the last time step of each stress + period. (default is None) + + The list can have any valid MODFLOW OC print/save option: + PRINT HEAD + PRINT DRAWDOWN + PRINT BUDGET + SAVE HEAD + SAVE DRAWDOWN + SAVE BUDGET + SAVE IBOUND + + The lists can also include (1) DDREFERENCE in the list to reset + drawdown reference to the period and step and (2) a list of layers + for PRINT HEAD, SAVE HEAD, PRINT DRAWDOWN, SAVE DRAWDOWN, and + SAVE IBOUND. + + stress_period_data = {(0,1):['save head']}) would save the head for + the second timestep in the first stress period. + + compact : boolean + Save results in compact budget form. (default is True). + extension : list of strings + (default is ['oc', 'hds', 'ddn', 'cbc', 'ibo']). + unitnumber : list of ints + (default is [14, 51, 52, 53, 0]). + filenames : str or list of str + Filenames to use for the package and the head, drawdown, budget (not + used), and ibound output files. If filenames=None the package name + will be created using the model name and package extension and the + output file names will be created using the model name and extensions. 
+ If a single string is passed the package will be set to the string and + output names will be created using the model name and head, drawdown, + budget, and ibound extensions. To define the names for all package + files (input and output) the length of the list of strings should be 5. + Default is None. + + Attributes + ---------- + + Methods + ------- + + See Also + -------- + + Notes + ----- + The "words" method for specifying output control is the only option + available. Also, the "compact" budget should normally be used as it + produces files that are typically much smaller. The compact budget form is + also a requirement for using the MODPATH particle tracking program. + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> spd = {(0, 0): ['print head'], + ... (0, 1): [], + ... (0, 249): ['print head'], + ... (0, 250): [], + ... (0, 499): ['print head', 'save ibound'], + ... (0, 500): [], + ... (0, 749): ['print head', 'ddreference'], + ... (0, 750): [], + ... (0, 999): ['print head']} + >>> oc = flopy.modflow.ModflowOc(m, stress_period_data=spd, cboufm='(20i5)') + + """ + + def __init__(self, model, \ + ihedfm=0, iddnfm=0, chedfm=None, cddnfm=None, + cboufm=None, compact=True, + stress_period_data={(0, 0): ['save head']}, + extension=['oc', 'hds', 'ddn', 'cbc', 'ibo'], + unitnumber=None, filenames=None, label='LABEL', **kwargs): + + """ + Package constructor. + + """ + if unitnumber is None: + unitnumber = ModflowOc.defaultunit() + elif isinstance(unitnumber, list): + if len(unitnumber) < 5: + for idx in range(len(unitnumber), 6): + unitnumber.append(0) + self.label = label + # set filenames + if filenames is None: + filenames = [None, None, None, None, None] + elif isinstance(filenames, str): + filenames = [filenames, None, None, None, None] + elif isinstance(filenames, list): + if len(filenames) < 5: + for idx in range(len(filenames), 5): + filenames.append(None) + + # support structured and unstructured dis + dis = model.get_package('DIS') + if dis is None: + dis = model.get_package('DISU') + + if stress_period_data is None: + stress_period_data = { + (kper, dis.nstp.array[kper] - 1): ['save head'] for + kper in range(dis.nper)} + + # process kwargs + if 'save_every' in kwargs: + save_every = int(kwargs.pop('save_every')) + else: + save_every = None + if save_every is not None: + if 'save_types' in kwargs: + save_types = kwargs.pop('save_types') + if isinstance(save_types, str): + save_types = [save_types] + else: + save_types = ['save head', 'print budget'] + if 'save_start' in kwargs: + save_start = int(kwargs.pop('save_start')) + else: + save_start = 1 + stress_period_data = {} + for kper in range(dis.nper): + icnt = save_start + for kstp in range(dis.nstp[kper]): + if icnt == save_every: + stress_period_data[(kper, kstp)] = save_types + icnt = 0 + else: + stress_period_data[(kper, kstp)] = [] + icnt += 1 + + # set output unit numbers based on oc settings + self.savehead, self.saveddn, self.savebud, self.saveibnd = False, \ + False, \ + False, \ + False + for key, value in stress_period_data.items(): + tlist = list(value) + for t in tlist: + if 'save head' in t.lower(): + self.savehead = True + if unitnumber[1] == 0: + unitnumber[1] = 51 + if 'save drawdown' in t.lower(): + self.saveddn = True + if unitnumber[2] == 0: + unitnumber[2] = 52 + if 'save budget' in t.lower(): + self.savebud = True + if unitnumber[3] == 0 and filenames is None: + unitnumber[3] = 53 + if 'save ibound' in t.lower(): + self.saveibnd = True + if unitnumber[4] == 0: + 
unitnumber[4] = 54 + + # do not create head, ddn, or cbc output files if output is not + # specified in the oc stress_period_data + if not self.savehead: + unitnumber[1] = 0 + if not self.saveddn: + unitnumber[2] = 0 + if not self.savebud: + unitnumber[3] = 0 + if not self.saveibnd: + unitnumber[4] = 0 + + self.iuhead = unitnumber[1] + self.iuddn = unitnumber[2] + self.iubud = unitnumber[3] + self.iuibnd = unitnumber[4] + + # add output files + # head file + if self.savehead: + iu = unitnumber[1] + binflag = True + if chedfm is not None: + binflag = False + fname = filenames[1] + model.add_output_file(iu, fname=fname, extension=extension[1], + binflag=binflag) + # drawdown file + if self.saveddn: + iu = unitnumber[2] + binflag = True + if cddnfm is not None: + binflag = False + fname = filenames[2] + model.add_output_file(iu, fname=fname, extension=extension[2], + binflag=binflag) + # budget file + # Nothing is needed for the budget file + + # ibound file + ibouun = unitnumber[4] + if self.saveibnd: + iu = unitnumber[4] + binflag = True + if cboufm is not None: + binflag = False + fname = filenames[4] + model.add_output_file(iu, fname=fname, extension=extension[4], + binflag=binflag) + + name = [ModflowOc.ftype()] + extra = [''] + extension = [extension[0]] + unitnumber = unitnumber[0] + + # set package name + fname = [filenames[0]] + + # Call ancestor's init to set self.parent, extension, name and unit number + Package.__init__(self, model, extension=extension, name=name, + unit_number=unitnumber, + extra=extra, filenames=fname) + + self.heading = '# {} package for '.format(self.name[0]) + \ + ' {}, '.format(model.version_types[model.version]) + \ + 'generated by Flopy.' + + self.url = 'oc.htm' + self.ihedfm = ihedfm + self.iddnfm = iddnfm + self.chedfm = chedfm + self.cddnfm = cddnfm + + self.ibouun = ibouun + self.cboufm = cboufm + + self.compact = compact + + self.stress_period_data = stress_period_data + + self.parent.add_package(self) + + def check(self, f=None, verbose=True, level=1, checktype=None): + """ + Check package data for common errors. + + Parameters + ---------- + f : str or file handle + String defining file name or file handle for summary file + of check method output. If a string is passed a file handle + is created. If f is None, check method does not write + results to a summary file. (default is None) + verbose : bool + Boolean flag used to determine if check method results are + written to the screen. + level : int + Check method analysis level. If level=0, summary checks are + performed. If level=1, full checks are performed. 
+ + Returns + ------- + None + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow.load('model.nam') + >>> m.oc.check() + + """ + chk = self._get_check(f, verbose, level, checktype) + dis = self.parent.get_package('DIS') + if dis is None: + dis = self.parent.get_package('DISU') + if dis is None: + chk._add_to_summary('Error', package='OC', + desc='DIS package not available') + else: + # generate possible actions expected + expected_actions = [] + for first in ['PRINT', 'SAVE']: + for second in ['HEAD', 'DRAWDOWN', 'BUDGET', 'IBOUND']: + expected_actions.append([first, second]) + # remove exception + del expected_actions[expected_actions.index(['PRINT', 'IBOUND'])] + keys = list(self.stress_period_data.keys()) + for kper in range(dis.nper): + for kstp in range(dis.nstp[kper]): + kperkstp = (kper, kstp) + if kperkstp in keys: + del keys[keys.index(kperkstp)] + data = self.stress_period_data[kperkstp] + if not isinstance(data, list): + data = [data] + for action in data: + words = action.upper().split() + if len(words) < 2: + chk._add_to_summary( + 'Warning', package='OC', # value=kperkstp, + desc='action {!r} ignored; too few words' + .format(action)) + elif words[0:2] not in expected_actions: + chk._add_to_summary( + 'Warning', package='OC', # value=kperkstp, + desc='action {!r} ignored'.format(action)) + # TODO: check data list of layers for some actions + for kperkstp in keys: + # repeat as many times as remaining keys not used + chk._add_to_summary( + 'Warning', package='OC', # value=kperkstp, + desc='action(s) defined in OC stress_period_data ignored ' + 'as they are not part the stress periods defined by DIS') + chk.summarize() + return chk + + def write_file(self): + """ + Write the package file. + + Returns + ------- + None + + """ + f_oc = open(self.fn_path, 'w') + f_oc.write('{}\n'.format(self.heading)) + + # write options + line = 'HEAD PRINT FORMAT {0:3.0f}\n'.format(self.ihedfm) + f_oc.write(line) + if self.chedfm is not None: + line = 'HEAD SAVE FORMAT {0:20s} {1}\n'.format(self.chedfm, + self.label) + f_oc.write(line) + if self.savehead: + line = 'HEAD SAVE UNIT {0:5.0f}\n'.format(self.iuhead) + f_oc.write(line) + + f_oc.write('DRAWDOWN PRINT FORMAT {0:3.0f}\n'.format(self.iddnfm)) + if self.cddnfm is not None: + line = 'DRAWDOWN SAVE FORMAT {0:20s} {1}\n'.format(self.cddnfm, + self.label) + f_oc.write(line) + if self.saveddn: + line = 'DRAWDOWN SAVE UNIT {0:5.0f}\n'.format(self.iuddn) + f_oc.write(line) + + if self.saveibnd: + if self.cboufm is not None: + line = 'IBOUND SAVE FORMAT {0:20s} {1}\n'.format(self.cboufm, + self.label) + f_oc.write(line) + line = 'IBOUND SAVE UNIT {0:5.0f}\n'.format(self.iuibnd) + f_oc.write(line) + + if self.compact: + f_oc.write('COMPACT BUDGET AUX\n') + + # add a line separator between header and stress + # period data + f_oc.write('\n') + + # write the transient sequence described by the data dict + nr, nc, nl, nper = self.parent.get_nrow_ncol_nlay_nper() + dis = self.parent.get_package('DIS') + if dis is None: + dis = self.parent.get_package('DISU') + nstp = dis.nstp + + keys = list(self.stress_period_data.keys()) + keys.sort() + + data = [] + ddnref = '' + lines = '' + for kper in range(nper): + for kstp in range(nstp[kper]): + kperkstp = (kper, kstp) + if kperkstp in keys: + data = self.stress_period_data[kperkstp] + if not isinstance(data, list): + data = [data] + lines = '' + if len(data) > 0: + for item in data: + if 'DDREFERENCE' in item.upper(): + ddnref = item.lower() + else: + lines += ' {}\n'.format(item) + if 
len(lines) > 0: + f_oc.write( + 'period {} step {} {}\n'.format(kper + 1, kstp + 1, + ddnref)) + f_oc.write(lines) + f_oc.write('\n') + ddnref = '' + lines = '' + + # close oc file + f_oc.close() + + def _set_singlebudgetunit(self, budgetunit): + if budgetunit is None: + budgetunit = self.parent.next_ext_unit() + self.iubud = budgetunit + + def _set_budgetunit(self): + iubud = [] + for i, pp in enumerate(self.parent.packagelist): + if hasattr(pp, 'ipakcb'): + if pp.ipakcb > 0: + iubud.append(pp.ipakcb) + if len(iubud) < 1: + iubud = None + elif len(iubud) == 1: + iubud = iubud[0] + self.iubud = iubud + + def get_budgetunit(self): + """ + Get the budget file unit number(s). + + Parameters + ---------- + None + + Returns + ------- + iubud : integer ot list of integers + Unit number or list of cell-by-cell budget output unit numbers. + None is returned if ipakcb is less than one for all packages. + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> dis = flopy.modflow.ModflowDis(m) + >>> bas = flopy.modflow.ModflowBas(m) + >>> lpf = flopy.modflow.ModflowLpf(m, ipakcb=100) + >>> wel_data = {0: [[0, 0, 0, -1000.]]} + >>> wel = flopy.modflow.ModflowWel(m, ipakcb=101, + ... stress_period_data=wel_data) + >>> spd = {(0, 0): ['save head', 'save budget']} + >>> oc = flopy.modflow.ModflowOc(m, stress_period_data=spd) + >>> oc.get_budgetunit() + [100, 101] + + """ + # set iubud by iterating through the packages + self._set_budgetunit() + return self.iubud + + def reset_budgetunit(self, budgetunit=None, fname=None): + """ + Reset the cell-by-cell budget unit (ipakcb) for every package that + can write cell-by-cell data when SAVE BUDGET is specified in the + OC file to the specified budgetunit. + + Parameters + ---------- + budgetunit : int, optional + Unit number for cell-by-cell output data. If budgetunit is None + then the next available external unit number is assigned. Default + is None + fname : string, optional + Filename to use for cell-by-cell output file. If fname=None the + cell-by-cell output file will be created using the model name and + a '.cbc' file extension. Default is None. + + Returns + ------- + None + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> dis = flopy.modflow.ModflowDis(m) + >>> bas = flopy.modflow.ModflowBas(m) + >>> lpf = flopy.modflow.ModflowLpf(m, ipakcb=100) + >>> wel_data = {0: [[0, 0, 0, -1000.]]} + >>> wel = flopy.modflow.ModflowWel(m, ipakcb=101, + ... stress_period_data=wel_data) + >>> spd = {(0, 0): ['save head', 'save budget']} + >>> oc = flopy.modflow.ModflowOc(m, stress_period_data=spd) + >>> oc.reset_budgetunit(budgetunit=1053, fname='test.cbc') + + """ + + # remove existing output file + for pp in self.parent.packagelist: + if hasattr(pp, 'ipakcb'): + if pp.ipakcb > 0: + self.parent.remove_output(unit=pp.ipakcb) + pp.ipakcb = 0 + + # set the unit number used for all cell-by-cell output + self._set_singlebudgetunit(budgetunit) + + # add output file + for pp in self.parent.packagelist: + if hasattr(pp, 'ipakcb'): + pp.ipakcb = self.iubud + self.parent.add_output_file(pp.ipakcb, fname=fname, + package=pp.name) + + return + + @staticmethod + def get_ocoutput_units(f, ext_unit_dict=None): + """ + Get head and drawdown units from a OC file. + + Parameters + ---------- + f : filename or file handle + File to load. + ext_unit_dict : dictionary, optional + If the arrays in the file are specified using EXTERNAL, + or older style array control records, then `f` should be a file + handle. 
In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. + + Returns + ------- + ihedun : integer + Unit number of the head file. + fhead : str + File name of the head file. Is only defined if ext_unit_dict is + passed and the unit number is a valid key. + , headfilename, oc : ModflowOc object + ModflowOc object. + iddnun : integer + Unit number of the drawdown file. + fddn : str + File name of the drawdown file. Is only defined if ext_unit_dict is + passed and the unit number is a valid key. + + Examples + -------- + + >>> import flopy + >>> ihds, hf, iddn, df = flopy.modflow.ModflowOc.get_ocoutput_units('test.oc') + + """ + + # initialize + ihedun = 0 + iddnun = 0 + fhead = None + fddn = None + + numericformat = False + + openfile = not hasattr(f, 'read') + if openfile: + filename = f + f = open(filename, 'r') + + # read header + ipos = f.tell() + while True: + line = f.readline() + if line[0] == '#': + continue + elif line[0] == []: + continue + else: + lnlst = line.strip().split() + try: + ihedun, iddnun = int(lnlst[2]), int(lnlst[3]) + numericformat = True + except: + f.seek(ipos) + # exit so the remaining data can be read + # from the file based on numericformat + break + # read word formats + if not numericformat: + while True: + line = f.readline() + if len(line) < 1: + break + lnlst = line.strip().split() + if line[0] == '#': + continue + + # skip blank line in the OC file + if len(lnlst) < 1: + continue + + # dataset 1 values + elif ('HEAD' in lnlst[0].upper() and + 'SAVE' in lnlst[1].upper() and + 'UNIT' in lnlst[2].upper() + ): + ihedun = int(lnlst[3]) + elif ('DRAWDOWN' in lnlst[0].upper() and + 'SAVE' in lnlst[1].upper() and + 'UNIT' in lnlst[2].upper() + ): + iddnun = int(lnlst[3]) + # dataset 2 + elif 'PERIOD' in lnlst[0].upper(): + break + # + if ext_unit_dict is not None: + if ihedun in ext_unit_dict: + fhead = ext_unit_dict[ihedun] + if iddnun in ext_unit_dict: + fddn = ext_unit_dict[iddnun] + + if openfile: + f.close() + + # return + return ihedun, fhead, iddnun, fddn + + @staticmethod + def load(f, model, nper=None, nstp=None, nlay=None, ext_unit_dict=None): + """ + Load an existing package. + + Parameters + ---------- + f : filename or file handle + File to load. + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to + which this package will be added. + nper : int + The number of stress periods. If nper is None, then nper will be + obtained from the model object. (default is None). + nstp : int or list of ints + Integer of list of integers containing the number of time steps + in each stress period. If nstp is None, then nstp will be obtained + from the DIS or DISU packages attached to the model object. The + length of nstp must be equal to nper. (default is None). + nlay : int + The number of model layers. If nlay is None, then nnlay will be + obtained from the model object. nlay only needs to be specified + if an empty model object is passed in and the oc file being loaded + is defined using numeric codes. (default is None). + ext_unit_dict : dictionary, optional + If the arrays in the file are specified using EXTERNAL, + or older style array control records, then `f` should be a file + handle. In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. + + Returns + ------- + oc : ModflowOc object + ModflowOc object. 
+ + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> oc = flopy.modflow.ModflowOc.load('test.oc', m) + + """ + + if model.verbose: + sys.stdout.write('loading oc package file...\n') + + # set nper + if nper is None or nlay is None: + nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper() + + if nper == 0 or nlay == 0: + msg = 'discretization package not defined for the model, ' + \ + 'nper and nlay must be provided to the .load() method' + raise ValueError(msg) + + + # set nstp + if nstp is None: + dis = model.get_package('DIS') + if dis is None: + dis = model.get_package('DISU') + if dis is None: + msg = 'discretization package not defined for the model, ' + \ + 'a nstp list must be provided to the .load() method' + raise ValueError(msg) + nstp = list(dis.nstp.array) + else: + if isinstance(nstp, (int, float)): + nstp = [int(nstp)] + + # validate the size of nstp + if len(nstp) != nper: + msg = 'nstp must be a list with {} entries, '.format(nper) + \ + 'provided nstp list has {} entries.'.format(len(nstp)) + raise IOError(msg) + + # initialize + ihedfm = 0 + iddnfm = 0 + ihedun = 0 + iddnun = 0 + ibouun = 0 + compact = False + chedfm = None + cddnfm = None + cboufm = None + + numericformat = False + + stress_period_data = {} + + openfile = not hasattr(f, 'read') + if openfile: + filename = f + f = open(filename, 'r') + else: + filename = os.path.basename(f.name) + + # read header + ipos = f.tell() + while True: + line = f.readline() + if line[0] == '#': + continue + elif line[0] == []: + continue + else: + lnlst = line.strip().split() + try: + ihedfm, iddnfm = int(lnlst[0]), int(lnlst[1]) + ihedun, iddnun = int(lnlst[2]), int(lnlst[3]) + numericformat = True + except: + f.seek(ipos) + # exit so the remaining data can be read + # from the file based on numericformat + break + # set pointer to current position in the OC file + ipos = f.tell() + + # process each line + lines = [] + if numericformat == True: + for iperoc in range(nper): + for itsoc in range(nstp[iperoc]): + line = f.readline() + lnlst = line.strip().split() + incode, ihddfl = int(lnlst[0]), int(lnlst[1]) + ibudfl, icbcfl = int(lnlst[2]), int(lnlst[3]) + # new print and save flags are needed if incode is not + # less than 0. 
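(A brief summary of the numeric-format branch that follows, drawn from the code itself:

# incode < 0  : reuse the print/save options from the previous time step
# incode == 0 : read one HDPR/DDPR/HDSV/DDSV line that applies to all layers
# incode > 0  : read one HDPR/DDPR/HDSV/DDSV line per layer, collecting the
#               layer numbers for each PRINT/SAVE action
# ibudfl != 0 adds PRINT BUDGET and icbcfl != 0 adds SAVE BUDGET; ihddfl is
# read but not otherwise used here
)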
+ if incode >= 0: + lines = [] + # use print options from the last time step + else: + if len(lines) > 0: + stress_period_data[(iperoc, itsoc)] = list(lines) + continue + # set print and save budget flags + if ibudfl != 0: + lines.append('PRINT BUDGET') + if icbcfl != 0: + lines.append('SAVE BUDGET') + if incode == 0: + line = f.readline() + lnlst = line.strip().split() + hdpr, ddpr = int(lnlst[0]), int(lnlst[1]) + hdsv, ddsv = int(lnlst[2]), int(lnlst[3]) + if hdpr != 0: + lines.append('PRINT HEAD') + if ddpr != 0: + lines.append('PRINT DRAWDOWN') + if hdsv != 0: + lines.append('SAVE HEAD') + if ddsv != 0: + lines.append('SAVE DRAWDOWN') + elif incode > 0: + headprint = '' + headsave = '' + ddnprint = '' + ddnsave = '' + for k in range(nlay): + line = f.readline() + lnlst = line.strip().split() + hdpr, ddpr = int(lnlst[0]), int(lnlst[1]) + hdsv, ddsv = int(lnlst[2]), int(lnlst[3]) + if hdpr != 0: + headprint += ' {}'.format(k + 1) + if ddpr != 0: + ddnprint += ' {}'.format(k + 1) + if hdsv != 0: + headsave += ' {}'.format(k + 1) + if ddsv != 0: + ddnsave += ' {}'.format(k + 1) + if len(headprint) > 0: + lines.append('PRINT HEAD' + headprint) + if len(ddnprint) > 0: + lines.append('PRINT DRAWDOWN' + ddnprint) + if len(headsave) > 0: + lines.append('SAVE HEAD' + headsave) + if len(ddnsave) > 0: + lines.append('SAVE DRAWDOWN' + ddnsave) + stress_period_data[(iperoc, itsoc)] = list(lines) + else: + iperoc, itsoc = 0, 0 + while True: + line = f.readline() + if len(line) < 1: + break + lnlst = line.strip().split() + if line[0] == '#': + continue + + # added by JJS 12/12/14 to avoid error when there is a blank line in the OC file + if lnlst == []: + continue + # end add + + # dataset 1 values + elif ('HEAD' in lnlst[0].upper() and + 'PRINT' in lnlst[1].upper() and + 'FORMAT' in lnlst[2].upper() + ): + ihedfm = int(lnlst[3]) + elif ('HEAD' in lnlst[0].upper() and + 'SAVE' in lnlst[1].upper() and + 'FORMAT' in lnlst[2].upper() + ): + chedfm = lnlst[3] + elif ('HEAD' in lnlst[0].upper() and + 'SAVE' in lnlst[1].upper() and + 'UNIT' in lnlst[2].upper() + ): + ihedun = int(lnlst[3]) + elif ('DRAWDOWN' in lnlst[0].upper() and + 'PRINT' in lnlst[1].upper() and + 'FORMAT' in lnlst[2].upper() + ): + iddnfm = int(lnlst[3]) + elif ('DRAWDOWN' in lnlst[0].upper() and + 'SAVE' in lnlst[1].upper() and + 'FORMAT' in lnlst[2].upper() + ): + cddnfm = lnlst[3] + elif ('DRAWDOWN' in lnlst[0].upper() and + 'SAVE' in lnlst[1].upper() and + 'UNIT' in lnlst[2].upper() + ): + iddnun = int(lnlst[3]) + elif ('IBOUND' in lnlst[0].upper() and + 'SAVE' in lnlst[1].upper() and + 'FORMAT' in lnlst[2].upper() + ): + cboufm = lnlst[3] + elif ('IBOUND' in lnlst[0].upper() and + 'SAVE' in lnlst[1].upper() and + 'UNIT' in lnlst[2].upper() + ): + ibouun = int(lnlst[3]) + elif 'COMPACT' in lnlst[0].upper(): + compact = True + + # dataset 2 + elif 'PERIOD' in lnlst[0].upper(): + if len(lines) > 0: + if iperoc > 0: + # create period step tuple + kperkstp = (iperoc - 1, itsoc - 1) + # save data + stress_period_data[kperkstp] = lines + # reset lines + lines = [] + # turn off oc if required + if iperoc > 0: + if itsoc == nstp[iperoc - 1]: + iperoc1 = iperoc + 1 + itsoc1 = 1 + else: + iperoc1 = iperoc + itsoc1 = itsoc + 1 + else: + iperoc1, itsoc1 = iperoc, itsoc + # update iperoc and itsoc + iperoc = int(lnlst[1]) + itsoc = int(lnlst[3]) + # do not used data that exceeds nper + if iperoc > nper: + break + # add a empty list if necessary + iempty = False + if iperoc != iperoc1: + iempty = True + else: + if itsoc != itsoc1: + iempty = True 
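(Bookkeeping note on the word-format branch: (iperoc1, itsoc1) is the period/step expected to follow the previous PERIOD record; when the record just read skips ahead of it, output was switched off in between, so an empty option list is stored for the expected step below.)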
+ if iempty == True: + kperkstp = (iperoc1 - 1, itsoc1 - 1) + stress_period_data[kperkstp] = [] + # dataset 3 + elif 'PRINT' in lnlst[0].upper(): + lines.append( + '{} {}'.format(lnlst[0].lower(), lnlst[1].lower())) + elif 'SAVE' in lnlst[0].upper(): + lines.append( + '{} {}'.format(lnlst[0].lower(), lnlst[1].lower())) + else: + print('Error encountered in OC import.') + print('Creating default OC package.') + return ModflowOc(model) + + # store the last record in word + if len(lines) > 0: + # create period step tuple + kperkstp = (iperoc - 1, itsoc - 1) + # save data + stress_period_data[kperkstp] = lines + # add a empty list if necessary + iempty = False + if iperoc != iperoc1: + iempty = True + else: + if itsoc != itsoc1: + iempty = True + if iempty == True: + kperkstp = (iperoc1 - 1, itsoc1 - 1) + stress_period_data[kperkstp] = [] + + if openfile: + f.close() + + # reset unit numbers + unitnumber = [14, 0, 0, 0, 0] + if ext_unit_dict is not None: + for key, value in ext_unit_dict.items(): + if value.filetype == ModflowOc.ftype(): + unitnumber[0] = key + fname = os.path.basename(value.filename) + else: + fname = os.path.basename(filename) + + # initialize filenames list + filenames = [fname, None, None, None, None] + + # fill remainder of filenames list + if ihedun > 0: + unitnumber[1] = ihedun + try: + filenames[1] = os.path.basename(ext_unit_dict[ihedun].filename) + except: + if model.verbose: + print('head file name will be generated by flopy') + if iddnun > 0: + unitnumber[2] = iddnun + try: + filenames[2] = os.path.basename(ext_unit_dict[iddnun].filename) + except: + if model.verbose: + print('drawdown file name will be generated by flopy') + if ibouun > 0: + unitnumber[4] = ibouun + try: + filenames[4] = os.path.basename(ext_unit_dict[ibouun].filename) + except: + if model.verbose: + print('ibound file name will be generated by flopy') + if cboufm is None: + cboufm = True + + # add unit numbers to pop_key_list + for u in unitnumber: + model.add_pop_key_list(u) + + # create instance of oc class + oc = ModflowOc(model, ihedfm=ihedfm, iddnfm=iddnfm, + chedfm=chedfm, cddnfm=cddnfm, cboufm=cboufm, + compact=compact, + stress_period_data=stress_period_data, + unitnumber=unitnumber, filenames=filenames) + + return oc + + @staticmethod + def ftype(): + return 'OC' + + @staticmethod + def defaultunit(): + return [14, 0, 0, 0, 0] diff --git a/flopy/modflow/mfpar.py b/flopy/modflow/mfpar.py index 9ef6d227c2..249900e2f7 100644 --- a/flopy/modflow/mfpar.py +++ b/flopy/modflow/mfpar.py @@ -1,340 +1,340 @@ -""" -mfpar module. Contains the ModflowPar class. Note that the user can access -the ModflowPar class as `flopy.modflow.ModflowPar`. - - -""" - -import sys -import numpy as np -from .mfzon import ModflowZon -from .mfpval import ModflowPval -from .mfmlt import ModflowMlt - - -class ModflowPar(object): - """ - Class for loading mult, zone, pval, and parameter data for MODFLOW packages - that use array data (LPF, UPW, RCH, EVT). Class also includes methods to - create data arrays using mult, zone, pval, and parameter data (not used - for boundary conditions). - - Notes - ----- - Parameters are supported in Flopy only when reading in existing models. - Parameter values are converted to native values in Flopy and the - connection to "parameters" is thus nonexistent. - - - """ - - def __init__(self): - """ - Package constructor. 
- - """ - self.pval = None - self.mult = None - self.zone = None - return - - def set_zone(self, model, ext_unit_dict): - """ - Load an existing zone package and set zone data for a model. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - - Returns - ------- - - - Examples - -------- - - >>> ml.mfpar.set_zone(ml, ext_unit_dict) - - """ - zone = None - zone_key = None - for key, item in ext_unit_dict.items(): - if item.filetype.lower() == "zone": - zone = item - zone_key = key - if zone_key is not None: - try: - self.zone = ModflowZon.load(zone.filename, model, - ext_unit_dict=ext_unit_dict) - if model.verbose: - sys.stdout.write(' {} package load...success\n' \ - .format(self.zone.name[0])) - ext_unit_dict.pop(zone_key) - model.remove_package("ZONE") - except BaseException as o: - sys.stdout.write( - ' {} package load...failed\n {!s}'.format('ZONE', - o)) - return - - def set_mult(self, model, ext_unit_dict): - """ - Load an existing mult package and set mult data for a model. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - - Returns - ------- - - - Examples - -------- - - >>> ml.mfpar.set_mult(ml, ext_unit_dict) - - """ - mult = None - mult_key = None - for key, item in ext_unit_dict.items(): - if item.filetype.lower() == "mult": - mult = item - mult_key = key - if mult_key is not None: - try: - self.mult = ModflowMlt.load(mult.filename, model, - ext_unit_dict=ext_unit_dict) - if model.verbose: - sys.stdout.write(' {} package load...success\n' \ - .format(self.mult.name[0])) - ext_unit_dict.pop(mult_key) - model.remove_package("MULT") - except BaseException as o: - sys.stdout.write( - ' {} package load...failed\n {!s}'.format('MULT', - o)) - - return - - def set_pval(self, model, ext_unit_dict): - """ - Load an existing pval package and set pval data for a model. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. 
- - Returns - ------- - - - Examples - -------- - - >>> ml.mfpar.set_pval(ml, ext_unit_dict) - - """ - pval = None - pval_key = None - for key, item in ext_unit_dict.items(): - if item.filetype.lower() == "pval": - pval = item - pval_key = key - if pval_key is not None: - try: - self.pval = ModflowPval.load(pval.filename, model, - ext_unit_dict=ext_unit_dict) - if model.verbose: - sys.stdout.write(' {} package load...success\n' \ - .format(self.pval.name[0])) - ext_unit_dict.pop(pval_key) - model.remove_package("PVAL") - except BaseException as o: - sys.stdout.write( - ' {} package load...failed\n {!s}'.format('PVAL', - o)) - - return - - @staticmethod - def load(f, npar, verbose=False): - """ - Load property parameters from an existing package. - - Parameters - ---------- - f : file handle - - npar : int - The number of parameters. - - verbose : bool - Boolean flag to control output. (default is False) - - Returns - ------- - list : list object of unique par_types in file f - dictionary : dictionary object with parameters in file f - - Examples - -------- - - >>>par_types, parm_dict = flopy.modflow.mfpar.ModflowPar.load(f, np) - - - """ - # read parameter data - if npar > 0: - parm_dict = {} - par_types = [] - for nprm in range(npar): - line = f.readline() - t = line.strip().split() - parnam = t[0].lower() - if verbose: - print(' loading parameter "{}"...'.format(parnam)) - partyp = t[1].lower() - if partyp not in par_types: - par_types.append(partyp) - parval = np.float(t[2]) - nclu = np.int(t[3]) - clusters = [] - for nc in range(nclu): - line = f.readline() - t = line.strip().split() - lay = np.int(t[0]) - s = t[1] - if len(s) > 10: - s = s[0:10] - mltarr = s - s = t[2] - if len(s) > 10: - s = s[0:10] - zonarr = s - iarr = [] - for iv in t[3:]: - try: - iz = int(np.int(iv)) - if iz != 0: - iarr.append(iz) - except: - break - - clusters.append([lay, mltarr, zonarr, iarr]) - # add parnam to parm_dict - parm_dict[parnam] = {'partyp': partyp, 'parval': parval, - 'nclu': nclu, 'clusters': clusters} - - return par_types, parm_dict - - @staticmethod - def parameter_fill(model, shape, findkey, parm_dict, findlayer=None): - """ - Fill an array with parameters using zone, mult, and pval data. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. - - shape : tuple - The shape of the returned data array. Typically shape is (nrow, ncol) - - findkey : string - the parameter array to be constructed, - - parm_dict : dict - dictionary that includes all of the parameter data for a package - - findlayer : int - Layer that will be filled. Not required for array boundary condition data. - - Returns - ------- - data : numpy array - Filled array resulting from applications of zone, mult, pval, and - parameter data. 
- - Examples - -------- - - for lpf and upw: - - >>> data = flopy.modflow.mfpar.ModflowPar.parameter_fill(m, (nrow, ncol), 'vkcb', - >>> .....................................................parm_dict, findlayer=1) - - - """ - dtype = np.float32 - data = np.zeros(shape, dtype=dtype) - for key, tdict in parm_dict.items(): - partyp, parval = tdict['partyp'], tdict['parval'] - nclu, clusters = tdict['nclu'], tdict['clusters'] - if model.mfpar.pval is None: - pv = np.float(parval) - else: - try: - pv = np.float(model.mfpar.pval.pval_dict[key.lower()]) - except: - pv = np.float(parval) - # print partyp, parval, nclu, clusters - if partyp == findkey: - for [layer, mltarr, zonarr, izones] in clusters: - # print layer, mltarr, zonarr, izones - foundlayer = False - if findlayer == None: - foundlayer = True - else: - if layer == (findlayer + 1): - foundlayer = True - if foundlayer: - model.parameter_load = True - cluster_data = np.zeros(shape, dtype=dtype) - if mltarr.lower() == 'none': - mult = np.ones(shape, dtype=dtype) - else: - mult = model.mfpar.mult.mult_dict[mltarr.lower()][ - :, :] - if zonarr.lower() == 'all': - cluster_data = pv * mult - else: - mult_save = np.copy(mult) - za = model.mfpar.zone.zone_dict[zonarr.lower()][:, - :] - # build a multiplier for all of the izones - mult = np.zeros(shape, dtype=dtype) - for iz in izones: - filtarr = za == iz - mult[filtarr] += np.copy(mult_save[filtarr]) - # calculate parameter value for this cluster - cluster_data = pv * mult - # add data - data += cluster_data - - return data +""" +mfpar module. Contains the ModflowPar class. Note that the user can access +the ModflowPar class as `flopy.modflow.ModflowPar`. + + +""" + +import sys +import numpy as np +from .mfzon import ModflowZon +from .mfpval import ModflowPval +from .mfmlt import ModflowMlt + + +class ModflowPar(object): + """ + Class for loading mult, zone, pval, and parameter data for MODFLOW packages + that use array data (LPF, UPW, RCH, EVT). Class also includes methods to + create data arrays using mult, zone, pval, and parameter data (not used + for boundary conditions). + + Notes + ----- + Parameters are supported in Flopy only when reading in existing models. + Parameter values are converted to native values in Flopy and the + connection to "parameters" is thus nonexistent. + + + """ + + def __init__(self): + """ + Package constructor. + + """ + self.pval = None + self.mult = None + self.zone = None + return + + def set_zone(self, model, ext_unit_dict): + """ + Load an existing zone package and set zone data for a model. + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to + which this package will be added. + ext_unit_dict : dictionary, optional + If the arrays in the file are specified using EXTERNAL, + or older style array control records, then `f` should be a file + handle. In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. 
+ + Returns + ------- + + + Examples + -------- + + >>> ml.mfpar.set_zone(ml, ext_unit_dict) + + """ + zone = None + zone_key = None + for key, item in ext_unit_dict.items(): + if item.filetype.lower() == "zone": + zone = item + zone_key = key + if zone_key is not None: + try: + self.zone = ModflowZon.load(zone.filename, model, + ext_unit_dict=ext_unit_dict) + if model.verbose: + sys.stdout.write(' {} package load...success\n' \ + .format(self.zone.name[0])) + ext_unit_dict.pop(zone_key) + model.remove_package("ZONE") + except BaseException as o: + sys.stdout.write( + ' {} package load...failed\n {!s}'.format('ZONE', + o)) + return + + def set_mult(self, model, ext_unit_dict): + """ + Load an existing mult package and set mult data for a model. + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to + which this package will be added. + ext_unit_dict : dictionary, optional + If the arrays in the file are specified using EXTERNAL, + or older style array control records, then `f` should be a file + handle. In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. + + Returns + ------- + + + Examples + -------- + + >>> ml.mfpar.set_mult(ml, ext_unit_dict) + + """ + mult = None + mult_key = None + for key, item in ext_unit_dict.items(): + if item.filetype.lower() == "mult": + mult = item + mult_key = key + if mult_key is not None: + try: + self.mult = ModflowMlt.load(mult.filename, model, + ext_unit_dict=ext_unit_dict) + if model.verbose: + sys.stdout.write(' {} package load...success\n' \ + .format(self.mult.name[0])) + ext_unit_dict.pop(mult_key) + model.remove_package("MULT") + except BaseException as o: + sys.stdout.write( + ' {} package load...failed\n {!s}'.format('MULT', + o)) + + return + + def set_pval(self, model, ext_unit_dict): + """ + Load an existing pval package and set pval data for a model. + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to + which this package will be added. + ext_unit_dict : dictionary, optional + If the arrays in the file are specified using EXTERNAL, + or older style array control records, then `f` should be a file + handle. In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. + + Returns + ------- + + + Examples + -------- + + >>> ml.mfpar.set_pval(ml, ext_unit_dict) + + """ + pval = None + pval_key = None + for key, item in ext_unit_dict.items(): + if item.filetype.lower() == "pval": + pval = item + pval_key = key + if pval_key is not None: + try: + self.pval = ModflowPval.load(pval.filename, model, + ext_unit_dict=ext_unit_dict) + if model.verbose: + sys.stdout.write(' {} package load...success\n' \ + .format(self.pval.name[0])) + ext_unit_dict.pop(pval_key) + model.remove_package("PVAL") + except BaseException as o: + sys.stdout.write( + ' {} package load...failed\n {!s}'.format('PVAL', + o)) + + return + + @staticmethod + def load(f, npar, verbose=False): + """ + Load property parameters from an existing package. + + Parameters + ---------- + f : file handle + + npar : int + The number of parameters. + + verbose : bool + Boolean flag to control output. 
(default is False) + + Returns + ------- + list : list object of unique par_types in file f + dictionary : dictionary object with parameters in file f + + Examples + -------- + + >>>par_types, parm_dict = flopy.modflow.mfpar.ModflowPar.load(f, np) + + + """ + # read parameter data + if npar > 0: + parm_dict = {} + par_types = [] + for nprm in range(npar): + line = f.readline() + t = line.strip().split() + parnam = t[0].lower() + if verbose: + print(' loading parameter "{}"...'.format(parnam)) + partyp = t[1].lower() + if partyp not in par_types: + par_types.append(partyp) + parval = np.float(t[2]) + nclu = np.int(t[3]) + clusters = [] + for nc in range(nclu): + line = f.readline() + t = line.strip().split() + lay = np.int(t[0]) + s = t[1] + if len(s) > 10: + s = s[0:10] + mltarr = s + s = t[2] + if len(s) > 10: + s = s[0:10] + zonarr = s + iarr = [] + for iv in t[3:]: + try: + iz = int(np.int(iv)) + if iz != 0: + iarr.append(iz) + except: + break + + clusters.append([lay, mltarr, zonarr, iarr]) + # add parnam to parm_dict + parm_dict[parnam] = {'partyp': partyp, 'parval': parval, + 'nclu': nclu, 'clusters': clusters} + + return par_types, parm_dict + + @staticmethod + def parameter_fill(model, shape, findkey, parm_dict, findlayer=None): + """ + Fill an array with parameters using zone, mult, and pval data. + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to + which this package will be added. + + shape : tuple + The shape of the returned data array. Typically shape is (nrow, ncol) + + findkey : string + the parameter array to be constructed, + + parm_dict : dict + dictionary that includes all of the parameter data for a package + + findlayer : int + Layer that will be filled. Not required for array boundary condition data. + + Returns + ------- + data : numpy array + Filled array resulting from applications of zone, mult, pval, and + parameter data. 
+ + Examples + -------- + + for lpf and upw: + + >>> data = flopy.modflow.mfpar.ModflowPar.parameter_fill(m, (nrow, ncol), 'vkcb', + >>> .....................................................parm_dict, findlayer=1) + + + """ + dtype = np.float32 + data = np.zeros(shape, dtype=dtype) + for key, tdict in parm_dict.items(): + partyp, parval = tdict['partyp'], tdict['parval'] + nclu, clusters = tdict['nclu'], tdict['clusters'] + if model.mfpar.pval is None: + pv = np.float(parval) + else: + try: + pv = np.float(model.mfpar.pval.pval_dict[key.lower()]) + except: + pv = np.float(parval) + # print partyp, parval, nclu, clusters + if partyp == findkey: + for [layer, mltarr, zonarr, izones] in clusters: + # print layer, mltarr, zonarr, izones + foundlayer = False + if findlayer == None: + foundlayer = True + else: + if layer == (findlayer + 1): + foundlayer = True + if foundlayer: + model.parameter_load = True + cluster_data = np.zeros(shape, dtype=dtype) + if mltarr.lower() == 'none': + mult = np.ones(shape, dtype=dtype) + else: + mult = model.mfpar.mult.mult_dict[mltarr.lower()][ + :, :] + if zonarr.lower() == 'all': + cluster_data = pv * mult + else: + mult_save = np.copy(mult) + za = model.mfpar.zone.zone_dict[zonarr.lower()][:, + :] + # build a multiplier for all of the izones + mult = np.zeros(shape, dtype=dtype) + for iz in izones: + filtarr = za == iz + mult[filtarr] += np.copy(mult_save[filtarr]) + # calculate parameter value for this cluster + cluster_data = pv * mult + # add data + data += cluster_data + + return data diff --git a/flopy/modflow/mfparbc.py b/flopy/modflow/mfparbc.py index e22b90bddf..24ac76c002 100644 --- a/flopy/modflow/mfparbc.py +++ b/flopy/modflow/mfparbc.py @@ -1,269 +1,269 @@ -""" -mfparbc module. Contains the ModflowParBc class. Note that the user can access -the ModflowParBc class as `flopy.modflow.ModflowParBc`. - -""" - -import numpy as np -from ..utils.flopy_io import line_strip, ulstrd - - -class ModflowParBc(object): - """ - Class for loading boundary condition parameter data for MODFLOW packages - that use list data (WEL, GHB, DRN, etc.). This Class is also used to - create hfb6 data from hfb parameters. Class also includes methods to - create data arrays using pval and boundary condition parameter data. - - Notes - ----- - Parameters are supported in Flopy only when reading in existing models. - Parameter values are converted to native values in Flopy and the - connection to "parameters" is thus nonexistent. - - - """ - - def __init__(self, bc_parms): - """ - Package constructor. - - """ - self.bc_parms = bc_parms - - def get(self, fkey): - """ - overload get to return a value from the bc_parms dictionary - - """ - for key, value in self.bc_parms.items(): - if fkey == key: - return self.bc_parms[key] - return None - - @staticmethod - def load(f, npar, dt, model, ext_unit_dict=None, verbose=False): - """ - Load bc property parameters from an existing bc package - that uses list data (e.g. WEL, RIV, etc.). - - Parameters - ---------- - f : file handle - - npar : int - The number of parameters. - - dt : numpy.dtype - numpy.dtype for the particular list boundary condition. - - verbose : bool - Boolean flag to control output. 
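ModflowPar.parameter_fill above reduces every cluster to the same array arithmetic: take a multiplier array (ones when MLTARR is 'none'), zero it outside the listed zone numbers, and accumulate parval * mult into the output. (np.float and np.int, used throughout these loaders, are deprecated aliases for the Python builtins and were removed in NumPy 1.24; the sketch below uses the builtins.) A self-contained NumPy sketch of that kernel; fill_cluster is an illustrative name, not flopy API:

    import numpy as np

    def fill_cluster(parval, mult, zones, izones):
        # keep the multiplier only where the zone array matches a listed izone
        masked = np.zeros(zones.shape, dtype=np.float32)
        for iz in izones:
            sel = zones == iz
            masked[sel] += mult[sel]
        return parval * masked

    zones = np.array([[1, 1, 2], [2, 3, 3]])
    mult = np.full(zones.shape, 2.0, dtype=np.float32)
    data = fill_cluster(1.5, mult, zones, izones=[1, 3])
    # cells in zones 1 and 3 get 1.5 * 2.0 = 3.0; zone-2 cells remain 0.0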
(default is False) - - Returns - ------- - dictionary : dictionary object with parameters in file f - - Examples - -------- - - - """ - nitems = len(dt.names) - # read parameter data - if npar > 0: - bc_parms = {} - for idx in range(npar): - line = f.readline() - t = line_strip(line).split() - parnam = t[0].lower() - if parnam.startswith("'"): - parnam = parnam[1:] - if parnam.endswith("'"): - parnam = parnam[:-1] - if verbose: - print(' loading parameter "{}"...'.format(parnam)) - partyp = t[1].lower() - parval = t[2] - nlst = np.int(t[3]) - numinst = 1 - timeVarying = False - if len(t) > 4: - if 'instances' in t[4].lower(): - numinst = np.int(t[5]) - timeVarying = True - pinst = {} - for inst in range(numinst): - # read instance name - if timeVarying: - line = f.readline() - t = line_strip(line).split() - instnam = t[0].lower() - else: - instnam = 'static' - - ra = np.zeros(nlst, dtype=dt) - #todo: if sfac is used for parameter definition, then - # the empty list on the next line needs to be the package - # get_sfac_columns - bcinst = ulstrd(f, nlst, ra, model, [], ext_unit_dict) - pinst[instnam] = bcinst - bc_parms[parnam] = [{'partyp': partyp, 'parval': parval, - 'nlst': nlst, 'timevarying': timeVarying}, - pinst] - - # print bc_parms - bcpar = ModflowParBc(bc_parms) - return bcpar - - @staticmethod - def loadarray(f, npar, verbose=False): - """ - Load bc property parameters from an existing bc package - that uses array data (e.g. RCH, EVT). - - Parameters - ---------- - f : file handle - - npar : int - The number of parameters. - - verbose : bool - Boolean flag to control output. (default is False) - - Returns - ------- - dictionary : dictionary object with parameters in file f - - Examples - -------- - - - """ - # read parameter data - if npar > 0: - bc_parms = {} - for idx in range(npar): - line = f.readline() - t = line.strip().split() - parnam = t[0].lower() - if verbose: - print(' loading parameter "{}"...'.format(parnam)) - partyp = t[1].lower() - parval = t[2] - nclu = np.int(t[3]) - numinst = 1 - timeVarying = False - if len(t) > 4: - if 'instances' in t[4].lower(): - numinst = np.int(t[5]) - timeVarying = True - pinst = {} - for inst in range(numinst): - # read instance name - if timeVarying: - line = f.readline() - t = line.strip().split() - instnam = t[0].lower() - else: - instnam = 'static' - bcinst = [] - - for nc in range(nclu): - line = f.readline() - t = line.strip().split() - bnd = [t[0], t[1]] - if t[1].lower() == 'all': - bnd.append([]) - else: - iz = [] - for jdx in range(2, len(t)): - try: - ival = int(t[jdx]) - if ival > 0: - iz.append(ival) - except: - break - bnd.append(iz) - bcinst.append(bnd) - pinst[instnam] = bcinst - bc_parms[parnam] = [ - {'partyp': partyp, 'parval': parval, 'nclu': nclu, - 'timevarying': timeVarying}, - pinst] - - # print bc_parms - bcpar = ModflowParBc(bc_parms) - return bcpar - - @staticmethod - def parameter_bcfill(model, shape, parm_dict, pak_parms): - """ - Fill an array with parameters using zone, mult, and pval data. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. - - shape : tuple - The shape of the returned data array. Typically shape is (nrow, ncol) - - parm_dict : list - dictionary of parameter instances - - pak_parms : dict - dictionary that includes all of the parameter data for a package - - Returns - ------- - data : numpy array - Filled array resulting from applications of zone, mult, pval, and - parameter data. 
- - Examples - -------- - - for rch and evt - >>> data = flopy.modflow.mfparbc.ModflowParBc.parameter_bcfill(m, (nrow, ncol), - >>> .......'rech', parm_dict, pak_parms) - - - """ - dtype = np.float32 - data = np.zeros(shape, dtype=dtype) - for key, value in parm_dict.items(): - # print key, value - pdict, idict = pak_parms.bc_parms[key] - inst_data = idict[value] - if model.mfpar.pval is None: - pv = np.float(pdict['parval']) - else: - try: - pv = np.float(model.mfpar.pval.pval_dict[key.lower()]) - except: - pv = np.float(pdict['parval']) - for [mltarr, zonarr, izones] in inst_data: - model.parameter_load = True - # print mltarr, zonarr, izones - if mltarr.lower() == 'none': - mult = np.ones(shape, dtype=dtype) - else: - mult = model.mfpar.mult.mult_dict[mltarr.lower()][:, :] - if zonarr.lower() == 'all': - t = pv * mult - else: - mult_save = np.copy(mult) - za = model.mfpar.zone.zone_dict[zonarr.lower()][:, :] - # build a multiplier for all of the izones - mult = np.zeros(shape, dtype=dtype) - for iz in izones: - filtarr = za == iz - mult[filtarr] += np.copy(mult_save[filtarr]) - # calculate parameter value for this instance - t = pv * mult - data += t - - return data +""" +mfparbc module. Contains the ModflowParBc class. Note that the user can access +the ModflowParBc class as `flopy.modflow.ModflowParBc`. + +""" + +import numpy as np +from ..utils.flopy_io import line_strip, ulstrd + + +class ModflowParBc(object): + """ + Class for loading boundary condition parameter data for MODFLOW packages + that use list data (WEL, GHB, DRN, etc.). This Class is also used to + create hfb6 data from hfb parameters. Class also includes methods to + create data arrays using pval and boundary condition parameter data. + + Notes + ----- + Parameters are supported in Flopy only when reading in existing models. + Parameter values are converted to native values in Flopy and the + connection to "parameters" is thus nonexistent. + + + """ + + def __init__(self, bc_parms): + """ + Package constructor. + + """ + self.bc_parms = bc_parms + + def get(self, fkey): + """ + overload get to return a value from the bc_parms dictionary + + """ + for key, value in self.bc_parms.items(): + if fkey == key: + return self.bc_parms[key] + return None + + @staticmethod + def load(f, npar, dt, model, ext_unit_dict=None, verbose=False): + """ + Load bc property parameters from an existing bc package + that uses list data (e.g. WEL, RIV, etc.). + + Parameters + ---------- + f : file handle + + npar : int + The number of parameters. + + dt : numpy.dtype + numpy.dtype for the particular list boundary condition. + + verbose : bool + Boolean flag to control output. 
(default is False) + + Returns + ------- + dictionary : dictionary object with parameters in file f + + Examples + -------- + + + """ + nitems = len(dt.names) + # read parameter data + if npar > 0: + bc_parms = {} + for idx in range(npar): + line = f.readline() + t = line_strip(line).split() + parnam = t[0].lower() + if parnam.startswith("'"): + parnam = parnam[1:] + if parnam.endswith("'"): + parnam = parnam[:-1] + if verbose: + print(' loading parameter "{}"...'.format(parnam)) + partyp = t[1].lower() + parval = t[2] + nlst = np.int(t[3]) + numinst = 1 + timeVarying = False + if len(t) > 4: + if 'instances' in t[4].lower(): + numinst = np.int(t[5]) + timeVarying = True + pinst = {} + for inst in range(numinst): + # read instance name + if timeVarying: + line = f.readline() + t = line_strip(line).split() + instnam = t[0].lower() + else: + instnam = 'static' + + ra = np.zeros(nlst, dtype=dt) + #todo: if sfac is used for parameter definition, then + # the empty list on the next line needs to be the package + # get_sfac_columns + bcinst = ulstrd(f, nlst, ra, model, [], ext_unit_dict) + pinst[instnam] = bcinst + bc_parms[parnam] = [{'partyp': partyp, 'parval': parval, + 'nlst': nlst, 'timevarying': timeVarying}, + pinst] + + # print bc_parms + bcpar = ModflowParBc(bc_parms) + return bcpar + + @staticmethod + def loadarray(f, npar, verbose=False): + """ + Load bc property parameters from an existing bc package + that uses array data (e.g. RCH, EVT). + + Parameters + ---------- + f : file handle + + npar : int + The number of parameters. + + verbose : bool + Boolean flag to control output. (default is False) + + Returns + ------- + dictionary : dictionary object with parameters in file f + + Examples + -------- + + + """ + # read parameter data + if npar > 0: + bc_parms = {} + for idx in range(npar): + line = f.readline() + t = line.strip().split() + parnam = t[0].lower() + if verbose: + print(' loading parameter "{}"...'.format(parnam)) + partyp = t[1].lower() + parval = t[2] + nclu = np.int(t[3]) + numinst = 1 + timeVarying = False + if len(t) > 4: + if 'instances' in t[4].lower(): + numinst = np.int(t[5]) + timeVarying = True + pinst = {} + for inst in range(numinst): + # read instance name + if timeVarying: + line = f.readline() + t = line.strip().split() + instnam = t[0].lower() + else: + instnam = 'static' + bcinst = [] + + for nc in range(nclu): + line = f.readline() + t = line.strip().split() + bnd = [t[0], t[1]] + if t[1].lower() == 'all': + bnd.append([]) + else: + iz = [] + for jdx in range(2, len(t)): + try: + ival = int(t[jdx]) + if ival > 0: + iz.append(ival) + except: + break + bnd.append(iz) + bcinst.append(bnd) + pinst[instnam] = bcinst + bc_parms[parnam] = [ + {'partyp': partyp, 'parval': parval, 'nclu': nclu, + 'timevarying': timeVarying}, + pinst] + + # print bc_parms + bcpar = ModflowParBc(bc_parms) + return bcpar + + @staticmethod + def parameter_bcfill(model, shape, parm_dict, pak_parms): + """ + Fill an array with parameters using zone, mult, and pval data. + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to + which this package will be added. + + shape : tuple + The shape of the returned data array. Typically shape is (nrow, ncol) + + parm_dict : list + dictionary of parameter instances + + pak_parms : dict + dictionary that includes all of the parameter data for a package + + Returns + ------- + data : numpy array + Filled array resulting from applications of zone, mult, pval, and + parameter data. 
+ + Examples + -------- + + for rch and evt + >>> data = flopy.modflow.mfparbc.ModflowParBc.parameter_bcfill(m, (nrow, ncol), + >>> .......'rech', parm_dict, pak_parms) + + + """ + dtype = np.float32 + data = np.zeros(shape, dtype=dtype) + for key, value in parm_dict.items(): + # print key, value + pdict, idict = pak_parms.bc_parms[key] + inst_data = idict[value] + if model.mfpar.pval is None: + pv = np.float(pdict['parval']) + else: + try: + pv = np.float(model.mfpar.pval.pval_dict[key.lower()]) + except: + pv = np.float(pdict['parval']) + for [mltarr, zonarr, izones] in inst_data: + model.parameter_load = True + # print mltarr, zonarr, izones + if mltarr.lower() == 'none': + mult = np.ones(shape, dtype=dtype) + else: + mult = model.mfpar.mult.mult_dict[mltarr.lower()][:, :] + if zonarr.lower() == 'all': + t = pv * mult + else: + mult_save = np.copy(mult) + za = model.mfpar.zone.zone_dict[zonarr.lower()][:, :] + # build a multiplier for all of the izones + mult = np.zeros(shape, dtype=dtype) + for iz in izones: + filtarr = za == iz + mult[filtarr] += np.copy(mult_save[filtarr]) + # calculate parameter value for this instance + t = pv * mult + data += t + + return data diff --git a/flopy/modflow/mfpbc.py b/flopy/modflow/mfpbc.py index a16244ffb0..118ce78630 100644 --- a/flopy/modflow/mfpbc.py +++ b/flopy/modflow/mfpbc.py @@ -1,109 +1,109 @@ -from ..pakbase import Package - - -class ModflowPbc(Package): - """ - Periodic boundary condition class - - """ - - def __init__(self, model, layer_row_column_data=None, - layer_row_column_shead_ehead=None, - cosines=None, extension='pbc', unitnumber=None, - zerobase=True): - # set default unit number of one is not specified - if unitnumber is None: - unitnumber = ModflowPbc.defaultunit() - - # Call ancestor's init to set self.parent, extension, name and - # unit number - Package.__init__(self, model, extension, ModflowPbc.ftype(), - unitnumber) - self.heading = '# {} package for '.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' - self.mxactp = 0 - if layer_row_column_data is None: - if layer_row_column_shead_ehead is not None: - msg = '\nWARNING: ModflowPbc - Do not use ' + \ - 'layer_row_column_shead_ehead!\n' + \ - 22 * ' ' + 'Use layer_row_column_data instead.' - print(msg) - layer_row_column_data = layer_row_column_shead_ehead - else: - e = 'Failed to specify layer_row_column_shead_ehead ' + \ - 'or layer_row_column_data.' - raise Exception(e) - - self.mxactp, self.layer_row_column_data = self.assign_layer_row_column_data( - layer_row_column_data, 5, zerobase=zerobase) - # misuse of this function - zerobase needs to be False - self.mxcos, self.cosines = self.assign_layer_row_column_data(cosines, - 3, - zerobase=False) - # self.mxcos = 0 - # if (cosines != None): - # error_message = 'cosines must have 3 columns' - # if (not isinstance(cosines, list)): - # cosines = [cosines] - # for a in cosines: - # a = np.atleast_2d(a) - # nr, nc = a.shape - # assert nc == 3, error_message - # if (nr > self.mxcos): - # self.mxcos = nr - # self.cosines = cosines - self.np = 0 - self.parent.add_package(self) - - def ncells(self): - # Returns the maximum number of cells that have recharge - # (developed for MT3DMS SSM package) - return self.mxactp - - def write_file(self): - """ - Write the package file. 
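Both ModflowParBc loaders above store each parameter as a two-item list: a header dictionary and a dictionary of instances keyed by instance name ('static' unless the INSTANCES keyword is present); for the array variant each instance holds [mltarr, zonarr, izones] clusters, and parval is kept as the raw string token. A sketch of the resulting structure for a hypothetical time-varying recharge parameter (all names and numbers are illustrative):

    bc_parms = {
        'rch_1': [
            {'partyp': 'rch', 'parval': '1.0e-4', 'nclu': 1,
             'timevarying': True},
            {'wet': [['none', 'zarr', [1, 2]]],   # clusters for instance 'wet'
             'dry': [['none', 'zarr', [3]]]},
        ],
    }
    header, instances = bc_parms['rch_1']
    clusters = instances['wet']  # applied when a stress period selects 'wet'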
- - Returns - ------- - None - - """ - f_pbc = open(self.fn_path, 'w') - f_pbc.write('%s\n' % self.heading) - f_pbc.write('%10i%10i\n' % (self.mxactp, self.mxcos)) - for n in range(self.parent.get_package('DIS').nper): - if (n < len(self.layer_row_column_data)): - a = self.layer_row_column_data[n] - itmp = a.shape[0] - else: - itmp = -1 - if (n < len(self.cosines)): - c = self.cosines[n] - ctmp = c.shape[0] - else: - ctmp = -1 - f_pbc.write('{:10d}{:10d}{:10d}\n'.format(itmp, ctmp, self.np)) - if n < len(self.layer_row_column_data): - for b in a: - line = '{:10d}{:10d}{:10d}{:10d}{:10d}\n'.format(b[0], - b[1], - b[2], - b[3], - b[4]) - f_pbc.write(line) - if n < len(self.cosines): - for d in c: - f_pbc.write('{:10g}{:10g}{:10g}\n'.format(d[0], - d[1], - d[2])) - f_pbc.close() - - @staticmethod - def ftype(): - return 'PBC' - - @staticmethod - def defaultunit(): - return 30 +from ..pakbase import Package + + +class ModflowPbc(Package): + """ + Periodic boundary condition class + + """ + + def __init__(self, model, layer_row_column_data=None, + layer_row_column_shead_ehead=None, + cosines=None, extension='pbc', unitnumber=None, + zerobase=True): + # set default unit number of one is not specified + if unitnumber is None: + unitnumber = ModflowPbc.defaultunit() + + # Call ancestor's init to set self.parent, extension, name and + # unit number + Package.__init__(self, model, extension, ModflowPbc.ftype(), + unitnumber) + self.heading = '# {} package for '.format(self.name[0]) + \ + ' {}, '.format(model.version_types[model.version]) + \ + 'generated by Flopy.' + self.mxactp = 0 + if layer_row_column_data is None: + if layer_row_column_shead_ehead is not None: + msg = '\nWARNING: ModflowPbc - Do not use ' + \ + 'layer_row_column_shead_ehead!\n' + \ + 22 * ' ' + 'Use layer_row_column_data instead.' + print(msg) + layer_row_column_data = layer_row_column_shead_ehead + else: + e = 'Failed to specify layer_row_column_shead_ehead ' + \ + 'or layer_row_column_data.' + raise Exception(e) + + self.mxactp, self.layer_row_column_data = self.assign_layer_row_column_data( + layer_row_column_data, 5, zerobase=zerobase) + # misuse of this function - zerobase needs to be False + self.mxcos, self.cosines = self.assign_layer_row_column_data(cosines, + 3, + zerobase=False) + # self.mxcos = 0 + # if (cosines != None): + # error_message = 'cosines must have 3 columns' + # if (not isinstance(cosines, list)): + # cosines = [cosines] + # for a in cosines: + # a = np.atleast_2d(a) + # nr, nc = a.shape + # assert nc == 3, error_message + # if (nr > self.mxcos): + # self.mxcos = nr + # self.cosines = cosines + self.np = 0 + self.parent.add_package(self) + + def ncells(self): + # Returns the maximum number of cells that have recharge + # (developed for MT3DMS SSM package) + return self.mxactp + + def write_file(self): + """ + Write the package file. 
+ + Returns + ------- + None + + """ + f_pbc = open(self.fn_path, 'w') + f_pbc.write('%s\n' % self.heading) + f_pbc.write('%10i%10i\n' % (self.mxactp, self.mxcos)) + for n in range(self.parent.get_package('DIS').nper): + if (n < len(self.layer_row_column_data)): + a = self.layer_row_column_data[n] + itmp = a.shape[0] + else: + itmp = -1 + if (n < len(self.cosines)): + c = self.cosines[n] + ctmp = c.shape[0] + else: + ctmp = -1 + f_pbc.write('{:10d}{:10d}{:10d}\n'.format(itmp, ctmp, self.np)) + if n < len(self.layer_row_column_data): + for b in a: + line = '{:10d}{:10d}{:10d}{:10d}{:10d}\n'.format(b[0], + b[1], + b[2], + b[3], + b[4]) + f_pbc.write(line) + if n < len(self.cosines): + for d in c: + f_pbc.write('{:10g}{:10g}{:10g}\n'.format(d[0], + d[1], + d[2])) + f_pbc.close() + + @staticmethod + def ftype(): + return 'PBC' + + @staticmethod + def defaultunit(): + return 30 diff --git a/flopy/modflow/mfpcg.py b/flopy/modflow/mfpcg.py index 42d95b2b34..cb4fa32f70 100644 --- a/flopy/modflow/mfpcg.py +++ b/flopy/modflow/mfpcg.py @@ -1,335 +1,335 @@ -""" -mfpcg module. Contains the ModflowPcg class. Note that the user can access -the ModflowPcg class as `flopy.modflow.ModflowPcg`. - -Additional information for this MODFLOW package can be found at the `Online -MODFLOW Guide -`_. - -""" -import sys - -from ..pakbase import Package -from ..utils.flopy_io import line_parse - - -class ModflowPcg(Package): - """ - MODFLOW Pcg Package Class. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to which - this package will be added. - mxiter : int - maximum number of outer iterations. (default is 50) - iter1 : int - maximum number of inner iterations. (default is 30) - npcond : int - flag used to select the matrix conditioning method. (default is 1). - specify npcond = 1 for Modified Incomplete Cholesky. - specify npcond = 2 for Polynomial. - hclose : float - is the head change criterion for convergence. (default is 1e-5). - rclose : float - is the residual criterion for convergence. (default is 1e-5) - relax : float - is the relaxation parameter used with npcond = 1. (default is 1.0) - nbpol : int - is only used when npcond = 2 to indicate whether the estimate of the - upper bound on the maximum eigenvalue is 2.0, or whether the estimate - will be calculated. nbpol = 2 is used to specify the value is 2.0; - for any other value of nbpol, the estimate is calculated. Convergence - is generally insensitive to this parameter. (default is 0). - iprpcg : int - solver print out interval. (default is 0). - mutpcg : int - If mutpcg = 0, tables of maximum head change and residual will be - printed each iteration. - If mutpcg = 1, only the total number of iterations will be printed. - If mutpcg = 2, no information will be printed. - If mutpcg = 3, information will only be printed if convergence fails. - (default is 3). - damp : float - is the steady-state damping factor. (default is 1.) - dampt : float - is the transient damping factor. (default is 1.) - ihcofadd : int - is a flag that determines what happens to an active cell that is - surrounded by dry cells. (default is 0). If ihcofadd=0, cell - converts to dry regardless of HCOF value. This is the default, which - is the way PCG2 worked prior to the addition of this option. If - ihcofadd<>0, cell converts to dry only if HCOF has no head-dependent - stresses or storage terms. - extension : list string - Filename extension (default is 'pcg') - unitnumber : int - File unit number (default is None). 
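ModflowPbc.write_file above emits classic fixed-format MODFLOW records: counts via '%10i' and list data in right-aligned 10-character fields. A one-line sketch of the layer/row/column/shead/ehead record, with made-up values:

    b = (1, 5, 7, 10, 9)  # layer, row, col, start head, end head (illustrative)
    line = '{:10d}{:10d}{:10d}{:10d}{:10d}'.format(*b)
    # -> '         1         5         7        10         9'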
- filenames : str or list of str - Filenames to use for the package. If filenames=None the package name - will be created using the model name and package extension. If a - single string is passed the package will be set to the string. - Default is None. - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> pcg = flopy.modflow.ModflowPcg(m) - - """ - - def __init__(self, model, mxiter=50, iter1=30, npcond=1, - hclose=1e-5, rclose=1e-5, relax=1.0, nbpol=0, iprpcg=0, - mutpcg=3, - damp=1.0, dampt=1.0, ihcofadd=0, - extension='pcg', unitnumber=None, filenames=None): - """ - Package constructor. - - """ - # set default unit number of one is not specified - if unitnumber is None: - unitnumber = ModflowPcg.defaultunit() - - # set filenames - if filenames is None: - filenames = [None] - elif isinstance(filenames, str): - filenames = [filenames] - - # Fill namefile items - name = [ModflowPcg.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and - # unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - # check if a valid model version has been specified - if model.version == 'mfusg': - err = 'Error: cannot use {} package with model version {}'.format( - self.name, model.version) - raise Exception(err) - - self.heading = '# {} package for '.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' - self.url = 'pcg.htm' - self.mxiter = mxiter - self.iter1 = iter1 - self.npcond = npcond - self.hclose = hclose - self.rclose = rclose - self.relax = relax - self.nbpol = nbpol - self.iprpcg = iprpcg - self.mutpcg = mutpcg - self.damp = damp - self.dampt = dampt - self.ihcofadd = ihcofadd - self.parent.add_package(self) - - def write_file(self): - """ - Write the package file. - - Returns - ------- - None - - """ - f = open(self.fn_path, 'w') - f.write('{}\n'.format(self.heading)) - ifrfm = self.parent.get_ifrefm() - if ifrfm: - f.write('{} '.format(self.mxiter)) - f.write('{} '.format(self.iter1)) - f.write('{} '.format(self.npcond)) - f.write('{}'.format(self.ihcofadd)) - f.write('\n') - f.write('{} '.format(self.hclose)) - f.write('{} '.format(self.rclose)) - f.write('{} '.format(self.relax)) - f.write('{} '.format(self.nbpol)) - f.write('{} '.format(self.iprpcg)) - f.write('{} '.format(self.mutpcg)) - f.write('{} '.format(self.damp)) - if self.damp < 0: - f.write('{}'.format(self.dampt)) - f.write('\n') - else: - f.write(' {0:9d}'.format(self.mxiter)) - f.write(' {0:9d}'.format(self.iter1)) - f.write(' {0:9d}'.format(self.npcond)) - f.write(' {0:9d}'.format(self.ihcofadd)) - f.write('\n') - f.write(' {0:9.3e}'.format(self.hclose)) - f.write(' {0:9.3e}'.format(self.rclose)) - f.write(' {0:9.3e}'.format(self.relax)) - f.write(' {0:9d}'.format(self.nbpol)) - f.write(' {0:9d}'.format(self.iprpcg)) - f.write(' {0:9d}'.format(self.mutpcg)) - f.write(' {0:9.3e}'.format(self.damp)) - if self.damp < 0: - f.write(' {0:9.3e}'.format(self.dampt)) - f.write('\n') - f.close() - - @staticmethod - def load(f, model, ext_unit_dict=None): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. 
- ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - - Returns - ------- - pcg : ModflowPcg object - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> pcg = flopy.modflow.ModflowPcg.load('test.pcg', m) - - """ - - if model.verbose: - sys.stdout.write('loading pcg package file...\n') - - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - - # dataset 0 -- header - while True: - line = f.readline() - if line[0] != '#': - break - # dataset 1 - ifrfm = model.get_ifrefm() - if model.version != 'mf2k': - ifrfm = True - ihcofadd = 0 - dampt = 0. - - # free format - if ifrfm: - t = line_parse(line) - # t = line.strip().split() - mxiter = int(t[0]) - iter1 = int(t[1]) - npcond = int(t[2]) - try: - ihcofadd = int(t[3]) - except: - if model.verbose: - print(' explicit ihcofadd in file') - - # dataset 2 - try: - line = f.readline() - t = line_parse(line) - # t = line.strip().split() - hclose = float(t[0]) - rclose = float(t[1]) - relax = float(t[2]) - nbpol = int(t[3]) - iprpcg = int(t[4]) - mutpcg = int(t[5]) - damp = float(t[6]) - if damp < 0.: - dampt = float(t[7]) - except ValueError: - hclose = float(line[0:10].strip()) - rclose = float(line[10:20].strip()) - relax = float(line[20:30].strip()) - nbpol = int(line[30:40].strip()) - iprpcg = int(line[40:50].strip()) - mutpcg = int(line[50:60].strip()) - damp = float(line[60:70].strip()) - if damp < 0.: - dampt = float(line[70:80].strip()) - # fixed format - else: - mxiter = int(line[0:10].strip()) - iter1 = int(line[10:20].strip()) - npcond = int(line[20:30].strip()) - try: - ihcofadd = int(line[30:40].strip()) - except: - if model.verbose: - print(' explicit ihcofadd in file') - - # dataset 2 - line = f.readline() - hclose = float(line[0:10].strip()) - rclose = float(line[10:20].strip()) - relax = float(line[20:30].strip()) - nbpol = int(line[30:40].strip()) - iprpcg = int(line[40:50].strip()) - mutpcg = int(line[50:60].strip()) - damp = float(line[60:70].strip()) - if damp < 0.: - dampt = float(line[70:80].strip()) - - if openfile: - f.close() - - # set package unit number - unitnumber = None - filenames = [None] - if ext_unit_dict is not None: - unitnumber, filenames[0] = \ - model.get_ext_dict_attr(ext_unit_dict, - filetype=ModflowPcg.ftype()) - - # create instance of pcg class - pcg = ModflowPcg(model, mxiter=mxiter, iter1=iter1, npcond=npcond, - ihcofadd=ihcofadd, hclose=hclose, rclose=rclose, - relax=relax, nbpol=nbpol, iprpcg=iprpcg, - mutpcg=mutpcg, damp=damp, dampt=dampt, - unitnumber=unitnumber, filenames=filenames) - return pcg - - @staticmethod - def ftype(): - return 'PCG' - - @staticmethod - def defaultunit(): - return 27 +""" +mfpcg module. Contains the ModflowPcg class. Note that the user can access +the ModflowPcg class as `flopy.modflow.ModflowPcg`. + +Additional information for this MODFLOW package can be found at the `Online +MODFLOW Guide +`_. + +""" +import sys + +from ..pakbase import Package +from ..utils.flopy_io import line_parse + + +class ModflowPcg(Package): + """ + MODFLOW Pcg Package Class. + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to which + this package will be added. + mxiter : int + maximum number of outer iterations. 
(default is 50) + iter1 : int + maximum number of inner iterations. (default is 30) + npcond : int + flag used to select the matrix conditioning method. (default is 1). + specify npcond = 1 for Modified Incomplete Cholesky. + specify npcond = 2 for Polynomial. + hclose : float + is the head change criterion for convergence. (default is 1e-5). + rclose : float + is the residual criterion for convergence. (default is 1e-5) + relax : float + is the relaxation parameter used with npcond = 1. (default is 1.0) + nbpol : int + is only used when npcond = 2 to indicate whether the estimate of the + upper bound on the maximum eigenvalue is 2.0, or whether the estimate + will be calculated. nbpol = 2 is used to specify the value is 2.0; + for any other value of nbpol, the estimate is calculated. Convergence + is generally insensitive to this parameter. (default is 0). + iprpcg : int + solver print out interval. (default is 0). + mutpcg : int + If mutpcg = 0, tables of maximum head change and residual will be + printed each iteration. + If mutpcg = 1, only the total number of iterations will be printed. + If mutpcg = 2, no information will be printed. + If mutpcg = 3, information will only be printed if convergence fails. + (default is 3). + damp : float + is the steady-state damping factor. (default is 1.) + dampt : float + is the transient damping factor. (default is 1.) + ihcofadd : int + is a flag that determines what happens to an active cell that is + surrounded by dry cells. (default is 0). If ihcofadd=0, cell + converts to dry regardless of HCOF value. This is the default, which + is the way PCG2 worked prior to the addition of this option. If + ihcofadd<>0, cell converts to dry only if HCOF has no head-dependent + stresses or storage terms. + extension : list string + Filename extension (default is 'pcg') + unitnumber : int + File unit number (default is None). + filenames : str or list of str + Filenames to use for the package. If filenames=None the package name + will be created using the model name and package extension. If a + single string is passed the package will be set to the string. + Default is None. + + Attributes + ---------- + + Methods + ------- + + See Also + -------- + + Notes + ----- + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> pcg = flopy.modflow.ModflowPcg(m) + + """ + + def __init__(self, model, mxiter=50, iter1=30, npcond=1, + hclose=1e-5, rclose=1e-5, relax=1.0, nbpol=0, iprpcg=0, + mutpcg=3, + damp=1.0, dampt=1.0, ihcofadd=0, + extension='pcg', unitnumber=None, filenames=None): + """ + Package constructor. 
+ + """ + # set default unit number of one is not specified + if unitnumber is None: + unitnumber = ModflowPcg.defaultunit() + + # set filenames + if filenames is None: + filenames = [None] + elif isinstance(filenames, str): + filenames = [filenames] + + # Fill namefile items + name = [ModflowPcg.ftype()] + units = [unitnumber] + extra = [''] + + # set package name + fname = [filenames[0]] + + # Call ancestor's init to set self.parent, extension, name and + # unit number + Package.__init__(self, model, extension=extension, name=name, + unit_number=units, extra=extra, filenames=fname) + + # check if a valid model version has been specified + if model.version == 'mfusg': + err = 'Error: cannot use {} package with model version {}'.format( + self.name, model.version) + raise Exception(err) + + self.heading = '# {} package for '.format(self.name[0]) + \ + ' {}, '.format(model.version_types[model.version]) + \ + 'generated by Flopy.' + self.url = 'pcg.htm' + self.mxiter = mxiter + self.iter1 = iter1 + self.npcond = npcond + self.hclose = hclose + self.rclose = rclose + self.relax = relax + self.nbpol = nbpol + self.iprpcg = iprpcg + self.mutpcg = mutpcg + self.damp = damp + self.dampt = dampt + self.ihcofadd = ihcofadd + self.parent.add_package(self) + + def write_file(self): + """ + Write the package file. + + Returns + ------- + None + + """ + f = open(self.fn_path, 'w') + f.write('{}\n'.format(self.heading)) + ifrfm = self.parent.get_ifrefm() + if ifrfm: + f.write('{} '.format(self.mxiter)) + f.write('{} '.format(self.iter1)) + f.write('{} '.format(self.npcond)) + f.write('{}'.format(self.ihcofadd)) + f.write('\n') + f.write('{} '.format(self.hclose)) + f.write('{} '.format(self.rclose)) + f.write('{} '.format(self.relax)) + f.write('{} '.format(self.nbpol)) + f.write('{} '.format(self.iprpcg)) + f.write('{} '.format(self.mutpcg)) + f.write('{} '.format(self.damp)) + if self.damp < 0: + f.write('{}'.format(self.dampt)) + f.write('\n') + else: + f.write(' {0:9d}'.format(self.mxiter)) + f.write(' {0:9d}'.format(self.iter1)) + f.write(' {0:9d}'.format(self.npcond)) + f.write(' {0:9d}'.format(self.ihcofadd)) + f.write('\n') + f.write(' {0:9.3e}'.format(self.hclose)) + f.write(' {0:9.3e}'.format(self.rclose)) + f.write(' {0:9.3e}'.format(self.relax)) + f.write(' {0:9d}'.format(self.nbpol)) + f.write(' {0:9d}'.format(self.iprpcg)) + f.write(' {0:9d}'.format(self.mutpcg)) + f.write(' {0:9.3e}'.format(self.damp)) + if self.damp < 0: + f.write(' {0:9.3e}'.format(self.dampt)) + f.write('\n') + f.close() + + @staticmethod + def load(f, model, ext_unit_dict=None): + """ + Load an existing package. + + Parameters + ---------- + f : filename or file handle + File to load. + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to + which this package will be added. + ext_unit_dict : dictionary, optional + If the arrays in the file are specified using EXTERNAL, + or older style array control records, then `f` should be a file + handle. In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. 
+ + Returns + ------- + pcg : ModflowPcg object + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> pcg = flopy.modflow.ModflowPcg.load('test.pcg', m) + + """ + + if model.verbose: + sys.stdout.write('loading pcg package file...\n') + + openfile = not hasattr(f, 'read') + if openfile: + filename = f + f = open(filename, 'r') + + # dataset 0 -- header + while True: + line = f.readline() + if line[0] != '#': + break + # dataset 1 + ifrfm = model.get_ifrefm() + if model.version != 'mf2k': + ifrfm = True + ihcofadd = 0 + dampt = 0. + + # free format + if ifrfm: + t = line_parse(line) + # t = line.strip().split() + mxiter = int(t[0]) + iter1 = int(t[1]) + npcond = int(t[2]) + try: + ihcofadd = int(t[3]) + except: + if model.verbose: + print(' explicit ihcofadd in file') + + # dataset 2 + try: + line = f.readline() + t = line_parse(line) + # t = line.strip().split() + hclose = float(t[0]) + rclose = float(t[1]) + relax = float(t[2]) + nbpol = int(t[3]) + iprpcg = int(t[4]) + mutpcg = int(t[5]) + damp = float(t[6]) + if damp < 0.: + dampt = float(t[7]) + except ValueError: + hclose = float(line[0:10].strip()) + rclose = float(line[10:20].strip()) + relax = float(line[20:30].strip()) + nbpol = int(line[30:40].strip()) + iprpcg = int(line[40:50].strip()) + mutpcg = int(line[50:60].strip()) + damp = float(line[60:70].strip()) + if damp < 0.: + dampt = float(line[70:80].strip()) + # fixed format + else: + mxiter = int(line[0:10].strip()) + iter1 = int(line[10:20].strip()) + npcond = int(line[20:30].strip()) + try: + ihcofadd = int(line[30:40].strip()) + except: + if model.verbose: + print(' explicit ihcofadd in file') + + # dataset 2 + line = f.readline() + hclose = float(line[0:10].strip()) + rclose = float(line[10:20].strip()) + relax = float(line[20:30].strip()) + nbpol = int(line[30:40].strip()) + iprpcg = int(line[40:50].strip()) + mutpcg = int(line[50:60].strip()) + damp = float(line[60:70].strip()) + if damp < 0.: + dampt = float(line[70:80].strip()) + + if openfile: + f.close() + + # set package unit number + unitnumber = None + filenames = [None] + if ext_unit_dict is not None: + unitnumber, filenames[0] = \ + model.get_ext_dict_attr(ext_unit_dict, + filetype=ModflowPcg.ftype()) + + # create instance of pcg class + pcg = ModflowPcg(model, mxiter=mxiter, iter1=iter1, npcond=npcond, + ihcofadd=ihcofadd, hclose=hclose, rclose=rclose, + relax=relax, nbpol=nbpol, iprpcg=iprpcg, + mutpcg=mutpcg, damp=damp, dampt=dampt, + unitnumber=unitnumber, filenames=filenames) + return pcg + + @staticmethod + def ftype(): + return 'PCG' + + @staticmethod + def defaultunit(): + return 27 diff --git a/flopy/modflow/mfpcgn.py b/flopy/modflow/mfpcgn.py index 38fb5171cb..e49897cd43 100644 --- a/flopy/modflow/mfpcgn.py +++ b/flopy/modflow/mfpcgn.py @@ -1,526 +1,526 @@ -""" -mfpcgn module. Contains the ModflowPcgn class. Note that the user can access -the ModflowStr class as `flopy.modflow.ModflowPcgn`. - -Additional information for this MODFLOW package can be found at the `Online -MODFLOW Guide -`_. - -""" - -import sys -from ..pakbase import Package - - -class ModflowPcgn(Package): - """ - MODFLOW Pcgn Package Class. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to which - this package will be added. - iter_mo : int - The maximum number of picard (outer) iterations allowed. For nonlinear - problems, this variable must be set to some number greater than one, - depending on the problem size and degree of nonlinearity. 
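ModflowPcg.load above parses dataset 2 two ways: free format via whitespace tokens, with a fixed-format fallback that slices the record into 10-column fields. A sketch of that fallback (fixed_fields is an illustrative helper, not flopy API):

    def fixed_fields(line, nfields, width=10):
        # split a fixed-format record into right-aligned fields
        return [line[i * width:(i + 1) * width].strip()
                for i in range(nfields)]

    rec = '   1.0e-05   1.0e-05      0.98         0         0         3       1.0'
    hclose, rclose, relax, nbpol, iprpcg, mutpcg, damp = fixed_fields(rec, 7)
    hclose, damp = float(hclose), float(damp)
    # dampt would be read from columns 71-80 only when damp < 0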
If iter_mo - is set to 1, then the pcgn solver assumes that the problem is linear - and the input requirements are greatly truncated. (default is 50) - iter_mi : int - maximum number of pcg (inner) iterations allowed. Generally, - this variable is set to some number greater than one, depending on - the matrix size, degree of convergence called for, and the nature of - the problem. For a nonlinear problem, iter_mi should be set large - enough that the pcg iteration converges freely with the relative - convergence parameter epsilon described in the Parameters Related - to Convergence of Inner Iteration: Line 4 subsection. - (default is 30) - close_r : float - The residual-based stopping criterion for iteration. This parameter is - used differently, depending on whether it is applied to a linear or - nonlinear problem. - - If iter_mo = 1: For a linear problem, the variant of the conjugate - gradient method outlined in algorithm 2 is employed, but uses the - absolute convergence criterion in place of the relative convergence - criterion. close_r is used as the value in the absolute convergence - criterion for quitting the pcg iterative solver. close_r is compared - to the square root of the weighted residual norm. In particular, if - the square root of the weighted residual norm is less than close_r, - then the linear Pcg iterative solve is said to have converged, - causing the pcg iteration to cease and control of the program to - pass out of the pcg solver. - - If iter_mo > 1: For a nonlinear problem, close_r is used as a criterion - for quitting the picard (outer) iteration. close_r is compared to the - square root of the inner product of the residuals (the residual norm) - as calculated on entry to the pcg solver at the beginning of every - picard iteration. if this norm is less than close_r, then the picard - iteration is considered to have converged. - close_h : float - close_h is used as an alternate stopping criterion for the picard - iteration needed to solve a nonlinear problem. The maximum value of - the head change is obtained for each picard iteration, after completion - of the inner, pcg iteration. If this maximum head change is less than - close_h, then the picard iteration is considered tentatively to have - converged. However, as nonlinear problems can demonstrate oscillation - in the head solution, the picard iteration is not declared to have - converged unless the maximum head change is less than close_h for - three picard iterations. If these picard iterations are sequential, - then a good solution is assumed to have been obtained. If the picard - iterations are not sequential, then a warning is issued advising that - the convergence is conditional and the user is urged to examine the - mass balance of the solution. - relax : float - is the relaxation parameter used with npcond = 1. (default is 1.0) - ifill : int - is the fill level of the mic preconditioner. Preconditioners with - fill levels of 0 and 1 are available (ifill = 0 and ifill = 1, - respectively). (default is 0) - unit_pc : int - is the unit number of an optional output file where progress for the - inner PCG iteration can be written. (default is 0) - unit_ts : int - is the unit number of an optional output file where the actual time in - the PCG solver is accumulated. (default is 0) - adamp : int - defines the mode of damping applied to the linear solution. In general, - damping determines how much of the head changes vector shall be applied - to the hydraulic head vector hj in picard iteration j. 
If adamp = 0, - Ordinary damping is employed and a constant value of damping parameter - will be used throughout the picard iteration; this option requires a - valid value for damp. If adamp = 1, Adaptive damping is employed. If - adamp = 2: Enhanced damping algorithm in which the damping value is - increased (but never decreased) provided the picard iteration is - proceeding satisfactorily. (default is 0) - damp : float - is the damping factor. (default is 1.) - damp_lb : float - is the lower bound placed on the dampening; generally, - 0 < damp_lb < damp. (default is 0.001) - rate_d : float - is a rate parameter; generally, 0 < rate_d < 1. (default is 0.1) - chglimit : float - this variable limits the maximum head change applicable to the updated - hydraulic heads in a Picard iteration. If chglimit = 0.0, then adaptive - damping proceeds without this feature. (default is 0.) - acnvg : int - defines the mode of convergence applied to the PCG solver. - (default is 0) - cnvg_lb : int - is the minimum value that the relative convergence is allowed to take - under the self-adjusting convergence option. cnvg_lb is used only in - convergence mode acnvg = 1. (default is 0.001) - mcnvg : float - increases the relative PCG convergence criteria by a power equal to - MCNVG. MCNVG is used only in convergence mode acnvg = 2. (default is 2) - rate_c : float - this option results in variable enhancement of epsilon. - If 0 < rate_c < 1, then enhanced relative convergence is allowed to - decrease by increasing epsilon(j) = epsilon(j-1) + rate_c epsilon(j-1), - where j is the Picarditeration number; this change in epsilon occurs - so long as the Picard iteration is progressing satisfactorily. If - rate_c <= 0, then the value of epsilon set by mcnvg remains unchanged - through the picard iteration. It should be emphasized that rate_c must - have a value greater than 0 for the variable enhancement to be - ffected; otherwise epsilon remains constant. rate_c is used only in - convergence mode acnvg = 2. (default is -1.) - ipunit : int - enables progress reporting for the picard iteration. If ipunit >= 0, - then a record of progress made by the picard iteration for each time - step is printed in the MODFLOW Listing file - (Harbaugh and others, 2000). This record consists of the total number - of dry cells at the end of each time step as well as the total number - of PCG iterations necessary to obtain convergence. In addition, if - ipunit > 0, then extensive diagnostics for each Picard iteration is - also written in comma-separated format to a file whose unit number - corresponds to ipunit; the name for this file, along with its unit - number and type 'data' should be entered in the modflow Name file. - If ipunit < 0 then printing of all progress concerning the Picard - iteration is suppressed, as well as information on the nature of the - convergence of the picard iteration. (default is 0) - extension : list string - Filename extension (default is 'pcgn') - unitnumber : int - File unit number (default is None). - filenames : str or list of str - Filenames to use for the package and the output files. If - filenames=None the package name will be created using the model name - and package extension and the pcgn output names will be created using - the model name and .pcgni, .pcgnt, and .pcgno extensions. If a single - string is passed the package will be set to the string and pcgn output - names will be created using the model name and pcgn output extensions. 
- To define the names for all package files (input and output) the - length of the list of strings should be 4. Default is None. - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> pcgn = flopy.modflow.ModflowPcgn(m) - - """ - - def __init__(self, model, iter_mo=50, iter_mi=30, close_r=1e-5, - close_h=1e-5, relax=1.0, ifill=0, unit_pc=None, unit_ts=None, - adamp=0, damp=1.0, damp_lb=0.001, rate_d=0.1, chglimit=0., - acnvg=0, cnvg_lb=0.001, mcnvg=2, rate_c=-1.0, ipunit=None, - extension='pcgn', unitnumber=None, filenames=None): - """ - Package constructor. - - """ - # set default unit number of one is not specified - if unitnumber is None: - unitnumber = ModflowPcgn.defaultunit() - - # set filenames - if filenames is None: - filenames = [None, None, None, None] - elif isinstance(filenames, str): - filenames = [filenames, None, None, None] - elif isinstance(filenames, list): - if len(filenames) < 4: - for idx in range(len(filenames), 4): - filenames.append(None) - - # update external file information with unit_pc output, if necessary - if unit_pc is not None: - fname = filenames[1] - model.add_output_file(unit_pc, fname=fname, extension='pcgni', - binflag=False, - package=ModflowPcgn.ftype()) - else: - unit_pc = 0 - - # update external file information with unit_ts output, if necessary - if unit_ts is not None: - fname = filenames[2] - model.add_output_file(unit_ts, fname=fname, extension='pcgnt', - binflag=False, - package=ModflowPcgn.ftype()) - else: - unit_ts = 0 - - # update external file information with ipunit output, if necessary - if ipunit is not None: - if ipunit > 0: - fname = filenames[3] - model.add_output_file(ipunit, fname=fname, extension='pcgno', - binflag=False, - package=ModflowPcgn.ftype()) - else: - ipunit = -1 - - name = [ModflowPcgn.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and - # unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - # check if a valid model version has been specified - if model.version == 'mfusg': - err = 'Error: cannot use {} package '.format(self.name) + \ - 'with model version {}'.format(model.version) - raise Exception(err) - - self.heading = '# {} package for '.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' - self.url = 'pcgn.htm' - self.iter_mo = iter_mo - self.iter_mi = iter_mi - self.close_h = close_h - self.close_r = close_r - self.relax = relax - self.ifill = ifill - self.unit_pc = unit_pc - self.unit_ts = unit_ts - self.adamp = adamp - self.damp = damp - self.damp_lb = damp_lb - self.rate_d = rate_d - self.chglimit = chglimit - self.acnvg = acnvg - self.cnvg_lb = cnvg_lb - self.mcnvg = mcnvg - self.rate_c = rate_c - self.ipunit = ipunit - # error trapping - if self.ifill < 0 or self.ifill > 1: - e = 'PCGN: ifill must be 0 or 1 - an ifill value of ' + \ - '{} was specified'.format(self.ifill) - raise TypeError(e) - # add package - self.parent.add_package(self) - - def write_file(self): - """ - Write the package file. 
- - Returns - ------- - None - - """ - # Open file for writing - f = open(self.fn_path, 'w') - f.write('{0:s}\n'.format(self.heading)) - - ifrfm = self.parent.get_ifrefm() - if ifrfm: - # dataset 1 - line = '{} '.format(self.iter_mo) - line += '{} '.format(self.iter_mi) - line += '{} '.format(self.close_r) - line += '{}\n'.format(self.close_h) - f.write(line) - - # dataset 2 - line = '{} '.format(self.relax) - line += '{} '.format(self.ifill) - line += '{} '.format(self.unit_pc) - line += '{}\n'.format(self.unit_ts) - f.write(line) - - # dataset 3 - line = '{} '.format(self.adamp) - line += '{} '.format(self.damp) - line += '{} '.format(self.damp_lb) - line += '{} '.format(self.rate_d) - line += '{}\n'.format(self.chglimit) - f.write(line) - - # dataset 4 - line = '{} '.format(self.acnvg) - line += '{} '.format(self.cnvg_lb) - line += '{} '.format(self.mcnvg) - line += '{} '.format(self.rate_c) - line += '{}\n'.format(self.ipunit) - f.write(line) - - else: - # dataset 1 - sfmt = ' {0:9d} {1:9d} {2:9.3g} {3:9.3g}\n' - line = sfmt.format(self.iter_mo, self.iter_mi, self.close_r, - self.close_h) - f.write(line) - - # dataset 2 - sfmt = ' {0:9.3g} {1:9d} {2:9d} {3:9d}\n' - line = sfmt.format(self.relax, self.ifill, self.unit_pc, - self.unit_ts) - f.write(line) - - # dataset 3 - sfmt = ' {0:9d} {1:9.3g} {2:9.3g} {3:9.3g} {4:9.3g}\n' - line = sfmt.format(self.adamp, self.damp, self.damp_lb, - self.rate_d, self.chglimit) - f.write(line) - - # dataset 4 - sfmt = ' {0:9d} {1:9.3g} {2:9d} {3:9.3g} {4:9d}\n' - line = sfmt.format(self.acnvg, self.cnvg_lb, self.mcnvg, - self.rate_c, self.ipunit) - f.write(line) - f.close() - - @staticmethod - def load(f, model, ext_unit_dict=None): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. 
- - Returns - ------- - pcgn : ModflowPcgn object - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> pcgn = flopy.modflow.ModflowPcgn.load('test.pcgn', m) - - """ - - if model.verbose: - sys.stdout.write('loading pcgn package file...\n') - - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - - ifrefm = model.get_ifrefm() - # dataset 0 -- header - while True: - line = f.readline() - if line[0] != '#': - break - if ifrefm: - # dataset 1 - t = line.strip().split() - iter_mo = int(t[0]) - iter_mi = int(t[1]) - close_r = float(t[2]) - close_h = float(t[3]) - - # dataset 2 - while True: - line = f.readline() - if line[0] != '#': - break - t = line.strip().split() - relax = float(t[0]) - ifill = int(t[1]) - unit_pc = int(t[2]) - unit_ts = int(t[3]) - - # read datasets 3 and 4 for non-linear problems - if (iter_mo) > 1: - # dataset 3 - while True: - line = f.readline() - if line[0] != '#': - break - t = line.strip().split() - adamp = int(t[0]) - damp = float(t[1]) - damp_lb = float(t[2]) - rate_d = float(t[3]) - chglimit = float(t[4]) - - # dataset 4 - while True: - line = f.readline() - if line[0] != '#': - break - t = line.strip().split() - acnvg = int(t[0]) - cnvg_lb = float(t[1]) - mcnvg = int(t[2]) - rate_c = float(t[3]) - ipunit = int(t[4]) - else: - iter_mo = int(line[0:10].strip()) - iter_mi = int(line[10:20].strip()) - close_r = float(line[20:30].strip()) - close_h = float(line[30:40].strip()) - - # dataset 2 - while True: - line = f.readline() - if line[0] != '#': - break - relax = float(line[0:10].strip()) - ifill = int(line[10:20].strip()) - unit_pc = int(line[20:30].strip()) - unit_ts = int(line[30:40].strip()) - - # read datasets 3 and 4 for non-linear problems - if (iter_mo) > 1: - # dataset 3 - while True: - line = f.readline() - if line[0] != '#': - break - adamp = int(line[0:10].strip()) - damp = float(line[10:20].strip()) - damp_lb = float(line[20:30].strip()) - rate_d = float(line[30:40].strip()) - chglimit = float(line[40:50].strip()) - - # dataset 4 - while True: - line = f.readline() - if line[0] != '#': - break - acnvg = int(line[0:10].strip()) - cnvg_lb = float(line[10:20].strip()) - mcnvg = int(line[20:30].strip()) - rate_c = float(line[30:40].strip()) - ipunit = int(line[40:50].strip()) - - if iter_mo == 1: - adamp = None - damp = None - damp_lb = None - rate_d = None - chglimit = None - acnvg = None - cnvg_lb = None - mcnvg = None - rate_c = None - ipunit = None - - if openfile: - f.close() - - # determine specified unit number - unitnumber = None - filenames = [None, None, None, None] - if ext_unit_dict is not None: - unitnumber, filenames[0] = \ - model.get_ext_dict_attr(ext_unit_dict, - filetype=ModflowPcgn.ftype()) - if unit_pc > 0: - iu, filenames[1] = \ - model.get_ext_dict_attr(ext_unit_dict, unit=unit_pc) - if unit_ts > 0: - iu, filenames[2] = \ - model.get_ext_dict_attr(ext_unit_dict, unit=unit_ts) - if ipunit > 0: - iu, filenames[3] = \ - model.get_ext_dict_attr(ext_unit_dict, unit=ipunit) - - pcgn = ModflowPcgn(model, iter_mo=iter_mo, iter_mi=iter_mi, - close_r=close_r, close_h=close_h, relax=relax, - ifill=ifill, unit_pc=unit_pc, unit_ts=unit_ts, - adamp=adamp, damp=damp, damp_lb=damp_lb, - rate_d=rate_d, chglimit=chglimit, acnvg=acnvg, - cnvg_lb=cnvg_lb, mcnvg=mcnvg, rate_c=rate_c, - ipunit=ipunit, unitnumber=unitnumber, - filenames=filenames) - return pcgn - - @staticmethod - def ftype(): - return 'PCGN' - - @staticmethod - def defaultunit(): - return 27 +""" +mfpcgn module. 
Contains the ModflowPcgn class. Note that the user can access
+the ModflowPcgn class as `flopy.modflow.ModflowPcgn`.
+
+Additional information for this MODFLOW package can be found at the `Online
+MODFLOW Guide
+`_.
+
+"""
+
+import sys
+from ..pakbase import Package
+
+
+class ModflowPcgn(Package):
+    """
+    MODFLOW Pcgn Package Class.
+
+    Parameters
+    ----------
+    model : model object
+        The model object (of type :class:`flopy.modflow.mf.Modflow`) to which
+        this package will be added.
+    iter_mo : int
+        The maximum number of picard (outer) iterations allowed. For nonlinear
+        problems, this variable must be set to some number greater than one,
+        depending on the problem size and degree of nonlinearity. If iter_mo
+        is set to 1, then the pcgn solver assumes that the problem is linear
+        and the input requirements are greatly truncated. (default is 50)
+    iter_mi : int
+        maximum number of pcg (inner) iterations allowed. Generally,
+        this variable is set to some number greater than one, depending on
+        the matrix size, degree of convergence called for, and the nature of
+        the problem. For a nonlinear problem, iter_mi should be set large
+        enough that the pcg iteration converges freely with the relative
+        convergence parameter epsilon described in the Parameters Related
+        to Convergence of Inner Iteration: Line 4 subsection.
+        (default is 30)
+    close_r : float
+        The residual-based stopping criterion for iteration. This parameter is
+        used differently, depending on whether it is applied to a linear or
+        nonlinear problem.
+
+        If iter_mo = 1: For a linear problem, the variant of the conjugate
+        gradient method outlined in algorithm 2 is employed, but uses the
+        absolute convergence criterion in place of the relative convergence
+        criterion. close_r is used as the value in the absolute convergence
+        criterion for quitting the pcg iterative solver. close_r is compared
+        to the square root of the weighted residual norm. In particular, if
+        the square root of the weighted residual norm is less than close_r,
+        then the linear PCG iterative solve is said to have converged,
+        causing the pcg iteration to cease and control of the program to
+        pass out of the pcg solver.
+
+        If iter_mo > 1: For a nonlinear problem, close_r is used as a criterion
+        for quitting the picard (outer) iteration. close_r is compared to the
+        square root of the inner product of the residuals (the residual norm)
+        as calculated on entry to the pcg solver at the beginning of every
+        picard iteration. If this norm is less than close_r, then the picard
+        iteration is considered to have converged.
+    close_h : float
+        close_h is used as an alternate stopping criterion for the picard
+        iteration needed to solve a nonlinear problem. The maximum value of
+        the head change is obtained for each picard iteration, after completion
+        of the inner, pcg iteration. If this maximum head change is less than
+        close_h, then the picard iteration is considered tentatively to have
+        converged. However, as nonlinear problems can demonstrate oscillation
+        in the head solution, the picard iteration is not declared to have
+        converged unless the maximum head change is less than close_h for
+        three picard iterations. If these picard iterations are sequential,
+        then a good solution is assumed to have been obtained. If the picard
+        iterations are not sequential, then a warning is issued advising that
+        the convergence is conditional and the user is urged to examine the
+        mass balance of the solution.
+    relax : float
+        is the relaxation parameter used with npcond = 1. (default is 1.0)
+    ifill : int
+        is the fill level of the mic preconditioner. Preconditioners with
+        fill levels of 0 and 1 are available (ifill = 0 and ifill = 1,
+        respectively). (default is 0)
+    unit_pc : int
+        is the unit number of an optional output file where progress for the
+        inner PCG iteration can be written. (default is 0)
+    unit_ts : int
+        is the unit number of an optional output file where the actual time in
+        the PCG solver is accumulated. (default is 0)
+    adamp : int
+        defines the mode of damping applied to the linear solution. In general,
+        damping determines how much of the head changes vector shall be applied
+        to the hydraulic head vector hj in picard iteration j. If adamp = 0,
+        ordinary damping is employed and a constant value of the damping
+        parameter will be used throughout the picard iteration; this option
+        requires a valid value for damp. If adamp = 1, adaptive damping is
+        employed. If adamp = 2, an enhanced damping algorithm is employed in
+        which the damping value is increased (but never decreased) provided
+        the picard iteration is proceeding satisfactorily. (default is 0)
+    damp : float
+        is the damping factor. (default is 1.)
+    damp_lb : float
+        is the lower bound placed on the dampening; generally,
+        0 < damp_lb < damp. (default is 0.001)
+    rate_d : float
+        is a rate parameter; generally, 0 < rate_d < 1. (default is 0.1)
+    chglimit : float
+        this variable limits the maximum head change applicable to the updated
+        hydraulic heads in a Picard iteration. If chglimit = 0.0, then adaptive
+        damping proceeds without this feature. (default is 0.)
+    acnvg : int
+        defines the mode of convergence applied to the PCG solver.
+        (default is 0)
+    cnvg_lb : float
+        is the minimum value that the relative convergence is allowed to take
+        under the self-adjusting convergence option. cnvg_lb is used only in
+        convergence mode acnvg = 1. (default is 0.001)
+    mcnvg : int
+        increases the relative PCG convergence criteria by a power equal to
+        MCNVG. MCNVG is used only in convergence mode acnvg = 2. (default is 2)
+    rate_c : float
+        this option results in variable enhancement of epsilon.
+        If 0 < rate_c < 1, then enhanced relative convergence is allowed to
+        decrease by increasing epsilon(j) = epsilon(j-1) + rate_c epsilon(j-1),
+        where j is the Picard iteration number; this change in epsilon occurs
+        so long as the Picard iteration is progressing satisfactorily. If
+        rate_c <= 0, then the value of epsilon set by mcnvg remains unchanged
+        through the picard iteration. It should be emphasized that rate_c must
+        have a value greater than 0 for the variable enhancement to be
+        effected; otherwise epsilon remains constant. rate_c is used only in
+        convergence mode acnvg = 2. (default is -1.)
+    ipunit : int
+        enables progress reporting for the picard iteration. If ipunit >= 0,
+        then a record of progress made by the picard iteration for each time
+        step is printed in the MODFLOW Listing file
+        (Harbaugh and others, 2000). This record consists of the total number
+        of dry cells at the end of each time step as well as the total number
+        of PCG iterations necessary to obtain convergence. In addition, if
+        ipunit > 0, then extensive diagnostics for each Picard iteration are
+        also written in comma-separated format to a file whose unit number
+        corresponds to ipunit; the name for this file, along with its unit
+        number and type 'data', should be entered in the MODFLOW Name file.
+        If ipunit < 0, then printing of all progress concerning the Picard
+        iteration is suppressed, as well as information on the nature of the
+        convergence of the picard iteration. (default is 0)
+    extension : str
+        Filename extension (default is 'pcgn')
+    unitnumber : int
+        File unit number (default is None).
+    filenames : str or list of str
+        Filenames to use for the package and the output files. If
+        filenames=None the package name will be created using the model name
+        and package extension and the pcgn output names will be created using
+        the model name and .pcgni, .pcgnt, and .pcgno extensions. If a single
+        string is passed the package will be set to the string and pcgn output
+        names will be created using the model name and pcgn output extensions.
+        To define the names for all package files (input and output) the
+        length of the list of strings should be 4. Default is None.
+
+    Attributes
+    ----------
+
+    Methods
+    -------
+
+    See Also
+    --------
+
+    Notes
+    -----
+
+    Examples
+    --------
+
+    >>> import flopy
+    >>> m = flopy.modflow.Modflow()
+    >>> pcgn = flopy.modflow.ModflowPcgn(m)
+
+    """
+
+    def __init__(self, model, iter_mo=50, iter_mi=30, close_r=1e-5,
+                 close_h=1e-5, relax=1.0, ifill=0, unit_pc=None, unit_ts=None,
+                 adamp=0, damp=1.0, damp_lb=0.001, rate_d=0.1, chglimit=0.,
+                 acnvg=0, cnvg_lb=0.001, mcnvg=2, rate_c=-1.0, ipunit=None,
+                 extension='pcgn', unitnumber=None, filenames=None):
+        """
+        Package constructor.
+
+        """
+        # set default unit number if one is not specified
+        if unitnumber is None:
+            unitnumber = ModflowPcgn.defaultunit()
+
+        # set filenames
+        if filenames is None:
+            filenames = [None, None, None, None]
+        elif isinstance(filenames, str):
+            filenames = [filenames, None, None, None]
+        elif isinstance(filenames, list):
+            if len(filenames) < 4:
+                for idx in range(len(filenames), 4):
+                    filenames.append(None)
+
+        # update external file information with unit_pc output, if necessary
+        if unit_pc is not None:
+            fname = filenames[1]
+            model.add_output_file(unit_pc, fname=fname, extension='pcgni',
+                                  binflag=False,
+                                  package=ModflowPcgn.ftype())
+        else:
+            unit_pc = 0
+
+        # update external file information with unit_ts output, if necessary
+        if unit_ts is not None:
+            fname = filenames[2]
+            model.add_output_file(unit_ts, fname=fname, extension='pcgnt',
+                                  binflag=False,
+                                  package=ModflowPcgn.ftype())
+        else:
+            unit_ts = 0
+
+        # update external file information with ipunit output, if necessary
+        if ipunit is not None:
+            if ipunit > 0:
+                fname = filenames[3]
+                model.add_output_file(ipunit, fname=fname, extension='pcgno',
+                                      binflag=False,
+                                      package=ModflowPcgn.ftype())
+        else:
+            ipunit = -1
+
+        name = [ModflowPcgn.ftype()]
+        units = [unitnumber]
+        extra = ['']
+
+        # set package name
+        fname = [filenames[0]]
+
+        # Call ancestor's init to set self.parent, extension, name and
+        # unit number
+        Package.__init__(self, model, extension=extension, name=name,
+                         unit_number=units, extra=extra, filenames=fname)
+
+        # check if a valid model version has been specified
+        if model.version == 'mfusg':
+            err = 'Error: cannot use {} package '.format(self.name) + \
+                  'with model version {}'.format(model.version)
+            raise Exception(err)
+
+        self.heading = '# {} package for '.format(self.name[0]) + \
+                       ' {}, '.format(model.version_types[model.version]) + \
+                       'generated by Flopy.'
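For orientation, a minimal usage sketch tying together the constructor
arguments documented above; the parameter values are illustrative
assumptions and are not part of the patch:

    import flopy
    m = flopy.modflow.Modflow()
    # iter_mo > 1 selects the nonlinear (Picard) mode; load() only reads
    # datasets 3 and 4 (damping and convergence options) in that case
    pcgn = flopy.modflow.ModflowPcgn(m, iter_mo=50, iter_mi=30,
                                     close_r=1e-5, close_h=1e-5,
                                     adamp=1, damp=0.75, damp_lb=0.01)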
+ self.url = 'pcgn.htm' + self.iter_mo = iter_mo + self.iter_mi = iter_mi + self.close_h = close_h + self.close_r = close_r + self.relax = relax + self.ifill = ifill + self.unit_pc = unit_pc + self.unit_ts = unit_ts + self.adamp = adamp + self.damp = damp + self.damp_lb = damp_lb + self.rate_d = rate_d + self.chglimit = chglimit + self.acnvg = acnvg + self.cnvg_lb = cnvg_lb + self.mcnvg = mcnvg + self.rate_c = rate_c + self.ipunit = ipunit + # error trapping + if self.ifill < 0 or self.ifill > 1: + e = 'PCGN: ifill must be 0 or 1 - an ifill value of ' + \ + '{} was specified'.format(self.ifill) + raise TypeError(e) + # add package + self.parent.add_package(self) + + def write_file(self): + """ + Write the package file. + + Returns + ------- + None + + """ + # Open file for writing + f = open(self.fn_path, 'w') + f.write('{0:s}\n'.format(self.heading)) + + ifrfm = self.parent.get_ifrefm() + if ifrfm: + # dataset 1 + line = '{} '.format(self.iter_mo) + line += '{} '.format(self.iter_mi) + line += '{} '.format(self.close_r) + line += '{}\n'.format(self.close_h) + f.write(line) + + # dataset 2 + line = '{} '.format(self.relax) + line += '{} '.format(self.ifill) + line += '{} '.format(self.unit_pc) + line += '{}\n'.format(self.unit_ts) + f.write(line) + + # dataset 3 + line = '{} '.format(self.adamp) + line += '{} '.format(self.damp) + line += '{} '.format(self.damp_lb) + line += '{} '.format(self.rate_d) + line += '{}\n'.format(self.chglimit) + f.write(line) + + # dataset 4 + line = '{} '.format(self.acnvg) + line += '{} '.format(self.cnvg_lb) + line += '{} '.format(self.mcnvg) + line += '{} '.format(self.rate_c) + line += '{}\n'.format(self.ipunit) + f.write(line) + + else: + # dataset 1 + sfmt = ' {0:9d} {1:9d} {2:9.3g} {3:9.3g}\n' + line = sfmt.format(self.iter_mo, self.iter_mi, self.close_r, + self.close_h) + f.write(line) + + # dataset 2 + sfmt = ' {0:9.3g} {1:9d} {2:9d} {3:9d}\n' + line = sfmt.format(self.relax, self.ifill, self.unit_pc, + self.unit_ts) + f.write(line) + + # dataset 3 + sfmt = ' {0:9d} {1:9.3g} {2:9.3g} {3:9.3g} {4:9.3g}\n' + line = sfmt.format(self.adamp, self.damp, self.damp_lb, + self.rate_d, self.chglimit) + f.write(line) + + # dataset 4 + sfmt = ' {0:9d} {1:9.3g} {2:9d} {3:9.3g} {4:9d}\n' + line = sfmt.format(self.acnvg, self.cnvg_lb, self.mcnvg, + self.rate_c, self.ipunit) + f.write(line) + f.close() + + @staticmethod + def load(f, model, ext_unit_dict=None): + """ + Load an existing package. + + Parameters + ---------- + f : filename or file handle + File to load. + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to + which this package will be added. + ext_unit_dict : dictionary, optional + If the arrays in the file are specified using EXTERNAL, + or older style array control records, then `f` should be a file + handle. In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. 
+ + Returns + ------- + pcgn : ModflowPcgn object + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> pcgn = flopy.modflow.ModflowPcgn.load('test.pcgn', m) + + """ + + if model.verbose: + sys.stdout.write('loading pcgn package file...\n') + + openfile = not hasattr(f, 'read') + if openfile: + filename = f + f = open(filename, 'r') + + ifrefm = model.get_ifrefm() + # dataset 0 -- header + while True: + line = f.readline() + if line[0] != '#': + break + if ifrefm: + # dataset 1 + t = line.strip().split() + iter_mo = int(t[0]) + iter_mi = int(t[1]) + close_r = float(t[2]) + close_h = float(t[3]) + + # dataset 2 + while True: + line = f.readline() + if line[0] != '#': + break + t = line.strip().split() + relax = float(t[0]) + ifill = int(t[1]) + unit_pc = int(t[2]) + unit_ts = int(t[3]) + + # read datasets 3 and 4 for non-linear problems + if (iter_mo) > 1: + # dataset 3 + while True: + line = f.readline() + if line[0] != '#': + break + t = line.strip().split() + adamp = int(t[0]) + damp = float(t[1]) + damp_lb = float(t[2]) + rate_d = float(t[3]) + chglimit = float(t[4]) + + # dataset 4 + while True: + line = f.readline() + if line[0] != '#': + break + t = line.strip().split() + acnvg = int(t[0]) + cnvg_lb = float(t[1]) + mcnvg = int(t[2]) + rate_c = float(t[3]) + ipunit = int(t[4]) + else: + iter_mo = int(line[0:10].strip()) + iter_mi = int(line[10:20].strip()) + close_r = float(line[20:30].strip()) + close_h = float(line[30:40].strip()) + + # dataset 2 + while True: + line = f.readline() + if line[0] != '#': + break + relax = float(line[0:10].strip()) + ifill = int(line[10:20].strip()) + unit_pc = int(line[20:30].strip()) + unit_ts = int(line[30:40].strip()) + + # read datasets 3 and 4 for non-linear problems + if (iter_mo) > 1: + # dataset 3 + while True: + line = f.readline() + if line[0] != '#': + break + adamp = int(line[0:10].strip()) + damp = float(line[10:20].strip()) + damp_lb = float(line[20:30].strip()) + rate_d = float(line[30:40].strip()) + chglimit = float(line[40:50].strip()) + + # dataset 4 + while True: + line = f.readline() + if line[0] != '#': + break + acnvg = int(line[0:10].strip()) + cnvg_lb = float(line[10:20].strip()) + mcnvg = int(line[20:30].strip()) + rate_c = float(line[30:40].strip()) + ipunit = int(line[40:50].strip()) + + if iter_mo == 1: + adamp = None + damp = None + damp_lb = None + rate_d = None + chglimit = None + acnvg = None + cnvg_lb = None + mcnvg = None + rate_c = None + ipunit = None + + if openfile: + f.close() + + # determine specified unit number + unitnumber = None + filenames = [None, None, None, None] + if ext_unit_dict is not None: + unitnumber, filenames[0] = \ + model.get_ext_dict_attr(ext_unit_dict, + filetype=ModflowPcgn.ftype()) + if unit_pc > 0: + iu, filenames[1] = \ + model.get_ext_dict_attr(ext_unit_dict, unit=unit_pc) + if unit_ts > 0: + iu, filenames[2] = \ + model.get_ext_dict_attr(ext_unit_dict, unit=unit_ts) + if ipunit > 0: + iu, filenames[3] = \ + model.get_ext_dict_attr(ext_unit_dict, unit=ipunit) + + pcgn = ModflowPcgn(model, iter_mo=iter_mo, iter_mi=iter_mi, + close_r=close_r, close_h=close_h, relax=relax, + ifill=ifill, unit_pc=unit_pc, unit_ts=unit_ts, + adamp=adamp, damp=damp, damp_lb=damp_lb, + rate_d=rate_d, chglimit=chglimit, acnvg=acnvg, + cnvg_lb=cnvg_lb, mcnvg=mcnvg, rate_c=rate_c, + ipunit=ipunit, unitnumber=unitnumber, + filenames=filenames) + return pcgn + + @staticmethod + def ftype(): + return 'PCGN' + + @staticmethod + def defaultunit(): + return 27 diff --git 
a/flopy/modflow/mfpks.py b/flopy/modflow/mfpks.py index eb9b2a0cf1..78e51ccce2 100644 --- a/flopy/modflow/mfpks.py +++ b/flopy/modflow/mfpks.py @@ -1,265 +1,265 @@ -""" -mfpks module. Contains the ModflowPks class. Note that the user can access -the ModflowPks class as `flopy.modflow.ModflowPks`. - -""" -import sys -from ..pakbase import Package - - -class ModflowPks(Package): - """ - MODFLOW Pks Package Class. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to which - this package will be added. - mxiter : int - maximum number of outer iterations. (default is 100) - innerit : int - maximum number of inner iterations. (default is 30) - hclose : float - is the head change criterion for convergence. (default is 1.e-3). - rclose : float - is the residual criterion for convergence. (default is 1.e-1) - relax : float - is the relaxation parameter used with npcond = 1. (default is 1.0) - . - . - . - iprpks : int - solver print out interval. (default is 0). - mutpks : int - If mutpcg = 0, tables of maximum head change and residual will be - printed each iteration. - If mutpcg = 1, only the total number of iterations will be printed. - If mutpcg = 2, no information will be printed. - If mutpcg = 3, information will only be printed if convergence fails. - (default is 3). - damp : float - is the steady-state damping factor. (default is 1.) - dampt : float - is the transient damping factor. (default is 1.) - extension : list string - Filename extension (default is 'pks') - unitnumber : int - File unit number (default is 27). - filenames : str or list of str - Filenames to use for the package. If filenames=None the package name - will be created using the model name and package extension. If a - single string is passed the package will be set to the string. - Default is None. - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> pks = flopy.modflow.ModflowPks(m) - - """ - - def __init__(self, model, mxiter=100, innerit=50, - isolver=1, npc=2, iscl=0, iord=0, ncoresm=1, ncoresv=1, - damp=1.0, dampt=1.0, relax=0.97, - ifill=0, droptol=0.0, - hclose=1e-3, rclose=1e-1, l2norm=None, - iprpks=0, mutpks=3, - mpi=False, partopt=0, novlapimpsol=1, stenimpsol=2, verbose=0, - partdata=None, - extension='pks', unitnumber=None, filenames=None): - """ - Package constructor. - - """ - # set default unit number of one is not specified - if unitnumber is None: - unitnumber = ModflowPks.defaultunit() - - # set filenames - if filenames is None: - filenames = [None] - elif isinstance(filenames, str): - filenames = [filenames] - - # Fill namefile items - name = [ModflowPks.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - # check if a valid model version has been specified - if model.version == 'mf2k' or model.version == 'mfnwt': - err = 'Error: cannot use {} package with model version {}'.format( - self.name, model.version) - raise Exception(err) - - self.heading = '# {} package for '.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' 
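The write_file method in this listing emits a plain keyword-record input
file. As a rough sketch grounded in that code (constructor defaults assumed;
the heading comment is omitted, and the NCORESM/NCORESV, IFILL/DROPTOL,
L2NORM, and MPI records do not appear because their trigger conditions are
not met at the defaults), the resulting PKS file would contain:

    MXITER 100
    INNERIT 50
    ISOLVER 1
    NPC 2
    ISCL 0
    IORD 0
    DAMP 1.0
    DAMPT 1.0
    RELAX 0.97
    HCLOSEPKS 0.001
    RCLOSEPKS 0.1
    IPRPKS 0
    MUTPKS 3
    END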
- self.url = 'pks.htm' - self.mxiter = mxiter - self.innerit = innerit - self.isolver = isolver - self.npc = npc - self.iscl = iscl - self.iord = iord - self.ncoresm = ncoresm - self.ncoresv = ncoresv - self.damp = damp - self.dampt = dampt - self.relax = relax - self.ifill = ifill - self.droptol = droptol - self.hclose = hclose - self.rclose = rclose - self.l2norm = l2norm - self.iprpks = iprpks - self.mutpks = mutpks - # MPI - self.mpi = mpi - self.partopt = partopt - self.novlapimpsol = novlapimpsol - self.stenimpsol = stenimpsol - self.verbose = verbose - self.partdata = partdata - - self.parent.add_package(self) - - def write_file(self): - """ - Write the package file. - - Returns - ------- - None - - """ - # Open file for writing - f = open(self.fn_path, 'w') - f.write('%s\n' % self.heading) - f.write('MXITER {0}\n'.format(self.mxiter)) - f.write('INNERIT {0}\n'.format(self.innerit)) - f.write('ISOLVER {0}\n'.format(self.isolver)) - f.write('NPC {0}\n'.format(self.npc)) - f.write('ISCL {0}\n'.format(self.iscl)) - f.write('IORD {0}\n'.format(self.iord)) - if self.ncoresm > 1: - f.write('NCORESM {0}\n'.format(self.ncoresm)) - if self.ncoresv > 1: - f.write('NCORESV {0}\n'.format(self.ncoresv)) - f.write('DAMP {0}\n'.format(self.damp)) - f.write('DAMPT {0}\n'.format(self.dampt)) - if self.npc > 0: - f.write('RELAX {0}\n'.format(self.relax)) - if self.npc == 3: - f.write('IFILL {0}\n'.format(self.ifill)) - f.write('DROPTOL {0}\n'.format(self.droptol)) - f.write('HCLOSEPKS {0}\n'.format(self.hclose)) - f.write('RCLOSEPKS {0}\n'.format(self.rclose)) - if self.l2norm != None: - if self.l2norm.lower() == 'l2norm' or self.l2norm == '1': - f.write('L2NORM\n') - elif self.l2norm.lower() == 'rl2norm' or self.l2norm == '2': - f.write('RELATIVE-L2NORM\n') - f.write('IPRPKS {0}\n'.format(self.iprpks)) - f.write('MUTPKS {0}\n'.format(self.mutpks)) - # MPI - if self.mpi: - f.write('PARTOPT {0}\n'.format(self.partopt)) - f.write('NOVLAPIMPSOL {0}\n'.format(self.novlapimpsol)) - f.write('STENIMPSOL {0}\n'.format(self.stenimpsol)) - f.write('VERBOSE {0}\n'.format(self.verbose)) - if self.partopt == 1 | 2: - pass - # to be implemented - - f.write('END\n') - f.close() - - @staticmethod - def load(f, model, ext_unit_dict=None): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - - Returns - ------- - pks : ModflowPks object - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> pks = flopy.modflow.ModflowPks.load('test.pks', m) - - """ - - if model.verbose: - sys.stdout.write('loading pks package file...\n') - - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - - # dataset 0 -- header - - msg = 3 * ' ' + \ - 'Warning: load method not completed. default pks object created.' 
-            print(msg)
-
-        if openfile:
-            f.close()
-
-        # set package unit number
-        unitnumber = None
-        filenames = [None]
-        if ext_unit_dict is not None:
-            unitnumber, filenames[0] = \
-                model.get_ext_dict_attr(ext_unit_dict,
-                                        filetype=ModflowPks.ftype())
-
-        pks = ModflowPks(model, unitnumber=unitnumber, filenames=filenames)
-        return pks
-
-    @staticmethod
-    def ftype():
-        return 'PKS'
-
-    @staticmethod
-    def defaultunit():
-        return 27
+"""
+mfpks module. Contains the ModflowPks class. Note that the user can access
+the ModflowPks class as `flopy.modflow.ModflowPks`.
+
+"""
+import sys
+from ..pakbase import Package
+
+
+class ModflowPks(Package):
+    """
+    MODFLOW Pks Package Class.
+
+    Parameters
+    ----------
+    model : model object
+        The model object (of type :class:`flopy.modflow.mf.Modflow`) to which
+        this package will be added.
+    mxiter : int
+        maximum number of outer iterations. (default is 100)
+    innerit : int
+        maximum number of inner iterations. (default is 50)
+    hclose : float
+        is the head change criterion for convergence. (default is 1.e-3).
+    rclose : float
+        is the residual criterion for convergence. (default is 1.e-1)
+    relax : float
+        is the relaxation parameter used with npc > 0. (default is 0.97)
+    .
+    .
+    .
+    iprpks : int
+        solver printout interval. (default is 0).
+    mutpks : int
+        If mutpks = 0, tables of maximum head change and residual will be
+        printed each iteration.
+        If mutpks = 1, only the total number of iterations will be printed.
+        If mutpks = 2, no information will be printed.
+        If mutpks = 3, information will only be printed if convergence fails.
+        (default is 3).
+    damp : float
+        is the steady-state damping factor. (default is 1.)
+    dampt : float
+        is the transient damping factor. (default is 1.)
+    extension : str
+        Filename extension (default is 'pks')
+    unitnumber : int
+        File unit number (default is 27).
+    filenames : str or list of str
+        Filenames to use for the package. If filenames=None the package name
+        will be created using the model name and package extension. If a
+        single string is passed the package will be set to the string.
+        Default is None.
+
+    Attributes
+    ----------
+
+    Methods
+    -------
+
+    See Also
+    --------
+
+    Notes
+    -----
+
+    Examples
+    --------
+
+    >>> import flopy
+    >>> m = flopy.modflow.Modflow()
+    >>> pks = flopy.modflow.ModflowPks(m)
+
+    """
+
+    def __init__(self, model, mxiter=100, innerit=50,
+                 isolver=1, npc=2, iscl=0, iord=0, ncoresm=1, ncoresv=1,
+                 damp=1.0, dampt=1.0, relax=0.97,
+                 ifill=0, droptol=0.0,
+                 hclose=1e-3, rclose=1e-1, l2norm=None,
+                 iprpks=0, mutpks=3,
+                 mpi=False, partopt=0, novlapimpsol=1, stenimpsol=2, verbose=0,
+                 partdata=None,
+                 extension='pks', unitnumber=None, filenames=None):
+        """
+        Package constructor.
+ + """ + # set default unit number of one is not specified + if unitnumber is None: + unitnumber = ModflowPks.defaultunit() + + # set filenames + if filenames is None: + filenames = [None] + elif isinstance(filenames, str): + filenames = [filenames] + + # Fill namefile items + name = [ModflowPks.ftype()] + units = [unitnumber] + extra = [''] + + # set package name + fname = [filenames[0]] + + # Call ancestor's init to set self.parent, extension, name and unit number + Package.__init__(self, model, extension=extension, name=name, + unit_number=units, extra=extra, filenames=fname) + # check if a valid model version has been specified + if model.version == 'mf2k' or model.version == 'mfnwt': + err = 'Error: cannot use {} package with model version {}'.format( + self.name, model.version) + raise Exception(err) + + self.heading = '# {} package for '.format(self.name[0]) + \ + ' {}, '.format(model.version_types[model.version]) + \ + 'generated by Flopy.' + self.url = 'pks.htm' + self.mxiter = mxiter + self.innerit = innerit + self.isolver = isolver + self.npc = npc + self.iscl = iscl + self.iord = iord + self.ncoresm = ncoresm + self.ncoresv = ncoresv + self.damp = damp + self.dampt = dampt + self.relax = relax + self.ifill = ifill + self.droptol = droptol + self.hclose = hclose + self.rclose = rclose + self.l2norm = l2norm + self.iprpks = iprpks + self.mutpks = mutpks + # MPI + self.mpi = mpi + self.partopt = partopt + self.novlapimpsol = novlapimpsol + self.stenimpsol = stenimpsol + self.verbose = verbose + self.partdata = partdata + + self.parent.add_package(self) + + def write_file(self): + """ + Write the package file. + + Returns + ------- + None + + """ + # Open file for writing + f = open(self.fn_path, 'w') + f.write('%s\n' % self.heading) + f.write('MXITER {0}\n'.format(self.mxiter)) + f.write('INNERIT {0}\n'.format(self.innerit)) + f.write('ISOLVER {0}\n'.format(self.isolver)) + f.write('NPC {0}\n'.format(self.npc)) + f.write('ISCL {0}\n'.format(self.iscl)) + f.write('IORD {0}\n'.format(self.iord)) + if self.ncoresm > 1: + f.write('NCORESM {0}\n'.format(self.ncoresm)) + if self.ncoresv > 1: + f.write('NCORESV {0}\n'.format(self.ncoresv)) + f.write('DAMP {0}\n'.format(self.damp)) + f.write('DAMPT {0}\n'.format(self.dampt)) + if self.npc > 0: + f.write('RELAX {0}\n'.format(self.relax)) + if self.npc == 3: + f.write('IFILL {0}\n'.format(self.ifill)) + f.write('DROPTOL {0}\n'.format(self.droptol)) + f.write('HCLOSEPKS {0}\n'.format(self.hclose)) + f.write('RCLOSEPKS {0}\n'.format(self.rclose)) + if self.l2norm != None: + if self.l2norm.lower() == 'l2norm' or self.l2norm == '1': + f.write('L2NORM\n') + elif self.l2norm.lower() == 'rl2norm' or self.l2norm == '2': + f.write('RELATIVE-L2NORM\n') + f.write('IPRPKS {0}\n'.format(self.iprpks)) + f.write('MUTPKS {0}\n'.format(self.mutpks)) + # MPI + if self.mpi: + f.write('PARTOPT {0}\n'.format(self.partopt)) + f.write('NOVLAPIMPSOL {0}\n'.format(self.novlapimpsol)) + f.write('STENIMPSOL {0}\n'.format(self.stenimpsol)) + f.write('VERBOSE {0}\n'.format(self.verbose)) + if self.partopt == 1 | 2: + pass + # to be implemented + + f.write('END\n') + f.close() + + @staticmethod + def load(f, model, ext_unit_dict=None): + """ + Load an existing package. + + Parameters + ---------- + f : filename or file handle + File to load. + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to + which this package will be added. 
+ ext_unit_dict : dictionary, optional + If the arrays in the file are specified using EXTERNAL, + or older style array control records, then `f` should be a file + handle. In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. + + Returns + ------- + pks : ModflowPks object + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> pks = flopy.modflow.ModflowPks.load('test.pks', m) + + """ + + if model.verbose: + sys.stdout.write('loading pks package file...\n') + + openfile = not hasattr(f, 'read') + if openfile: + filename = f + f = open(filename, 'r') + + # dataset 0 -- header + + msg = 3 * ' ' + \ + 'Warning: load method not completed. default pks object created.' + print(msg) + + if openfile: + f.close() + + # set package unit number + unitnumber = None + filenames = [None] + if ext_unit_dict is not None: + unitnumber, filenames[0] = \ + model.get_ext_dict_attr(ext_unit_dict, + filetype=ModflowPks.ftype()) + + pks = ModflowPks(model, unitnumber=unitnumber, filenames=filenames) + return pks + + @staticmethod + def ftype(): + return 'PKS' + + @staticmethod + def defaultunit(): + return 27 diff --git a/flopy/modflow/mfpval.py b/flopy/modflow/mfpval.py index d6acee15c1..28057a2b3b 100644 --- a/flopy/modflow/mfpval.py +++ b/flopy/modflow/mfpval.py @@ -1,216 +1,216 @@ -""" -mfpval module. Contains the ModflowPval class. Note that the user can access -the ModflowPval class as `flopy.modflow.ModflowPval`. - -Additional information for this MODFLOW package can be found at the `Online -MODFLOW Guide -`_. - -""" -import sys -from ..pakbase import Package - - -class ModflowPval(Package): - """ - MODFLOW Mult Package Class. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to which - this package will be added. - pval_dict : dict - Dictionary with pval data for the model. pval_dict is typically - instantiated using load method. - extension : string - Filename extension (default is 'pval') - unitnumber : int - File unit number (default is None). - filenames : str or list of str - Filenames to use for the package. If filenames=None the package name - will be created using the model name and package extension. If a - single string is passed the package will be set to the string. - Default is None. - - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - Parameters are supported in Flopy only when reading in existing models. - Parameter values are converted to native values in Flopy and the - connection to "parameters" is thus nonexistent. - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> pval_dict = flopy.modflow.ModflowZon(m, pval_dict=pval_dict) - - """ - - def __init__(self, model, pval_dict=None, - extension='pval', unitnumber=None, filenames=None): - """ - Package constructor. 
- - """ - # set default unit number of one is not specified - if unitnumber is None: - unitnumber = ModflowPval.defaultunit() - - # set filenames - if filenames is None: - filenames = [None] - elif isinstance(filenames, str): - filenames = [filenames] - - # Fill namefile items - name = [ModflowPval.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and - # unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - self.heading = '# {} package for '.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' - self.url = 'parameter_value_file.htm' - - self.npval = 0 - if pval_dict is not None: - self.pval = len(pval_dict) - self.pval_dict = pval_dict - self.parent.add_package(self) - - def write_file(self): - """ - Write the package file. - - Returns - ------- - None - - Notes - ----- - Not implemented because parameters are only supported on load - - """ - pass - - def __getitem__(self, item): - """ - overload __getitem__ to return a value from the pval_dict - - """ - - if item in list(self.pval_dict.keys()): - return self.pval_dict[item] - else: - return None - - @staticmethod - def load(f, model, ext_unit_dict=None): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - - Returns - ------- - pval : ModflowPval dict - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> mlt = flopy.modflow.ModflowPval.load('test.pval', m) - - """ - - if model.verbose: - sys.stdout.write('loading pval package file...\n') - - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - else: - filename = f.name - - # dataset 0 -- header - while True: - line = f.readline() - if line[0] != '#': - break - # dataset 1 - t = line.strip().split() - npval = int(t[0]) - - if model.verbose: - sys.stdout.write( - ' reading parameter values from "{:<10s}"\n'.format( - filename)) - - # read PVAL data - pval_dict = dict() - for n in range(npval): - line = f.readline() - t = line.strip().split() - if len(t[0]) > 10: - pvalnam = t[0][0:10].lower() - else: - pvalnam = t[0].lower() - - pval_dict[pvalnam] = float(t[1]) - - if openfile: - f.close() - - # set package unit number - unitnumber = None - filenames = [None] - if ext_unit_dict is not None: - unitnumber, filenames[0] = \ - model.get_ext_dict_attr(ext_unit_dict, - filetype=ModflowPval.ftype()) - - pval = ModflowPval(model, pval_dict=pval_dict, - unitnumber=unitnumber, filenames=filenames) - return pval - - @staticmethod - def ftype(): - return 'PVAL' - - @staticmethod - def defaultunit(): - return 1005 +""" +mfpval module. Contains the ModflowPval class. Note that the user can access +the ModflowPval class as `flopy.modflow.ModflowPval`. + +Additional information for this MODFLOW package can be found at the `Online +MODFLOW Guide +`_. 
+ +""" +import sys +from ..pakbase import Package + + +class ModflowPval(Package): + """ + MODFLOW Mult Package Class. + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to which + this package will be added. + pval_dict : dict + Dictionary with pval data for the model. pval_dict is typically + instantiated using load method. + extension : string + Filename extension (default is 'pval') + unitnumber : int + File unit number (default is None). + filenames : str or list of str + Filenames to use for the package. If filenames=None the package name + will be created using the model name and package extension. If a + single string is passed the package will be set to the string. + Default is None. + + + Attributes + ---------- + + Methods + ------- + + See Also + -------- + + Notes + ----- + Parameters are supported in Flopy only when reading in existing models. + Parameter values are converted to native values in Flopy and the + connection to "parameters" is thus nonexistent. + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> pval_dict = flopy.modflow.ModflowZon(m, pval_dict=pval_dict) + + """ + + def __init__(self, model, pval_dict=None, + extension='pval', unitnumber=None, filenames=None): + """ + Package constructor. + + """ + # set default unit number of one is not specified + if unitnumber is None: + unitnumber = ModflowPval.defaultunit() + + # set filenames + if filenames is None: + filenames = [None] + elif isinstance(filenames, str): + filenames = [filenames] + + # Fill namefile items + name = [ModflowPval.ftype()] + units = [unitnumber] + extra = [''] + + # set package name + fname = [filenames[0]] + + # Call ancestor's init to set self.parent, extension, name and + # unit number + Package.__init__(self, model, extension=extension, name=name, + unit_number=units, extra=extra, filenames=fname) + + self.heading = '# {} package for '.format(self.name[0]) + \ + ' {}, '.format(model.version_types[model.version]) + \ + 'generated by Flopy.' + self.url = 'parameter_value_file.htm' + + self.npval = 0 + if pval_dict is not None: + self.pval = len(pval_dict) + self.pval_dict = pval_dict + self.parent.add_package(self) + + def write_file(self): + """ + Write the package file. + + Returns + ------- + None + + Notes + ----- + Not implemented because parameters are only supported on load + + """ + pass + + def __getitem__(self, item): + """ + overload __getitem__ to return a value from the pval_dict + + """ + + if item in list(self.pval_dict.keys()): + return self.pval_dict[item] + else: + return None + + @staticmethod + def load(f, model, ext_unit_dict=None): + """ + Load an existing package. + + Parameters + ---------- + f : filename or file handle + File to load. + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to + which this package will be added. + ext_unit_dict : dictionary, optional + If the arrays in the file are specified using EXTERNAL, + or older style array control records, then `f` should be a file + handle. In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. 
+ + Returns + ------- + pval : ModflowPval dict + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> mlt = flopy.modflow.ModflowPval.load('test.pval', m) + + """ + + if model.verbose: + sys.stdout.write('loading pval package file...\n') + + openfile = not hasattr(f, 'read') + if openfile: + filename = f + f = open(filename, 'r') + else: + filename = f.name + + # dataset 0 -- header + while True: + line = f.readline() + if line[0] != '#': + break + # dataset 1 + t = line.strip().split() + npval = int(t[0]) + + if model.verbose: + sys.stdout.write( + ' reading parameter values from "{:<10s}"\n'.format( + filename)) + + # read PVAL data + pval_dict = dict() + for n in range(npval): + line = f.readline() + t = line.strip().split() + if len(t[0]) > 10: + pvalnam = t[0][0:10].lower() + else: + pvalnam = t[0].lower() + + pval_dict[pvalnam] = float(t[1]) + + if openfile: + f.close() + + # set package unit number + unitnumber = None + filenames = [None] + if ext_unit_dict is not None: + unitnumber, filenames[0] = \ + model.get_ext_dict_attr(ext_unit_dict, + filetype=ModflowPval.ftype()) + + pval = ModflowPval(model, pval_dict=pval_dict, + unitnumber=unitnumber, filenames=filenames) + return pval + + @staticmethod + def ftype(): + return 'PVAL' + + @staticmethod + def defaultunit(): + return 1005 diff --git a/flopy/modflow/mfrch.py b/flopy/modflow/mfrch.py index 1d4f09b477..3956f80262 100644 --- a/flopy/modflow/mfrch.py +++ b/flopy/modflow/mfrch.py @@ -1,475 +1,475 @@ -""" -mfrch module. Contains the ModflowRch class. Note that the user can access -the ModflowRch class as `flopy.modflow.ModflowRch`. - -Additional information for this MODFLOW package can be found at the `Online -MODFLOW Guide -`_. - -""" - -import sys -import numpy as np -from ..pakbase import Package -from ..utils import Util2d, Transient2d -from ..modflow.mfparbc import ModflowParBc as mfparbc -from ..utils.flopy_io import line_parse - - -class ModflowRch(Package): - """ - MODFLOW Recharge Package Class. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to which - this package will be added. - ipakcb : int - A flag that is used to determine if cell-by-cell budget data should be - saved. If ipakcb is non-zero cell-by-cell budget data will be saved. - (default is 0). - nrchop : int - is the recharge option code. - 1: Recharge to top grid layer only - 2: Recharge to layer defined in irch - 3: Recharge to highest active cell (default is 3). - rech : float or array of floats (nrow, ncol) - is the recharge flux. (default is 1.e-3). - irch : int or array of ints (nrow, ncol) - is the layer to which recharge is applied in each vertical - column (only used when nrchop=2). (default is 0). - extension : string - Filename extension (default is 'rch') - unitnumber : int - File unit number (default is None). - filenames : str or list of str - Filenames to use for the package and the output files. If - filenames=None the package name will be created using the model name - and package extension and the cbc output name will be created using - the model name and .cbc extension (for example, modflowtest.cbc), - if ipakcbc is a number greater than zero. If a single string is passed - the package will be set to the string and cbc output names will be - created using the model name and .cbc extension, if ipakcbc is a - number greater than zero. To define the names for all package files - (input and output) the length of the list of strings should be 2. 
- Default is None. - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - Parameters are supported in Flopy only when reading in existing models. - Parameter values are converted to native values in Flopy and the - connection to "parameters" is thus nonexistent. - - Examples - -------- - - >>> #steady state - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> rch = flopy.modflow.ModflowRch(m, nrchop=3, rech=1.2e-4) - - >>> #transient with time-varying recharge - >>> import flopy - >>> rech = {} - >>> rech[0] = 1.2e-4 #stress period 1 to 4 - >>> rech[4] = 0.0 #stress period 5 and 6 - >>> rech[6] = 1.2e-3 #stress period 7 to the end - >>> m = flopy.modflow.Modflow() - >>> rch = flopy.modflow.ModflowRch(m, nrchop=3, rech=rech) - - """ - - def __init__(self, model, nrchop=3, ipakcb=None, rech=1e-3, irch=0, - extension='rch', unitnumber=None, filenames=None): - """ - Package constructor. - - """ - # set default unit number of one is not specified - if unitnumber is None: - unitnumber = ModflowRch.defaultunit() - - # set filenames - if filenames is None: - filenames = [None, None] - elif isinstance(filenames, str): - filenames = [filenames, None] - elif isinstance(filenames, list): - if len(filenames) < 2: - filenames.append(None) - - # update external file information with cbc output, if necessary - if ipakcb is not None: - fname = filenames[1] - model.add_output_file(ipakcb, fname=fname, - package=ModflowRch.ftype()) - else: - ipakcb = 0 - - # Fill namefile items - name = [ModflowRch.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and - # unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper - self.heading = '# {} package for '.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' - self.url = 'rch.htm' - - self.nrchop = nrchop - self.ipakcb = ipakcb - self.rech = Transient2d(model, (nrow, ncol), np.float32, - rech, name='rech_') - if self.nrchop == 2: - self.irch = Transient2d(model, (nrow, ncol), np.int32, - irch, - name='irch_') - else: - self.irch = None - self.np = 0 - self.parent.add_package(self) - - def check(self, f=None, verbose=True, level=1, RTmin=2e-8, RTmax=2e-4, - checktype=None): - """ - Check package data for common errors. - - Parameters - ---------- - f : str or file handle - String defining file name or file handle for summary file - of check method output. If a sting is passed a file handle - is created. If f is None, check method does not write - results to a summary file. (default is None) - verbose : bool - Boolean flag used to determine if check method results are - written to the screen - level : int - Check method analysis level. If level=0, summary checks are - performed. If level=1, full checks are performed. - RTmin : float - Minimum product of recharge and transmissivity. Default is 2e-8 - RTmax : float - Maximum product of recharge and transmissivity. 
Default is 2e-4 - - Returns - ------- - None - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow.load('model.nam') - >>> m.rch.check() - - """ - chk = self._get_check(f, verbose, level, checktype) - if self.parent.bas6 is not None: - active = self.parent.bas6.ibound.array.sum(axis=0) != 0 - else: - active = np.ones(self.rech.array[0][0].shape, dtype=bool) - - # check for unusually high or low values of mean R/T - hk_package = {'UPW', 'LPF'}.intersection( - set(self.parent.get_package_list())) - if len(hk_package) > 0: - pkg = list(hk_package)[0] - - # handle quasi-3D layers - # (ugly, would be nice to put this else where in a general function) - if self.parent.dis.laycbd.sum() != 0: - thickness = np.empty((self.parent.dis.nlay, - self.parent.dis.nrow, - self.parent.dis.ncol), - dtype=float) - l = 0 - for i, cbd in enumerate(self.parent.dis.laycbd): - thickness[i, :, :] = self.parent.dis.thickness.array[l, :, - :] - if cbd > 0: - l += 1 - l += 1 - assert l == self.parent.dis.thickness.shape[0] - else: - thickness = self.parent.dis.thickness.array - assert thickness.shape == self.parent.get_package(pkg).hk.shape - Tmean = (self.parent.get_package(pkg).hk.array * - thickness)[:, active].sum(axis=0).mean() - - # get mean value of recharge array for each stress period - period_means = self.rech.array.mean(axis=(1, 2, 3)) - - if Tmean != 0: - R_T = period_means / Tmean - lessthan = np.where(R_T < RTmin)[0] - greaterthan = np.where(R_T > RTmax)[0] - - if len(lessthan) > 0: - txt = '\r Mean R/T ratio < checker warning ' + \ - 'threshold of {}'.format(RTmin) - txt += ' for {} stress periods'.format(len(lessthan)) - chk._add_to_summary(type='Warning', value=R_T.min(), - desc=txt) - chk.remove_passed( - 'Mean R/T is between {} and {}'.format(RTmin, RTmax)) - - if len(greaterthan) > 0: - txt = '\r Mean R/T ratio > checker warning ' + \ - 'threshold of {}'.format(RTmax) - txt += ' for {} stress periods'.format(len(greaterthan)) - chk._add_to_summary(type='Warning', value=R_T.max(), - desc=txt) - chk.remove_passed( - 'Mean R/T is between {} and {}'.format(RTmin, RTmax)) - elif len(lessthan) == 0 and len(greaterthan) == 0: - chk.append_passed( - 'Mean R/T is between {} and {}'.format(RTmin, RTmax)) - - # check for NRCHOP values != 3 - if self.nrchop != 3: - txt = '\r Variable NRCHOP set to value other than 3' - chk._add_to_summary(type='Warning', value=self.nrchop, - desc=txt) - chk.remove_passed('Variable NRCHOP set to 3.') - else: - chk.append_passed('Variable NRCHOP set to 3.') - chk.summarize() - return chk - - def ncells(self): - # Returns the maximum number of cells that have recharge - # (developed for MT3DMS SSM package) - nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper - return (nrow * ncol) - - def write_file(self, check=True, f=None): - """ - Write the package file. - - Parameters - ---------- - check : boolean - Check package data for common errors. 
(default True) - - Returns - ------- - None - - """ - # allows turning off package checks when writing files at model level - if check: - self.check(f='{}.chk'.format(self.name[0]), - verbose=self.parent.verbose, level=1) - nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper - # Open file for writing - if f is not None: - f_rch = f - else: - f_rch = open(self.fn_path, 'w') - f_rch.write('{0:s}\n'.format(self.heading)) - f_rch.write('{0:10d}{1:10d}\n'.format(self.nrchop, self.ipakcb)) - - if self.nrchop == 2: - irch = {} - for kper,u2d in self.irch.transient_2ds.items(): - irch[kper] = u2d.array + 1 - irch = Transient2d(self.parent - ,self.irch.shape, - self.irch.dtype, - irch, - self.irch.name) - - for kper in range(nper): - inrech, file_entry_rech = self.rech.get_kper_entry(kper) - if self.nrchop == 2: - inirch, file_entry_irch = irch.get_kper_entry(kper) - else: - inirch = -1 - f_rch.write('{0:10d}{1:10d} # {2:s}\n'.format(inrech, - inirch, - "Stress period " + str( - kper + 1))) - if (inrech >= 0): - f_rch.write(file_entry_rech) - if self.nrchop == 2: - if inirch >= 0: - f_rch.write(file_entry_irch) - f_rch.close() - - @staticmethod - def load(f, model, nper=None, ext_unit_dict=None, check=True): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. - nper : int - The number of stress periods. If nper is None, then nper will be - obtained from the model object. (default is None). - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - check : boolean - Check package data for common errors. (default True) - - Returns - ------- - rch : ModflowRch object - ModflowRch object. - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> rch = flopy.modflow.ModflowRch.load('test.rch', m) - - """ - if model.verbose: - sys.stdout.write('loading rch package file...\n') - - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - - # dataset 0 -- header - while True: - line = f.readline() - if line[0] != '#': - break - npar = 0 - if "parameter" in line.lower(): - raw = line.strip().split() - npar = np.int(raw[1]) - if npar > 0: - if model.verbose: - txt = 3 * ' ' + 'Parameters detected. 
Number of ' + \ - 'parameters = {}'.format(npar) - print(txt) - line = f.readline() - # dataset 2 - t = line_parse(line) - nrchop = int(t[0]) - ipakcb = int(t[1]) - - # dataset 3 and 4 - parameters data - pak_parms = None - if npar > 0: - pak_parms = mfparbc.loadarray(f, npar, model.verbose) - - if nper is None: - nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper() - # read data for every stress period - rech = {} - irch = None - if nrchop == 2: - irch = {} - current_rech = [] - current_irch = [] - for iper in range(nper): - line = f.readline() - t = line_parse(line) - inrech = int(t[0]) - if nrchop == 2: - inirch = int(t[1]) - if inrech >= 0: - if npar == 0: - if model.verbose: - txt = 3 * ' ' + 'loading rech stress ' + \ - 'period {0:3d}...'.format(iper + 1) - print(txt) - t = Util2d.load(f, model, (nrow, ncol), np.float32, 'rech', - ext_unit_dict) - else: - parm_dict = {} - for ipar in range(inrech): - line = f.readline() - t = line.strip().split() - pname = t[0].lower() - try: - c = t[1].lower() - instance_dict = pak_parms.bc_parms[pname][1] - if c in instance_dict: - iname = c - else: - iname = 'static' - except: - iname = 'static' - parm_dict[pname] = iname - t = mfparbc.parameter_bcfill(model, (nrow, ncol), - parm_dict, pak_parms) - - current_rech = t - rech[iper] = current_rech - if nrchop == 2: - if inirch >= 0: - if model.verbose: - txt = 3 * ' ' + 'loading irch stress ' + \ - 'period {0:3d}...'.format(iper + 1) - print(txt) - t = Util2d.load(f, model, (nrow, ncol), np.int32, 'irch', - ext_unit_dict) - current_irch = Util2d(model,(nrow, ncol), np.int32, - t.array - 1, "irch") - irch[iper] = current_irch - - if openfile: - f.close() - - # determine specified unit number - unitnumber = None - filenames = [None, None] - if ext_unit_dict is not None: - unitnumber, filenames[0] = \ - model.get_ext_dict_attr(ext_unit_dict, - filetype=ModflowRch.ftype()) - if ipakcb > 0: - iu, filenames[1] = \ - model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb) - model.add_pop_key_list(ipakcb) - - # create recharge package instance - rch = ModflowRch(model, nrchop=nrchop, ipakcb=ipakcb, - rech=rech, irch=irch, - unitnumber=unitnumber, filenames=filenames) - if check: - rch.check(f='{}.chk'.format(rch.name[0]), - verbose=rch.parent.verbose, level=0) - return rch - - @staticmethod - def ftype(): - return 'RCH' - - @staticmethod - def defaultunit(): - return 19 +""" +mfrch module. Contains the ModflowRch class. Note that the user can access +the ModflowRch class as `flopy.modflow.ModflowRch`. + +Additional information for this MODFLOW package can be found at the `Online +MODFLOW Guide +`_. + +""" + +import sys +import numpy as np +from ..pakbase import Package +from ..utils import Util2d, Transient2d +from ..modflow.mfparbc import ModflowParBc as mfparbc +from ..utils.flopy_io import line_parse + + +class ModflowRch(Package): + """ + MODFLOW Recharge Package Class. + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to which + this package will be added. + ipakcb : int + A flag that is used to determine if cell-by-cell budget data should be + saved. If ipakcb is non-zero cell-by-cell budget data will be saved. + (default is 0). + nrchop : int + is the recharge option code. + 1: Recharge to top grid layer only + 2: Recharge to layer defined in irch + 3: Recharge to highest active cell (default is 3). + rech : float or array of floats (nrow, ncol) + is the recharge flux. (default is 1.e-3). 
+ irch : int or array of ints (nrow, ncol) + is the layer to which recharge is applied in each vertical + column (only used when nrchop=2). (default is 0). + extension : string + Filename extension (default is 'rch') + unitnumber : int + File unit number (default is None). + filenames : str or list of str + Filenames to use for the package and the output files. If + filenames=None the package name will be created using the model name + and package extension and the cbc output name will be created using + the model name and .cbc extension (for example, modflowtest.cbc), + if ipakcb is a number greater than zero. If a single string is passed + the package will be set to the string and cbc output names will be + created using the model name and .cbc extension, if ipakcb is a + number greater than zero. To define the names for all package files + (input and output) the length of the list of strings should be 2. + Default is None. + + Attributes + ---------- + + Methods + ------- + + See Also + -------- + + Notes + ----- + Parameters are supported in Flopy only when reading in existing models. + Parameter values are converted to native values in Flopy and the + connection to "parameters" is thus nonexistent. + + Examples + -------- + + >>> #steady state + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> rch = flopy.modflow.ModflowRch(m, nrchop=3, rech=1.2e-4) + + >>> #transient with time-varying recharge + >>> import flopy + >>> rech = {} + >>> rech[0] = 1.2e-4 #stress period 1 to 4 + >>> rech[4] = 0.0 #stress period 5 and 6 + >>> rech[6] = 1.2e-3 #stress period 7 to the end + >>> m = flopy.modflow.Modflow() + >>> rch = flopy.modflow.ModflowRch(m, nrchop=3, rech=rech) + + """ + + def __init__(self, model, nrchop=3, ipakcb=None, rech=1e-3, irch=0, + extension='rch', unitnumber=None, filenames=None): + """ + Package constructor. + + """ + # set default unit number if one is not specified + if unitnumber is None: + unitnumber = ModflowRch.defaultunit() + + # set filenames + if filenames is None: + filenames = [None, None] + elif isinstance(filenames, str): + filenames = [filenames, None] + elif isinstance(filenames, list): + if len(filenames) < 2: + filenames.append(None) + + # update external file information with cbc output, if necessary + if ipakcb is not None: + fname = filenames[1] + model.add_output_file(ipakcb, fname=fname, + package=ModflowRch.ftype()) + else: + ipakcb = 0 + + # Fill namefile items + name = [ModflowRch.ftype()] + units = [unitnumber] + extra = [''] + + # set package name + fname = [filenames[0]] + + # Call ancestor's init to set self.parent, extension, name and + # unit number + Package.__init__(self, model, extension=extension, name=name, + unit_number=units, extra=extra, filenames=fname) + + nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper + self.heading = '# {} package for '.format(self.name[0]) + \ + ' {}, '.format(model.version_types[model.version]) + \ + 'generated by Flopy.' + self.url = 'rch.htm' + + self.nrchop = nrchop + self.ipakcb = ipakcb + self.rech = Transient2d(model, (nrow, ncol), np.float32, + rech, name='rech_') + if self.nrchop == 2: + self.irch = Transient2d(model, (nrow, ncol), np.int32, + irch, + name='irch_') + else: + self.irch = None + self.np = 0 + self.parent.add_package(self) + + def check(self, f=None, verbose=True, level=1, RTmin=2e-8, RTmax=2e-4, + checktype=None): + """ + Check package data for common errors.
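# A minimal usage sketch of the RCH constructor described above. The model
# name, grid dimensions, unit number 53, and file names are invented for
# illustration; any nonzero ipakcb routes RCH budget terms to the cbc file
# named in filenames[1].
import flopy

m = flopy.modflow.Modflow(modelname='rchdemo')
dis = flopy.modflow.ModflowDis(m, nlay=1, nrow=10, ncol=10, nper=3)
rch = flopy.modflow.ModflowRch(m, nrchop=3, rech=1.2e-4, ipakcb=53,
                               filenames=['rchdemo.rch', 'rchdemo.cbc'])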
+ + Parameters + ---------- + f : str or file handle + String defining file name or file handle for summary file + of check method output. If a string is passed a file handle + is created. If f is None, check method does not write + results to a summary file. (default is None) + verbose : bool + Boolean flag used to determine if check method results are + written to the screen + level : int + Check method analysis level. If level=0, summary checks are + performed. If level=1, full checks are performed. + RTmin : float + Minimum product of recharge and transmissivity. Default is 2e-8 + RTmax : float + Maximum product of recharge and transmissivity. Default is 2e-4 + + Returns + ------- + None + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow.load('model.nam') + >>> m.rch.check() + + """ + chk = self._get_check(f, verbose, level, checktype) + if self.parent.bas6 is not None: + active = self.parent.bas6.ibound.array.sum(axis=0) != 0 + else: + active = np.ones(self.rech.array[0][0].shape, dtype=bool) + + # check for unusually high or low values of mean R/T + hk_package = {'UPW', 'LPF'}.intersection( + set(self.parent.get_package_list())) + if len(hk_package) > 0: + pkg = list(hk_package)[0] + + # handle quasi-3D layers + # (ugly, would be nice to put this elsewhere in a general function) + if self.parent.dis.laycbd.sum() != 0: + thickness = np.empty((self.parent.dis.nlay, + self.parent.dis.nrow, + self.parent.dis.ncol), + dtype=float) + l = 0 + for i, cbd in enumerate(self.parent.dis.laycbd): + thickness[i, :, :] = self.parent.dis.thickness.array[l, :, + :] + if cbd > 0: + l += 1 + l += 1 + assert l == self.parent.dis.thickness.shape[0] + else: + thickness = self.parent.dis.thickness.array + assert thickness.shape == self.parent.get_package(pkg).hk.shape + Tmean = (self.parent.get_package(pkg).hk.array * + thickness)[:, active].sum(axis=0).mean() + + # get mean value of recharge array for each stress period + period_means = self.rech.array.mean(axis=(1, 2, 3)) + + if Tmean != 0: + R_T = period_means / Tmean + lessthan = np.where(R_T < RTmin)[0] + greaterthan = np.where(R_T > RTmax)[0] + + if len(lessthan) > 0: + txt = '\r    Mean R/T ratio < checker warning ' + \ + 'threshold of {}'.format(RTmin) + txt += ' for {} stress periods'.format(len(lessthan)) + chk._add_to_summary(type='Warning', value=R_T.min(), + desc=txt) + chk.remove_passed( + 'Mean R/T is between {} and {}'.format(RTmin, RTmax)) + + if len(greaterthan) > 0: + txt = '\r    Mean R/T ratio > checker warning ' + \ + 'threshold of {}'.format(RTmax) + txt += ' for {} stress periods'.format(len(greaterthan)) + chk._add_to_summary(type='Warning', value=R_T.max(), + desc=txt) + chk.remove_passed( + 'Mean R/T is between {} and {}'.format(RTmin, RTmax)) + elif len(lessthan) == 0 and len(greaterthan) == 0: + chk.append_passed( + 'Mean R/T is between {} and {}'.format(RTmin, RTmax)) + + # check for NRCHOP values != 3 + if self.nrchop != 3: + txt = '\r    Variable NRCHOP set to value other than 3' + chk._add_to_summary(type='Warning', value=self.nrchop, + desc=txt) + chk.remove_passed('Variable NRCHOP set to 3.') + else: + chk.append_passed('Variable NRCHOP set to 3.') + chk.summarize() + return chk + + def ncells(self): + # Returns the maximum number of cells that have recharge + # (developed for MT3DMS SSM package) + nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper + return (nrow * ncol) + + def write_file(self, check=True, f=None): + """ + Write the package file.
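# A short sketch of calling the check method defined above, assuming an
# existing 'model.nam' on disk; RTmin/RTmax override the default R/T warning
# thresholds and the summary is written to 'rch_check.txt' (invented name).
import flopy

m = flopy.modflow.Modflow.load('model.nam')
chk = m.rch.check(f='rch_check.txt', level=1, RTmin=1e-8, RTmax=1e-3)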
+ + Parameters + ---------- + check : boolean + Check package data for common errors. (default True) + + Returns + ------- + None + + """ + # allows turning off package checks when writing files at model level + if check: + self.check(f='{}.chk'.format(self.name[0]), + verbose=self.parent.verbose, level=1) + nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper + # Open file for writing + if f is not None: + f_rch = f + else: + f_rch = open(self.fn_path, 'w') + f_rch.write('{0:s}\n'.format(self.heading)) + f_rch.write('{0:10d}{1:10d}\n'.format(self.nrchop, self.ipakcb)) + + if self.nrchop == 2: + irch = {} + for kper,u2d in self.irch.transient_2ds.items(): + irch[kper] = u2d.array + 1 + irch = Transient2d(self.parent + ,self.irch.shape, + self.irch.dtype, + irch, + self.irch.name) + + for kper in range(nper): + inrech, file_entry_rech = self.rech.get_kper_entry(kper) + if self.nrchop == 2: + inirch, file_entry_irch = irch.get_kper_entry(kper) + else: + inirch = -1 + f_rch.write('{0:10d}{1:10d} # {2:s}\n'.format(inrech, + inirch, + "Stress period " + str( + kper + 1))) + if (inrech >= 0): + f_rch.write(file_entry_rech) + if self.nrchop == 2: + if inirch >= 0: + f_rch.write(file_entry_irch) + f_rch.close() + + @staticmethod + def load(f, model, nper=None, ext_unit_dict=None, check=True): + """ + Load an existing package. + + Parameters + ---------- + f : filename or file handle + File to load. + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to + which this package will be added. + nper : int + The number of stress periods. If nper is None, then nper will be + obtained from the model object. (default is None). + ext_unit_dict : dictionary, optional + If the arrays in the file are specified using EXTERNAL, + or older style array control records, then `f` should be a file + handle. In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. + check : boolean + Check package data for common errors. (default True) + + Returns + ------- + rch : ModflowRch object + ModflowRch object. + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> rch = flopy.modflow.ModflowRch.load('test.rch', m) + + """ + if model.verbose: + sys.stdout.write('loading rch package file...\n') + + openfile = not hasattr(f, 'read') + if openfile: + filename = f + f = open(filename, 'r') + + # dataset 0 -- header + while True: + line = f.readline() + if line[0] != '#': + break + npar = 0 + if "parameter" in line.lower(): + raw = line.strip().split() + npar = np.int(raw[1]) + if npar > 0: + if model.verbose: + txt = 3 * ' ' + 'Parameters detected. 
Number of ' + \ + 'parameters = {}'.format(npar) + print(txt) + line = f.readline() + # dataset 2 + t = line_parse(line) + nrchop = int(t[0]) + ipakcb = int(t[1]) + + # dataset 3 and 4 - parameters data + pak_parms = None + if npar > 0: + pak_parms = mfparbc.loadarray(f, npar, model.verbose) + + if nper is None: + nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper() + # read data for every stress period + rech = {} + irch = None + if nrchop == 2: + irch = {} + current_rech = [] + current_irch = [] + for iper in range(nper): + line = f.readline() + t = line_parse(line) + inrech = int(t[0]) + if nrchop == 2: + inirch = int(t[1]) + if inrech >= 0: + if npar == 0: + if model.verbose: + txt = 3 * ' ' + 'loading rech stress ' + \ + 'period {0:3d}...'.format(iper + 1) + print(txt) + t = Util2d.load(f, model, (nrow, ncol), np.float32, 'rech', + ext_unit_dict) + else: + parm_dict = {} + for ipar in range(inrech): + line = f.readline() + t = line.strip().split() + pname = t[0].lower() + try: + c = t[1].lower() + instance_dict = pak_parms.bc_parms[pname][1] + if c in instance_dict: + iname = c + else: + iname = 'static' + except: + iname = 'static' + parm_dict[pname] = iname + t = mfparbc.parameter_bcfill(model, (nrow, ncol), + parm_dict, pak_parms) + + current_rech = t + rech[iper] = current_rech + if nrchop == 2: + if inirch >= 0: + if model.verbose: + txt = 3 * ' ' + 'loading irch stress ' + \ + 'period {0:3d}...'.format(iper + 1) + print(txt) + t = Util2d.load(f, model, (nrow, ncol), np.int32, 'irch', + ext_unit_dict) + current_irch = Util2d(model,(nrow, ncol), np.int32, + t.array - 1, "irch") + irch[iper] = current_irch + + if openfile: + f.close() + + # determine specified unit number + unitnumber = None + filenames = [None, None] + if ext_unit_dict is not None: + unitnumber, filenames[0] = \ + model.get_ext_dict_attr(ext_unit_dict, + filetype=ModflowRch.ftype()) + if ipakcb > 0: + iu, filenames[1] = \ + model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb) + model.add_pop_key_list(ipakcb) + + # create recharge package instance + rch = ModflowRch(model, nrchop=nrchop, ipakcb=ipakcb, + rech=rech, irch=irch, + unitnumber=unitnumber, filenames=filenames) + if check: + rch.check(f='{}.chk'.format(rch.name[0]), + verbose=rch.parent.verbose, level=0) + return rch + + @staticmethod + def ftype(): + return 'RCH' + + @staticmethod + def defaultunit(): + return 19 diff --git a/flopy/modflow/mfriv.py b/flopy/modflow/mfriv.py index b27293da03..d16944f56c 100644 --- a/flopy/modflow/mfriv.py +++ b/flopy/modflow/mfriv.py @@ -1,351 +1,351 @@ -""" -mfriv module. Contains the ModflowRiv class. Note that the user can access -the ModflowRiv class as `flopy.modflow.ModflowRiv`. - -Additional information for this MODFLOW package can be found at the `Online -MODFLOW Guide -`_. - -""" -import sys -import numpy as np -from ..pakbase import Package -from ..utils import MfList -from ..utils.recarray_utils import create_empty_recarray - - -class ModflowRiv(Package): - """ - MODFLOW River Package Class. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to which - this package will be added. - ipakcb : int - A flag that is used to determine if cell-by-cell budget data should be - saved. If ipakcb is non-zero cell-by-cell budget data will be saved. - (default is 0). - stress_period_data : list of boundaries, or recarray of boundaries, or - dictionary of boundaries. 
- Each river cell is defined through definition of - layer (int), row (int), column (int), stage (float), cond (float), - rbot (float). - The simplest form is a dictionary with a list of boundaries for each - stress period, where each list of boundaries itself is a list of - boundaries. Indices of the dictionary are the numbers of the stress - period. This gives the form of:: - - stress_period_data = - {0: [ - [lay, row, col, stage, cond, rbot], - [lay, row, col, stage, cond, rbot], - [lay, row, col, stage, cond, rbot] - ], - 1: [ - [lay, row, col, stage, cond, rbot], - [lay, row, col, stage, cond, rbot], - [lay, row, col, stage, cond, rbot] - ], ... - kper: - [ - [lay, row, col, stage, cond, rbot], - [lay, row, col, stage, cond, rbot], - [lay, row, col, stage, cond, rbot] - ] - } - - Note that if the number of lists is smaller than the number of stress - periods, then the last list of rivers will apply until the end of the - simulation. Full details of all options to specify stress_period_data - can be found in the flopy3 boundaries Notebook in the basic - subdirectory of the examples directory. - dtype : custom datatype of stress_period_data. - (default is None) - If None the default river datatype will be applied. - naux : int - number of auxiliary variables - extension : string - Filename extension (default is 'riv') - options : list of strings - Package options. (default is None). - unitnumber : int - File unit number (default is None). - filenames : str or list of str - Filenames to use for the package and the output files. If - filenames=None the package name will be created using the model name - and package extension and the cbc output name will be created using - the model name and .cbc extension (for example, modflowtest.cbc), - if ipakcb is a number greater than zero. If a single string is passed - the package will be set to the string and cbc output names will be - created using the model name and .cbc extension, if ipakcb is a - number greater than zero. To define the names for all package files - (input and output) the length of the list of strings should be 2. - Default is None. - - Attributes - ---------- - mxactr : int - Maximum number of river cells for a stress period. This is calculated - automatically by FloPy based on the information in - layer_row_column_data. - - Methods - ------- - - See Also - -------- - - Notes - ----- - Parameters are not supported in FloPy. - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> lrcd = {} - >>> lrcd[0] = [[2, 3, 4, 15.6, 1050., -4]]  #this river boundary will be - >>> #applied to all stress periods - >>> riv = flopy.modflow.ModflowRiv(m, stress_period_data=lrcd) - - """ - - def __init__(self, model, ipakcb=None, stress_period_data=None, dtype=None, - extension='riv', options=None, unitnumber=None, - filenames=None, **kwargs): - """ - Package constructor.
- - """ - # set default unit number of one is not specified - if unitnumber is None: - unitnumber = ModflowRiv.defaultunit() - - # set filenames - if filenames is None: - filenames = [None, None] - elif isinstance(filenames, str): - filenames = [filenames, None] - elif isinstance(filenames, list): - if len(filenames) < 2: - filenames.append(None) - - # update external file information with cbc output, if necessary - if ipakcb is not None: - fname = filenames[1] - model.add_output_file(ipakcb, fname=fname, - package=ModflowRiv.ftype()) - else: - ipakcb = 0 - - # Fill namefile items - name = [ModflowRiv.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and - # unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - self.heading = '# {} package for '.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' - self.url = 'riv.htm' - - self.ipakcb = ipakcb - self.mxactr = 0 - self.np = 0 - if options is None: - options = [] - self.options = options - if dtype is not None: - self.dtype = dtype - else: - self.dtype = self.get_default_dtype( - structured=self.parent.structured) - self.stress_period_data = MfList(self, stress_period_data) - self.parent.add_package(self) - - def check(self, f=None, verbose=True, level=1, checktype=None): - """ - Check package data for common errors. - - Parameters - ---------- - f : str or file handle - String defining file name or file handle for summary file - of check method output. If a string is passed a file handle - is created. If f is None, check method does not write - results to a summary file. (default is None) - verbose : bool - Boolean flag used to determine if check method results are - written to the screen. - level : int - Check method analysis level. If level=0, summary checks are - performed. If level=1, full checks are performed. 
- - Returns - ------- - None - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow.load('model.nam') - >>> m.riv.check() - - """ - basechk = super(ModflowRiv, self).check(verbose=False, - checktype=checktype) - chk = self._get_check(f, verbose, level, checktype) - chk.summary_array = basechk.summary_array - - for per in self.stress_period_data.data.keys(): - if isinstance(self.stress_period_data.data[per], np.recarray): - spd = self.stress_period_data.data[per] - inds = (spd.k, spd.i, spd.j) if self.parent.structured else ( - spd.node) - - # check that river stage and bottom are above model cell - # bottoms also checks for nan values - botms = self.parent.dis.botm.array[inds] - - for elev in ['stage', 'rbot']: - txt = '{} below cell bottom'.format(elev) - chk.stress_period_data_values(spd, spd[elev] < botms, - col=elev, - error_name=txt, - error_type='Error') - - # check that river stage is above the rbot - txt = 'RIV stage below rbots' - chk.stress_period_data_values(spd, spd['rbot'] > spd['stage'], - col='stage', - error_name=txt, - error_type='Error') - chk.summarize() - return chk - - @staticmethod - def get_empty(ncells=0, aux_names=None, structured=True): - # get an empty recarray that corresponds to dtype - dtype = ModflowRiv.get_default_dtype(structured=structured) - if aux_names is not None: - dtype = Package.add_to_dtype(dtype, aux_names, np.float32) - return create_empty_recarray(ncells, dtype, default_value=-1.0E+10) - - @staticmethod - def get_default_dtype(structured=True): - if structured: - dtype = np.dtype([("k", np.int), ("i", np.int), - ("j", np.int), ("stage", np.float32), - ("cond", np.float32), ("rbot", np.float32)]) - else: - dtype = np.dtype([("node", np.int), ("stage", np.float32), - ("cond", np.float32), ("rbot", np.float32)]) - - return dtype - - @staticmethod - def get_sfac_columns(): - return ['cond'] - - def ncells(self): - # Return the maximum number of cells that have river - # (developed for MT3DMS SSM package) - return self.stress_period_data.mxact - - def write_file(self, check=True): - """ - Write the package file. - - Parameters - ---------- - check : boolean - Check package data for common errors. (default True) - - Returns - ------- - None - - """ - # allows turning off package checks when writing files at model level - if check: - self.check(f='{}.chk'.format(self.name[0]), - verbose=self.parent.verbose, level=1) - f_riv = open(self.fn_path, 'w') - f_riv.write('{0}\n'.format(self.heading)) - line = '{0:10d}{1:10d}'.format(self.stress_period_data.mxact, - self.ipakcb) - for opt in self.options: - line += ' ' + str(opt) - line += '\n' - f_riv.write(line) - self.stress_period_data.write_transient(f_riv) - f_riv.close() - - def add_record(self, kper, index, values): - try: - self.stress_period_data.add_record(kper, index, values) - except Exception as e: - raise Exception("mfriv error adding record to list: " + str(e)) - - @staticmethod - def load(f, model, nper=None, ext_unit_dict=None, check=True): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. - nper : int - The number of stress periods. If nper is None, then nper will be - obtained from the model object. (default is None). - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. 
In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - check : boolean - Check package data for common errors. (default True) - - Returns - ------- - riv : ModflowRiv object - ModflowRiv object. - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> riv = flopy.modflow.ModflowRiv.load('test.riv', m) - - """ - - if model.verbose: - sys.stdout.write('loading riv package file...\n') - - return Package.load(f, model, ModflowRiv, nper=nper, check=check, - ext_unit_dict=ext_unit_dict) - - @staticmethod - def ftype(): - return 'RIV' - - @staticmethod - def defaultunit(): - return 18 +""" +mfriv module. Contains the ModflowRiv class. Note that the user can access +the ModflowRiv class as `flopy.modflow.ModflowRiv`. + +Additional information for this MODFLOW package can be found at the `Online +MODFLOW Guide +`_. + +""" +import sys +import numpy as np +from ..pakbase import Package +from ..utils import MfList +from ..utils.recarray_utils import create_empty_recarray + + +class ModflowRiv(Package): + """ + MODFLOW River Package Class. + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to which + this package will be added. + ipakcb : int + A flag that is used to determine if cell-by-cell budget data should be + saved. If ipakcb is non-zero cell-by-cell budget data will be saved. + (default is 0). + stress_period_data : list of boundaries, or recarray of boundaries, or + dictionary of boundaries. + Each river cell is defined through definition of + layer (int), row (int), column (int), stage (float), cond (float), + rbot (float). + The simplest form is a dictionary with a list of boundaries for each + stress period, where each list of boundaries itself is a list of + boundaries. Indices of the dictionary are the numbers of the stress + period. This gives the form of:: + + stress_period_data = + {0: [ + [lay, row, col, stage, cond, rbot], + [lay, row, col, stage, cond, rbot], + [lay, row, col, stage, cond, rbot] + ], + 1: [ + [lay, row, col, stage, cond, rbot], + [lay, row, col, stage, cond, rbot], + [lay, row, col, stage, cond, rbot] + ], ... + kper: + [ + [lay, row, col, stage, cond, rbot], + [lay, row, col, stage, cond, rbot], + [lay, row, col, stage, cond, rbot] + ] + } + + Note that if the number of lists is smaller than the number of stress + periods, then the last list of rivers will apply until the end of the + simulation. Full details of all options to specify stress_period_data + can be found in the flopy3 boundaries Notebook in the basic + subdirectory of the examples directory. + dtype : custom datatype of stress_period_data. + (default is None) + If None the default river datatype will be applied. + naux : int + number of auxiliary variables + extension : string + Filename extension (default is 'riv') + options : list of strings + Package options. (default is None). + unitnumber : int + File unit number (default is None). + filenames : str or list of str + Filenames to use for the package and the output files. If + filenames=None the package name will be created using the model name + and package extension and the cbc output name will be created using + the model name and .cbc extension (for example, modflowtest.cbc), + if ipakcb is a number greater than zero.
If a single string is passed + the package will be set to the string and cbc output names will be + created using the model name and .cbc extension, if ipakcb is a + number greater than zero. To define the names for all package files + (input and output) the length of the list of strings should be 2. + Default is None. + + Attributes + ---------- + mxactr : int + Maximum number of river cells for a stress period. This is calculated + automatically by FloPy based on the information in + layer_row_column_data. + + Methods + ------- + + See Also + -------- + + Notes + ----- + Parameters are not supported in FloPy. + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> lrcd = {} + >>> lrcd[0] = [[2, 3, 4, 15.6, 1050., -4]]  #this river boundary will be + >>> #applied to all stress periods + >>> riv = flopy.modflow.ModflowRiv(m, stress_period_data=lrcd) + + """ + + def __init__(self, model, ipakcb=None, stress_period_data=None, dtype=None, + extension='riv', options=None, unitnumber=None, + filenames=None, **kwargs): + """ + Package constructor. + + """ + # set default unit number if one is not specified + if unitnumber is None: + unitnumber = ModflowRiv.defaultunit() + + # set filenames + if filenames is None: + filenames = [None, None] + elif isinstance(filenames, str): + filenames = [filenames, None] + elif isinstance(filenames, list): + if len(filenames) < 2: + filenames.append(None) + + # update external file information with cbc output, if necessary + if ipakcb is not None: + fname = filenames[1] + model.add_output_file(ipakcb, fname=fname, + package=ModflowRiv.ftype()) + else: + ipakcb = 0 + + # Fill namefile items + name = [ModflowRiv.ftype()] + units = [unitnumber] + extra = [''] + + # set package name + fname = [filenames[0]] + + # Call ancestor's init to set self.parent, extension, name and + # unit number + Package.__init__(self, model, extension=extension, name=name, + unit_number=units, extra=extra, filenames=fname) + + self.heading = '# {} package for '.format(self.name[0]) + \ + ' {}, '.format(model.version_types[model.version]) + \ + 'generated by Flopy.' + self.url = 'riv.htm' + + self.ipakcb = ipakcb + self.mxactr = 0 + self.np = 0 + if options is None: + options = [] + self.options = options + if dtype is not None: + self.dtype = dtype + else: + self.dtype = self.get_default_dtype( + structured=self.parent.structured) + self.stress_period_data = MfList(self, stress_period_data) + self.parent.add_package(self) + + def check(self, f=None, verbose=True, level=1, checktype=None): + """ + Check package data for common errors. + + Parameters + ---------- + f : str or file handle + String defining file name or file handle for summary file + of check method output. If a string is passed a file handle + is created. If f is None, check method does not write + results to a summary file. (default is None) + verbose : bool + Boolean flag used to determine if check method results are + written to the screen. + level : int + Check method analysis level. If level=0, summary checks are + performed. If level=1, full checks are performed.
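# Sketch of the get_empty helper defined below: it returns a recarray with
# the default RIV dtype (k, i, j, stage, cond, rbot); the two cells filled
# here use invented values.
import flopy

spd0 = flopy.modflow.ModflowRiv.get_empty(ncells=2)
spd0['k'] = 0
spd0['i'] = [2, 2]
spd0['j'] = [3, 4]
spd0['stage'] = [15.6, 15.5]
spd0['cond'] = 1050.0
spd0['rbot'] = [14.0, 13.9]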
+ + Returns + ------- + None + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow.load('model.nam') + >>> m.riv.check() + + """ + basechk = super(ModflowRiv, self).check(verbose=False, + checktype=checktype) + chk = self._get_check(f, verbose, level, checktype) + chk.summary_array = basechk.summary_array + + for per in self.stress_period_data.data.keys(): + if isinstance(self.stress_period_data.data[per], np.recarray): + spd = self.stress_period_data.data[per] + inds = (spd.k, spd.i, spd.j) if self.parent.structured else ( + spd.node) + + # check that river stage and bottom are above model cell + # bottoms also checks for nan values + botms = self.parent.dis.botm.array[inds] + + for elev in ['stage', 'rbot']: + txt = '{} below cell bottom'.format(elev) + chk.stress_period_data_values(spd, spd[elev] < botms, + col=elev, + error_name=txt, + error_type='Error') + + # check that river stage is above the rbot + txt = 'RIV stage below rbots' + chk.stress_period_data_values(spd, spd['rbot'] > spd['stage'], + col='stage', + error_name=txt, + error_type='Error') + chk.summarize() + return chk + + @staticmethod + def get_empty(ncells=0, aux_names=None, structured=True): + # get an empty recarray that corresponds to dtype + dtype = ModflowRiv.get_default_dtype(structured=structured) + if aux_names is not None: + dtype = Package.add_to_dtype(dtype, aux_names, np.float32) + return create_empty_recarray(ncells, dtype, default_value=-1.0E+10) + + @staticmethod + def get_default_dtype(structured=True): + if structured: + dtype = np.dtype([("k", np.int), ("i", np.int), + ("j", np.int), ("stage", np.float32), + ("cond", np.float32), ("rbot", np.float32)]) + else: + dtype = np.dtype([("node", np.int), ("stage", np.float32), + ("cond", np.float32), ("rbot", np.float32)]) + + return dtype + + @staticmethod + def get_sfac_columns(): + return ['cond'] + + def ncells(self): + # Return the maximum number of cells that have river + # (developed for MT3DMS SSM package) + return self.stress_period_data.mxact + + def write_file(self, check=True): + """ + Write the package file. + + Parameters + ---------- + check : boolean + Check package data for common errors. (default True) + + Returns + ------- + None + + """ + # allows turning off package checks when writing files at model level + if check: + self.check(f='{}.chk'.format(self.name[0]), + verbose=self.parent.verbose, level=1) + f_riv = open(self.fn_path, 'w') + f_riv.write('{0}\n'.format(self.heading)) + line = '{0:10d}{1:10d}'.format(self.stress_period_data.mxact, + self.ipakcb) + for opt in self.options: + line += ' ' + str(opt) + line += '\n' + f_riv.write(line) + self.stress_period_data.write_transient(f_riv) + f_riv.close() + + def add_record(self, kper, index, values): + try: + self.stress_period_data.add_record(kper, index, values) + except Exception as e: + raise Exception("mfriv error adding record to list: " + str(e)) + + @staticmethod + def load(f, model, nper=None, ext_unit_dict=None, check=True): + """ + Load an existing package. + + Parameters + ---------- + f : filename or file handle + File to load. + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to + which this package will be added. + nper : int + The number of stress periods. If nper is None, then nper will be + obtained from the model object. (default is None). + ext_unit_dict : dictionary, optional + If the arrays in the file are specified using EXTERNAL, + or older style array control records, then `f` should be a file + handle. 
In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. + check : boolean + Check package data for common errors. (default True) + + Returns + ------- + riv : ModflowRiv object + ModflowRiv object. + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> riv = flopy.modflow.ModflowRiv.load('test.riv', m) + + """ + + if model.verbose: + sys.stdout.write('loading riv package file...\n') + + return Package.load(f, model, ModflowRiv, nper=nper, check=check, + ext_unit_dict=ext_unit_dict) + + @staticmethod + def ftype(): + return 'RIV' + + @staticmethod + def defaultunit(): + return 18 diff --git a/flopy/modflow/mfsfr2.py b/flopy/modflow/mfsfr2.py index 835d85789e..8b3fcf9ed6 100644 --- a/flopy/modflow/mfsfr2.py +++ b/flopy/modflow/mfsfr2.py @@ -1,3110 +1,3110 @@ -__author__ = 'aleaf' - -import sys -import os -import numpy as np -import warnings -import copy -from numpy.lib import recfunctions -from ..pakbase import Package -from ..utils import MfList -from ..utils.flopy_io import line_parse -from ..utils.recarray_utils import create_empty_recarray -from ..utils.optionblock import OptionBlock -from collections import OrderedDict - -try: - import pandas as pd -except: - pd = False - -try: - from numpy.lib import NumpyVersion - numpy114 = NumpyVersion(np.__version__) >= '1.14.0' -except ImportError: - numpy114 = False -if numpy114: - # use numpy's floating-point formatter (Dragon4) - default_float_format = '{!s}' -else: - # single-precision floats have ~7.2 decimal digits - default_float_format = '{:.8g}' - - -class ModflowSfr2(Package): - """ - Streamflow-Routing (SFR2) Package Class - - Parameters - ---------- - model : model object - The model object (of type :class:'flopy.modflow.mf.Modflow') to which - this package will be added. - nstrm : integer - An integer value that can be specified to be positive or negative. The - absolute value of NSTRM is equal to the number of stream reaches - (finite-difference cells) that are active during the simulation and - the number of lines of data to be included in Item 2, described below. - When NSTRM is specified to be a negative integer, it is also used as a - flag for changing the format of the data input, for simulating - unsaturated flow beneath streams, and (or) for simulating transient - streamflow routing (for MODFLOW-2005 simulations only), depending - on the values specified for variables ISFROPT and IRTFLG, as described - below. When NSTRM is negative, NSFRPAR must be set to zero, which means - that parameters cannot be specified. By default, nstrm is set to - negative. - nss : integer - An integer value equal to the number of stream segments (consisting of - one or more reaches) that are used to define the complete stream - network. The value of NSS represents the number of segments that must - be defined through a combination of parameters and variables in Item 4 - or variables in Item 6. - nparseg : integer - An integer value equal to (or exceeding) the number of stream-segment - definitions associated with all parameters. This number can be more - than the total number of segments (NSS) in the stream network because - the same segment can be defined in multiple parameters, and because - parameters can be time-varying. NPARSEG must equal or exceed the sum - of NLST x N for all parameters, where N is the greater of 1 and - NUMINST; that is, NPARSEG must equal or exceed the total number of - repetitions of item 4b.
This variable must be zero when NSTRM is - negative. - const : float - A real value (or conversion factor) used in calculating stream depth - for stream reach. If stream depth is not calculated using Manning's - equation for any stream segment (that is, ICALC does not equal 1 or 2), - then a value of zero can be entered. If Manning's equation is used, a - constant of 1.486 is used for flow units of cubic feet per second, and - a constant of 1.0 is used for units of cubic meters per second. The - constant must be multiplied by 86,400 when using time units of days in - the simulation. An explanation of time units used in MODFLOW is given - by Harbaugh and others (2000, p. 10). - dleak : float - A real value equal to the tolerance level of stream depth used in - computing leakage between each stream reach and active model cell. - Value is in units of length. Usually a value of 0.0001 is sufficient - when units of feet or meters are used in model. - ipakcb : integer - An integer value used as a flag for writing stream-aquifer leakage - values. If ipakcb > 0, unformatted leakage between each stream reach - and corresponding model cell will be saved to the main cell-by-cell - budget file whenever when a cell-by-cell budget has been specified in - Output Control (see Harbaugh and others, 2000, pages 52-55). If - ipakcb = 0, leakage values will not be printed or saved. Printing to - the listing file (ipakcb < 0) is not supported. - istcb2 : integer - An integer value used as a flag for writing to a separate formatted - file all information on inflows and outflows from each reach; on - stream depth, width, and streambed conductance; and on head difference - and gradient across the streambed. If ISTCB2 > 0, then ISTCB2 also - represents the unit number to which all information for each stream - reach will be saved to a separate file when a cell-by-cell budget has - been specified in Output Control. If ISTCB2 < 0, it is the unit number - to which unformatted streamflow out of each reach will be saved to a - file whenever the cell-by-cell budget has been specified in Output - Control. Unformatted output will be saved to .sfq. - isfropt : integer - An integer value that defines the format of the input data and whether - or not unsaturated flow is simulated beneath streams. Values of ISFROPT - are defined as follows - - 0 No vertical unsaturated flow beneath streams. Streambed elevations, - stream slope, streambed thickness, and streambed hydraulic - conductivity are read for each stress period using variables - defined in Items 6b and 6c; the optional variables in Item 2 are - not used. - 1 No vertical unsaturated flow beneath streams. Streambed elevation, - stream slope, streambed thickness, and streambed hydraulic - conductivity are read for each reach only once at the beginning of - the simulation using optional variables defined in Item 2; Items 6b - and 6c are used to define stream width and depth for ICALC = 0 and - stream width for ICALC = 1. - 2 Streambed and unsaturated-zone properties are read for each reach - only once at the beginning of the simulation using optional - variables defined in Item 2; Items 6b and 6c are used to define - stream width and depth for ICALC = 0 and stream width for - ICALC = 1. When using the LPF Package, saturated vertical - hydraulic conductivity for the unsaturated zone is the same as - the vertical hydraulic conductivity of the corresponding layer in - LPF and input variable UHC is not read. 
- 3 Same as 2 except saturated vertical hydraulic conductivity for the - unsaturated zone (input variable UHC) is read for each reach. - 4 Streambed and unsaturated-zone properties are read for the - beginning and end of each stream segment using variables defined - in Items 6b and 6c; the optional variables in Item 2 are not used. - Streambed properties can vary each stress period. When using the - LPF Package, saturated vertical hydraulic conductivity for the - unsaturated zone is the same as the vertical hydraulic conductivity - of the corresponding layer in LPF and input variable UHC1 is not - read. - 5 Same as 4 except saturated vertical hydraulic conductivity for the - unsaturated zone (input variable UHC1) is read for each segment at - the beginning of the first stress period only. - - nstrail : integer - An integer value that is the number of trailing wave increments used to - represent a trailing wave. Trailing waves are used to represent a - decrease in the surface infiltration rate. The value can be increased - to improve mass balance in the unsaturated zone. Values between 10 and - 20 work well and result in unsaturated-zone mass balance errors beneath - streams ranging between 0.001 and 0.01 percent. Please see Smith (1983) - for further details. (default is 10; for MODFLOW-2005 simulations only - when isfropt > 1) - isuzn : integer - An integer value that is the maximum number of vertical cells used to - define the unsaturated zone beneath a stream reach. If ICALC is 1 for - all segments then ISUZN should be set to 1. (default is 1; for - MODFLOW-2005 simulations only when isfropt > 1) - nsfrsets : integer - An integer value that is the maximum number of different sets of - trailing waves used to allocate arrays. Arrays are allocated by - multiplying NSTRAIL by NSFRSETS. A value of 30 is sufficient for - problems where the stream depth varies often. NSFRSETS does not affect - model run time. (default is 30; for MODFLOW-2005 simulations only - when isfropt > 1) - irtflg : integer - An integer value that indicates whether transient streamflow routing is - active. IRTFLG must be specified if NSTRM < 0. If IRTFLG > 0, - streamflow will be routed using the kinematic-wave equation (see USGS - Techniques and Methods 6-D1, p. 68-69); otherwise, IRTFLG should be - specified as 0. Transient streamflow routing is only available for - MODFLOW-2005; IRTFLG can be left blank for MODFLOW-2000 simulations. - (default is 1) - numtim : integer - An integer value equal to the number of sub time steps used to route - streamflow. The time step that will be used to route streamflow will - be equal to the MODFLOW time step divided by NUMTIM. (default is 2; - for MODFLOW-2005 simulations only when irtflg > 0) - weight : float - A real number equal to the time weighting factor used to calculate the - change in channel storage. WEIGHT has a value between 0.5 and 1. Please - refer to equation 83 in USGS Techniques and Methods 6-D1 for further - details. (default is 0.75; for MODFLOW-2005 simulations only when - irtflg > 0) - flwtol : float - A real number equal to the streamflow tolerance for convergence of the - kinematic wave equation used for transient streamflow routing. A value - of 0.00003 cubic meters per second has been used successfully in test - simulations (and would need to be converted to whatever units are being - used in the particular simulation). 
(default is 0.0001; for - MODFLOW-2005 simulations only when irtflg > 0) - reach_data : recarray - Numpy record array of length equal to nstrm, with columns for each - variable entered in item 2 (see SFR package input instructions). In - following flopy convention, layer, row, column and node number - (for unstructured grids) are zero-based; segment and reach are - one-based. - segment_data : recarray - Numpy record array of length equal to nss, with columns for each - variable entered in items 6a, 6b and 6c (see SFR package input - instructions). Segment numbers are one-based. - dataset_5 : dict of lists - Optional; will be built automatically from segment_data unless - specified. Dict of lists, with key for each stress period. Each list - contains the variables [itmp, irdflag, iptflag]. (see SFR documentation - for more details): - itmp : list of integers (len = NPER) - For each stress period, an integer value for reusing or reading stream - segment data that can change each stress period. If ITMP = 0 then all - stream segment data are defined by Item 4 (NSFRPAR > 0; number of - stream parameters is greater than 0). If ITMP > 0, then stream segment - data are not defined in Item 4 and must be defined in Item 6 below for - a number of segments equal to the value of ITMP. If ITMP < 0, then - stream segment data not defined in Item 4 will be reused from the last - stress period (Item 6 is not read for the current stress period). ITMP - must be defined >= 0 for the first stress period of a simulation. - irdflag : int or list of integers (len = NPER) - For each stress period, an integer value for printing input data - specified for this stress period. If IRDFLG = 0, input data for this - stress period will be printed. If IRDFLG > 0, then input data for this - stress period will not be printed. - iptflag : int or list of integers (len = NPER) - For each stress period, an integer value for printing streamflow- - routing results during this stress period. If IPTFLG = 0, or whenever - the variable ICBCFL or "Save Budget" is specified in Output Control, - the results for specified time steps during this stress period will be - printed. If IPTFLG > 0, then the results during this stress period will - not be printed. - extension : string - Filename extension (default is 'sfr') - unit_number : int - File unit number (default is None). - filenames : str or list of str - Filenames to use for the package and the output files. If - filenames=None the package name will be created using the model name - and package extension and the cbc output and sfr output name will be - created using the model name and .cbc the .sfr.bin/.sfr.out extensions - (for example, modflowtest.cbc, and modflowtest.sfr.bin), if ipakcbc and - istcb2 are numbers greater than zero. If a single string is passed the - package name will be set to the string and other uzf output files will - be set to the model name with the appropriate output file extensions. - To define the names for all package files (input and output) the - length of the list of strings should be 3. Default is None. - - Attributes - ---------- - outlets : nested dictionary - Contains the outlet for each SFR segment; format is - {per: {segment: outlet}} This attribute is created by the - get_outlets() method. - outsegs : dictionary of arrays - Each array is of shape nss rows x maximum of nss columns. 
The first - column contains the SFR segments, the second column contains the - outsegs of those segments; the third column the outsegs of the outsegs, - and so on, until all outlets have been encountered, or nss is reached. - The latter case indicates circular routing. This attribute is created - by the get_outlets() method. - - Methods - ------- - - See Also - -------- - - Notes - ----- - Parameters are not supported in FloPy. - - MODFLOW-OWHM is not supported. - - The Ground-Water Transport (GWT) process is not supported. - - Limitations on which features are supported... - - Examples - -------- - - >>> import flopy - >>> ml = flopy.modflow.Modflow() - >>> sfr2 = flopy.modflow.ModflowSfr2(ml, ...) - - """ - _options = OrderedDict([("reachinput", - OptionBlock.simple_flag), - ("transroute", - OptionBlock.simple_flag), - ("tabfiles", - OptionBlock.simple_tabfile), - ("lossfactor", {OptionBlock.dtype: np.bool_, - OptionBlock.nested: True, - OptionBlock.n_nested: 1, - OptionBlock.vars: - {"factor": - OptionBlock.simple_float}}), - ("strhc1kh", {OptionBlock.dtype: np.bool_, - OptionBlock.nested: True, - OptionBlock.n_nested: 1, - OptionBlock.vars: - {"factorkh": - OptionBlock.simple_float}}), - ("strhc1kv", {OptionBlock.dtype: np.bool_, - OptionBlock.nested: True, - OptionBlock.n_nested: 1, - OptionBlock.vars: - {"factorkv": - OptionBlock.simple_float}})]) - - nsfrpar = 0 - heading = '# Streamflow-Routing (SFR2) file for MODFLOW, generated by Flopy' - default_value = 0. - # LENUNI = {"u": 0, "f": 1, "m": 2, "c": 3} - len_const = {1: 1.486, 2: 1.0, 3: 100.} - # {"u": 0, "s": 1, "m": 2, "h": 3, "d": 4, "y": 5} - time_const = {1: 1., 2: 60., 3: 3600., 4: 86400., 5: 31557600.} - - def __init__(self, model, nstrm=-2, nss=1, nsfrpar=0, nparseg=0, - const=None, dleak=0.0001, ipakcb=None, istcb2=None, - isfropt=0, - nstrail=10, isuzn=1, nsfrsets=30, irtflg=0, numtim=2, - weight=0.75, flwtol=0.0001, - reach_data=None, - segment_data=None, - channel_geometry_data=None, - channel_flow_data=None, - dataset_5=None, irdflag=0, iptflag=0, - reachinput=False, transroute=False, - tabfiles=False, tabfiles_dict=None, - extension='sfr', unit_number=None, - filenames=None, options=None): - - """ - Package constructor - """ - # set default unit number if one is not specified - if unit_number is None: - unit_number = ModflowSfr2.defaultunit() - - # set filenames - if filenames is None: - filenames = [None, None, None] - elif isinstance(filenames, str): - filenames = [filenames, None, None] - elif isinstance(filenames, list): - if len(filenames) < 3: - for _ in range(len(filenames), 3): - filenames.append(None) - - # update external file information with cbc output, if necessary - if ipakcb is not None: - fname = filenames[1] - model.add_output_file(ipakcb, fname=fname, - package=ModflowSfr2.ftype()) - else: - ipakcb = 0 - - # add sfr flow output file - if istcb2 is not None: - if abs(istcb2) > 0: - binflag = False - ext = 'out' - if istcb2 < 0: - binflag = True - ext = 'bin' - fname = filenames[2] - if fname is None: - fname = model.name + '.sfr.{}'.format(ext) - model.add_output_file(abs(istcb2), fname=fname, - binflag=binflag, - package=ModflowSfr2.ftype()) - else: - istcb2 = 0 - - # Fill namefile items - name = [ModflowSfr2.ftype()] - units = [unit_number] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - -
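# Worked example of the CONST factor discussed in the class docstring, using
# the len_const/time_const tables above: for LENUNI=1 (feet) and ITMUNI=4
# (days), Manning's constant is 1.486 * 86400, which is what the const
# property computes from the DIS package when const is not given explicitly.
const = 1.486 * 86400  # flow in cubic feet per day
# for cubic meters per day the factor would be 1.0 * 86400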
self.url = 'sfr2.htm' - self._graph = None # dict of routing connections - - # Dataset 0 - self.heading = '# {} package for '.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' - - # Dataset 1a and 1b - self.reachinput = reachinput - self.transroute = transroute - self.tabfiles = tabfiles - self.tabfiles_dict = tabfiles_dict - self.numtab = 0 if not tabfiles else len(tabfiles_dict) - self.maxval = np.max([tb['numval'] for tb in - tabfiles_dict.values()]) if self.numtab > 0 else 0 - - if options is None: - if (reachinput, transroute, tabfiles) != (False, False, False): - options = OptionBlock("", ModflowSfr2, block=False) - - self.options = options - - # Dataset 1c. - # number of reaches, negative value is flag for unsat. - # flow beneath streams and/or transient routing - self._nstrm = np.sign(nstrm) * len( - reach_data) if reach_data is not None else nstrm - if segment_data is not None: - # segment_data is a zero-d array - if not isinstance(segment_data, dict): - if len(segment_data.shape) == 0: - segment_data = np.atleast_1d(segment_data) - nss = len(segment_data) - segment_data = {0: segment_data} - nss = len(set(reach_data["iseg"])) - else: - pass - # use atleast_1d for length since segment_data might be a 0D array - # this seems to be OK, because self.segment_data is produced by the constructor (never 0D) - self.nsfrpar = nsfrpar - self.nparseg = nparseg - # conversion factor used in calculating stream depth for stream reach (icalc = 1 or 2) - self._const = const if const is not None else None - self.dleak = dleak # tolerance level of stream depth used in computing leakage - - self.ipakcb = ipakcb - # flag; unit number for writing table of SFR output to text file - self.istcb2 = istcb2 - - # if nstrm < 0 - # defines the format of the input data and whether or not unsaturated flow is simulated - self.isfropt = isfropt - - # if isfropt > 1 - # number of trailing wave increments - self.nstrail = nstrail - # max number of vertical cells used to define unsat. zone - self.isuzn = isuzn - # max number trailing waves sets - self.nsfrsets = nsfrsets - - # if nstrm < 0 (MF-2005 only) - # switch for transient streamflow routing (> 0 = kinematic wave) - self.irtflg = irtflg - # if irtflg > 0 - # number of subtimesteps used for routing - self.numtim = numtim - # time weighting factor used to calculate the change in channel storage - self.weight = weight - # streamflow tolerance for convergence of the kinematic wave equation - self.flwtol = flwtol - - # Dataset 2. - self.reach_data = self.get_empty_reach_data(np.abs(self._nstrm)) - if reach_data is not None: - for n in reach_data.dtype.names: - self.reach_data[n] = reach_data[n] - - # assign node numbers if there are none (structured grid) - if np.diff( - self.reach_data.node).max() == 0 and self.parent.has_package( - 'DIS'): - # first make kij list - lrc = np.array(self.reach_data)[['k', 'i', 'j']].tolist() - self.reach_data['node'] = self.parent.dis.get_node(lrc) - # assign unique ID and outreach columns to each reach - self.reach_data.sort(order=['iseg', 'ireach']) - new_cols = {'reachID': np.arange(1, len(self.reach_data) + 1), - 'outreach': np.zeros(len(self.reach_data))} - for k, v in new_cols.items(): - if k not in self.reach_data.dtype.names: - recfunctions.append_fields(self.reach_data, names=k, data=v, - asrecarray=True) - # create a stress_period_data attribute to enable parent functions (e.g. 
plot) - self.stress_period_data = MfList(self, self.reach_data, - dtype=self.reach_data.dtype) - - # Datasets 4 and 6. - - # list of values that indicate segments outside of the model - # (depending on how SFR package was constructed) - self.not_a_segment_values = [999999] - - self._segments = None - self.segment_data = {0: self.get_empty_segment_data(nss)} - if segment_data is not None: - for i in segment_data.keys(): - nseg = len(segment_data[i]) - self.segment_data[i] = self.get_empty_segment_data(nseg) - for n in segment_data[i].dtype.names: - # inds = (segment_data[i]['nseg'] -1).astype(int) - self.segment_data[i][n] = segment_data[i][n] - # compute outreaches if nseg and outseg columns have non-default values - if np.diff(self.reach_data.iseg).max() != 0 and \ - np.max(list(set(self.graph.keys()))) != 0 \ - and np.max(list(set(self.graph.values()))) != 0: - if len(self.graph) == 1: - self.segment_data[0]['nseg'] = 1 - self.reach_data['iseg'] = 1 - - consistent_seg_numbers = len(set(self.reach_data.iseg).difference( - set(self.graph.keys()))) == 0 - if not consistent_seg_numbers: - warnings.warn( - "Inconsistent segment numbers of reach_data and segment_data") - - # first convert any not_a_segment_values to 0 - for v in self.not_a_segment_values: - self.segment_data[0].outseg[ - self.segment_data[0].outseg == v] = 0 - self.set_outreaches() - self.channel_geometry_data = channel_geometry_data - self.channel_flow_data = channel_flow_data - - # Dataset 5 - # set by property from segment_data unless specified manually - self._dataset_5 = dataset_5 - self.irdflag = irdflag - self.iptflag = iptflag - - # Attributes not included in SFR package input - # dictionary of arrays; see Attributes section of documentation - self.outsegs = {} - # nested dictionary of format {per: {segment: outlet}} - self.outlets = {} - # input format checks: - assert isfropt in [0, 1, 2, 3, 4, 5] - - # derived attributes - self._paths = None - - self.parent.add_package(self) - - def __setattr__(self, key, value): - if key == "nstrm": - super(ModflowSfr2, self). \ - __setattr__("_nstrm", value) - elif key == "dataset_5": - super(ModflowSfr2, self). \ - __setattr__("_dataset_5", value) - elif key == "segment_data": - super(ModflowSfr2, self). \ - __setattr__("segment_data", value) - self._dataset_5 = None - elif key == "const": - super(ModflowSfr2, self). \ - __setattr__("_const", value) - else: # return to default behavior of pakbase - super(ModflowSfr2, self).__setattr__(key, value) - - @property - def const(self): - if self._const is None: - const = self.len_const[self.parent.dis.lenuni] * \ - self.time_const[self.parent.dis.itmuni] - else: - const = self._const - return const - - @property - def nss(self): - # number of stream segments - return len(set(self.reach_data["iseg"])) - - @property - def nstrm(self): - return np.sign(self._nstrm) * len(self.reach_data) - - @property - def nper(self): - nper = self.parent.nrow_ncol_nlay_nper[-1] - nper = 1 if nper == 0 else nper # otherwise iterations from 0, nper won't run - return nper - - @property - def dataset_5(self): - """ - auto-update itmp so it is consistent with segment_data. 
- """ - ds5 = self._dataset_5 - nss = self.nss - if ds5 is None: - irdflag = self._get_flag('irdflag') - iptflag = self._get_flag('iptflag') - ds5 = {0: [nss, irdflag[0], iptflag[0]]} - for per in range(1, self.nper): - sd = self.segment_data.get(per, None) - if sd is None: - ds5[per] = [-nss, irdflag[per], iptflag[per]] - else: - ds5[per] = [len(sd), irdflag[per], iptflag[per]] - return ds5 - - @property - def graph(self): - """Dictionary of routing connections between segments.""" - if self._graph is None: - self._graph = self._make_graph() - return self._graph - - @property - def paths(self): - if self._paths is None: - self._set_paths() - return self._paths - # check to see if routing in segment data was changed - nseg = np.array(sorted(self._paths.keys()), dtype=int) - nseg = nseg[nseg > 0].copy() - outseg = np.array([self._paths[k][1] for k in nseg]) - existing_nseg = sorted(list(self.graph.keys())) - existing_outseg = [self.graph[k] for k in existing_nseg] - if not np.array_equal(nseg, existing_nseg) or \ - not np.array_equal(outseg, existing_outseg): - self._set_paths() - return self._paths - - @property - def df(self): - if pd: - return pd.DataFrame(self.reach_data) - else: - msg = 'ModflowSfr2.df: pandas not available' - raise ImportError(msg) - - def _make_graph(self): - # get all segments and their outseg - graph = {} - for recarray in self.segment_data.values(): - graph.update(dict(zip(recarray['nseg'], recarray['outseg']))) - - outlets = set(graph.values()).difference( - set(graph.keys())) # including lakes - graph.update({o: 0 for o in outlets if o != 0}) - return graph - - def _set_paths(self): - graph = self.graph - self._paths = {seg: find_path(graph, seg) for seg in graph.keys()} - - def _get_flag(self, flagname): - """ - populate values for each stress period - """ - flg = self.__dict__[flagname] - flg = [flg] if np.isscalar(flg) else flg - if len(flg) < self.nper: - return flg + [flg[-1]] * (self.nper - len(flg)) - return flg - - @staticmethod - def get_empty_reach_data(nreaches=0, aux_names=None, structured=True, - default_value=0.): - # get an empty recarray that corresponds to dtype - dtype = ModflowSfr2.get_default_reach_dtype(structured=structured) - if aux_names is not None: - dtype = Package.add_to_dtype(dtype, aux_names, np.float32) - d = create_empty_recarray(nreaches, dtype, default_value=default_value) - d['reachID'] = np.arange(1, nreaches + 1) - return d - - @staticmethod - def get_empty_segment_data(nsegments=0, aux_names=None, default_value=0.): - # get an empty recarray that corresponds to dtype - dtype = ModflowSfr2.get_default_segment_dtype() - if aux_names is not None: - dtype = Package.add_to_dtype(dtype, aux_names, np.float32) - d = create_empty_recarray(nsegments, dtype, - default_value=default_value) - return d - - @staticmethod - def get_default_reach_dtype(structured=True): - if structured: - # include node column for structured grids (useful for indexing) - return np.dtype([('node', np.int), - ('k', np.int), - ('i', np.int), - ('j', np.int), - ('iseg', np.int), - ('ireach', np.int), - ('rchlen', np.float32), - ('strtop', np.float32), - ('slope', np.float32), - ('strthick', np.float32), - ('strhc1', np.float32), - ('thts', np.float32), - ('thti', np.float32), - ('eps', np.float32), - ('uhc', np.float32), - ('reachID', np.int), - ('outreach', np.int)]) - else: - return np.dtype([('node', np.int), - ('iseg', np.int), - ('ireach', np.int), - ('rchlen', np.float32), - ('strtop', np.float32), - ('slope', np.float32), - ('strthick', np.float32), - 
('strhc1', np.float32), - ('thts', np.float32), - ('thti', np.float32), - ('eps', np.float32), - ('uhc', np.float32), - ('reachID', np.int), - ('outreach', np.int)]) - - @staticmethod - def get_default_segment_dtype(): - return np.dtype([('nseg', np.int), - ('icalc', np.int), - ('outseg', np.int), - ('iupseg', np.int), - ('iprior', np.int), - ('nstrpts', np.int), - ('flow', np.float32), - ('runoff', np.float32), - ('etsw', np.float32), - ('pptsw', np.float32), - ('roughch', np.float32), - ('roughbk', np.float32), - ('cdpth', np.float32), - ('fdpth', np.float32), - ('awdth', np.float32), - ('bwdth', np.float32), - ('hcond1', np.float32), - ('thickm1', np.float32), - ('elevup', np.float32), - ('width1', np.float32), - ('depth1', np.float32), - ('thts1', np.float32), - ('thti1', np.float32), - ('eps1', np.float32), - ('uhc1', np.float32), - ('hcond2', np.float32), - ('thickm2', np.float32), - ('elevdn', np.float32), - ('width2', np.float32), - ('depth2', np.float32), - ('thts2', np.float32), - ('thti2', np.float32), - ('eps2', np.float32), - ('uhc2', np.float32)]) - - @staticmethod - def load(f, model, nper=None, gwt=False, nsol=1, ext_unit_dict=None): - - if model.verbose: - sys.stdout.write('loading sfr2 package file...\n') - - tabfiles = False - tabfiles_dict = {} - transroute = False - reachinput = False - structured = model.structured - if nper is None: - nper = model.nper - nper = 1 if nper == 0 else nper # otherwise iterations from 0, nper won't run - - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - - # Item 0 -- header - while True: - line = f.readline() - if line[0] != '#': - break - - options = None - if model.version == "mfnwt" and "options" in line.lower(): - options = OptionBlock.load_options(f, ModflowSfr2) - - else: - query = ("reachinput", "transroute", "tabfiles", - "lossfactor", "strhc1kh", "strhc1kv") - for i in query: - if i in line.lower(): - options = OptionBlock(line.lower().strip(), - ModflowSfr2, block=False) - break - - if options is not None: - line = f.readline() - # check for 1b in modflow-2005 - if "tabfile" in line.lower(): - t = line.strip().split() - options.tabfiles = True - options.numtab = int(t[1]) - options.maxval = int(t[2]) - line = f.readline() - - # set varibles to be passed to class args - transroute = options.transroute - reachinput = options.reachinput - tabfiles = isinstance(options.tabfiles, np.ndarray) - numtab = options.numtab if tabfiles else 0 - - # item 1c - nstrm, nss, nsfrpar, nparseg, const, dleak, ipakcb, istcb2, \ - isfropt, nstrail, isuzn, nsfrsets, \ - irtflg, numtim, weight, flwtol, option = _parse_1c(line, - reachinput=reachinput, - transroute=transroute) - - # item 2 - # set column names, dtypes - names = _get_item2_names(nstrm, reachinput, isfropt, structured) - dtypes = [d for d in ModflowSfr2.get_default_reach_dtype().descr - if d[0] in names] - - lines = [] - for i in range(abs(nstrm)): - line = f.readline() - line = line_parse(line) - ireach = tuple(map(float, line[:len(dtypes)])) - lines.append(ireach) - - tmp = np.array(lines, dtype=dtypes) - # initialize full reach_data array with all possible columns - reach_data = ModflowSfr2.get_empty_reach_data(len(lines)) - for n in names: - reach_data[n] = tmp[ - n] # not sure if there's a way to assign multiple columns - - # zero-based convention - inds = ['k', 'i', 'j'] if structured else ['node'] - _markitzero(reach_data, inds) - - # items 3 and 4 are skipped (parameters not supported) - # item 5 - segment_data = {} - 
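# Note on the per-period loop below: dataset 5 leads each stress period
# with ITMP; item 6 (segment data) is only read when ITMP > 0, and a
# negative ITMP reuses the previous period's segment data (see the SFR
# input instructions). A hypothetical two-period file might parse to:
#
#   dataset_5 = {0: [3, 0, 0],    # 3 segments defined for period 1
#                1: [-3, 0, 0]}   # period 2 reuses period 1's segments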
channel_geometry_data = {} - channel_flow_data = {} - dataset_5 = {} - aux_variables = {} # not sure where the auxiliary variables are supposed to go - for i in range(0, nper): - # Dataset 5 - dataset_5[i] = _get_dataset(f.readline(), [-1, 0, 0, 0]) - itmp = dataset_5[i][0] - if itmp > 0: - # Item 6 - current = ModflowSfr2.get_empty_segment_data(nsegments=itmp, - aux_names=option) - # container to hold any auxiliary variables - current_aux = {} - # these could also be implemented as structured arrays with a column for segment number - current_6d = {} - current_6e = {} - # print(i,icalc,nstrm,isfropt,reachinput) - for j in range(itmp): - dataset_6a = _parse_6a(f.readline(), option) - current_aux[j] = dataset_6a[-1] - dataset_6a = dataset_6a[:-1] # drop xyz - icalc = dataset_6a[1] - # link dataset 6d, 6e by nseg of dataset_6a - temp_nseg = dataset_6a[0] - # datasets 6b and 6c aren't read under the conditions below - # see table under description of dataset 6c, - # in the MODFLOW Online Guide for a description - # of this logic - # https://water.usgs.gov/ogw/modflow-nwt/MODFLOW-NWT-Guide/sfr.htm - dataset_6b, dataset_6c = (0,) * 9, (0,) * 9 - if not (isfropt in [2, 3] and icalc == 1 and i > 1) and \ - not (isfropt in [1, 2, 3] and icalc >= 2): - dataset_6b = _parse_6bc(f.readline(), icalc, nstrm, - isfropt, - reachinput, per=i) - dataset_6c = _parse_6bc(f.readline(), icalc, nstrm, - isfropt, - reachinput, per=i) - current[j] = dataset_6a + dataset_6b + dataset_6c - - if icalc == 2: - # ATL: not sure exactly how isfropt logic functions for this - # dataset 6d description suggests that this line isn't read for isfropt > 1 - # but description of icalc suggest that icalc=2 (8-point channel) can be used with any isfropt - if i == 0 or nstrm > 0 and not reachinput or isfropt <= 1: - dataset_6d = [] - for _ in range(2): - dataset_6d.append( - _get_dataset(f.readline(), [0.0] * 8)) - # dataset_6d.append(list(map(float, f.readline().strip().split()))) - current_6d[temp_nseg] = dataset_6d - if icalc == 4: - nstrpts = dataset_6a[5] - dataset_6e = [] - for _ in range(3): - dataset_6e.append( - _get_dataset(f.readline(), [0.0] * nstrpts)) - current_6e[temp_nseg] = dataset_6e - - segment_data[i] = current - aux_variables[j + 1] = current_aux - if len(current_6d) > 0: - channel_geometry_data[i] = current_6d - if len(current_6e) > 0: - channel_flow_data[i] = current_6e - - if tabfiles and i == 0: - for j in range(numtab): - segnum, numval, iunit = map(int, - f.readline().strip().split()) - tabfiles_dict[segnum] = {'numval': numval, 'inuit': iunit} - - else: - continue - - if openfile: - f.close() - - # determine specified unit number - unitnumber = None - filenames = [None, None, None] - if ext_unit_dict is not None: - for key, value in ext_unit_dict.items(): - if value.filetype == ModflowSfr2.ftype(): - unitnumber = key - filenames[0] = os.path.basename(value.filename) - - if ipakcb > 0: - if key == ipakcb: - filenames[1] = os.path.basename(value.filename) - model.add_pop_key_list(key) - - if abs(istcb2) > 0: - if key == abs(istcb2): - filenames[2] = os.path.basename(value.filename) - model.add_pop_key_list(key) - - return ModflowSfr2(model, nstrm=nstrm, nss=nss, nsfrpar=nsfrpar, - nparseg=nparseg, const=const, dleak=dleak, - ipakcb=ipakcb, istcb2=istcb2, - isfropt=isfropt, nstrail=nstrail, isuzn=isuzn, - nsfrsets=nsfrsets, irtflg=irtflg, - numtim=numtim, weight=weight, flwtol=flwtol, - reach_data=reach_data, - segment_data=segment_data, - dataset_5=dataset_5, - channel_geometry_data=channel_geometry_data, 
- channel_flow_data=channel_flow_data, - reachinput=reachinput, transroute=transroute, - tabfiles=tabfiles, tabfiles_dict=tabfiles_dict, - unit_number=unitnumber, filenames=filenames, - options=options) - - def check(self, f=None, verbose=True, level=1, checktype=None): - """ - Check sfr2 package data for common errors. - - Parameters - ---------- - f : str or file handle - String defining file name or file handle for summary file - of check method output. If a string is passed a file handle - is created. If f is None, check method does not write - results to a summary file. (default is None) - verbose : bool - Boolean flag used to determine if check method results are - written to the screen - level : int - Check method analysis level. If level=0, summary checks are - performed. If level=1, full checks are performed. - - Returns - ------- - None - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow.load('model.nam') - >>> m.sfr2.check() - """ - self._graph = None # remake routing graph from segment data - chk = check(self, verbose=verbose, level=level) - chk.for_nans() - chk.numbering() - chk.routing() - chk.overlapping_conductance() - chk.elevations() - chk.slope() - - if f is not None: - if isinstance(f, str): - pth = os.path.join(self.parent.model_ws, f) - f = open(pth, 'w') - f.write('{}\n'.format(chk.txt)) - # f.close() - return chk - - def assign_layers(self, adjust_botms=False, pad=1.): - """ - Assigns the appropriate layer for each SFR reach, - based on cell bottoms at location of reach. - - Parameters - ---------- - adjust_botms : bool - Streambed bottom elevations below the model bottom - will cause an error in MODFLOW. If True, adjust - bottom elevations in lowest layer of the model - so they are at least pad distance below any co-located - streambed elevations. - pad : scalar - Minimum distance below streambed bottom to set - any conflicting model bottom elevations. - - Notes - ----- - Streambed bottom = strtop - strthick - This routine updates the elevations in the botm array - of the flopy.model.ModflowDis instance. To produce a - new DIS package file, model.write() or flopy.model.ModflowDis.write() - must be run. - - """ - streambotms = self.reach_data.strtop - self.reach_data.strthick - i, j = self.reach_data.i, self.reach_data.j - layers = self.parent.dis.get_layer(i, j, streambotms) - - # check against model bottom - logfile = 'sfr_botm_conflicts.chk' - mbotms = self.parent.dis.botm.array[-1, i, j] - below = streambotms <= mbotms - below_i = self.reach_data.i[below] - below_j = self.reach_data.j[below] - l = [] - header = '' - if np.any(below): - print('Warning: SFR streambed elevations below model bottom. 
' - 'See sfr_botm_conflicts.chk') - if not adjust_botms: - l += [below_i, - below_j, - mbotms[below], - streambotms[below]] - header += 'i,j,model_botm,streambed_botm' - else: - print('Fixing elevation conflicts...') - botm = self.parent.dis.botm.array.copy() - for ib, jb in zip(below_i, below_j): - inds = (self.reach_data.i == ib) & ( - self.reach_data.j == jb) - botm[-1, ib, jb] = streambotms[inds].min() - pad - # l.append(botm[-1, ib, jb]) - # botm[-1, below_i, below_j] = streambotms[below] - pad - l.append(botm[-1, below_i, below_j]) - header += ',new_model_botm' - self.parent.dis.botm = botm - mbotms = self.parent.dis.botm.array[-1, i, j] - assert not np.any(streambotms <= mbotms) - print('New bottom array assigned to Flopy DIS package ' - 'instance.\nRun flopy.model.write() or ' - 'flopy.model.ModflowDis.write() to write new DIS file.') - header += '\n' - - with open(logfile, 'w') as log: - log.write(header) - a = np.array(l).transpose() - for line in a: - log.write(','.join(map(str, line)) + '\n') - self.reach_data['k'] = layers - - def deactivate_ibound_above(self): - """ - Sets ibound to 0 for all cells above active SFR cells. - - Parameters - ---------- - none - - Notes - ----- - This routine updates the ibound array of the flopy.model.ModflowBas6 - instance. To produce a new BAS6 package file, model.write() or - flopy.model.ModflowBas6.write() must be run. - - """ - ib = self.parent.bas6.ibound.array - deact_lays = [list(range(i)) for i in self.reach_data.k] - for ks, i, j in zip(deact_lays, self.reach_data.i, self.reach_data.j): - for k in ks: - ib[k, i, j] = 0 - self.parent.bas6.ibound = ib - - def get_outlets(self, level=0, verbose=True): - """ - Traces all routing connections from each headwater to the outlet. - """ - txt = '' - for per in range(self.nper): - if per > 0 > self.dataset_5[per][ - 0]: # skip stress periods where seg data not defined - continue - # segments = self.segment_data[per].nseg - # outsegs = self.segment_data[per].outseg - # - # all_outsegs = np.vstack([segments, outsegs]) - # max_outseg = all_outsegs[-1].max() - # knt = 1 - # while max_outseg > 0: - # - # nextlevel = np.array([outsegs[s - 1] if s > 0 and s < 999999 else 0 - # for s in all_outsegs[-1]]) - # - # all_outsegs = np.vstack([all_outsegs, nextlevel]) - # max_outseg = nextlevel.max() - # if max_outseg == 0: - # break - # knt += 1 - # if knt > self.nss: - # # subset outsegs map to only include rows with outseg number > 0 in last column - # circular_segs = all_outsegs.T[all_outsegs[-1] > 0] - # - # # only retain one instance of each outseg number at iteration=nss - # vals = [] # append outseg values to vals after they've appeared once - # mask = [(True, vals.append(v))[0] - # if v not in vals - # else False for v in circular_segs[-1]] - # circular_segs = circular_segs[:, np.array(mask)] - # - # # cull the circular segments array to remove duplicate instances of routing circles - # circles = [] - # duplicates = [] - # for i in range(np.shape(circular_segs)[0]): - # # find where values in the row equal the last value; - # # record the index of the second to last instance of last value - # repeat_start_ind = np.where(circular_segs[i] == circular_segs[i, -1])[0][-2:][0] - # # use that index to slice out the repeated segment sequence - # circular_seq = circular_segs[i, repeat_start_ind:].tolist() - # # keep track of unique sequences of repeated segments - # if set(circular_seq) not in circles: - # circles.append(set(circular_seq)) - # duplicates.append(False) - # else: - # duplicates.append(True) - # 
circular_segs = circular_segs[~np.array(duplicates), :] - # - # txt += '{0} instances where an outlet was not found after {1} consecutive segments!\n' \ - # .format(len(circular_segs), self.nss) - # if level == 1: - # txt += '\n'.join([' '.join(map(str, row)) for row in circular_segs]) + '\n' - # else: - # f = 'circular_routing.csv' - # np.savetxt(f, circular_segs, fmt='%d', delimiter=',', header=txt) - # txt += 'See {} for details.'.format(f) - # if verbose: - # print(txt) - # break - # # the array of segment sequence is useful for other other operations, - # # such as plotting elevation profiles - # self.outsegs[per] = all_outsegs - # - # use graph instead of above loop - nrow = len(self.segment_data[per].nseg) - ncol = np.max( - [len(v) if v is not None else 0 for v in self.paths.values()]) - all_outsegs = np.zeros((nrow, ncol), dtype=int) - for i, (k, v) in enumerate(self.paths.items()): - if k > 0: - all_outsegs[i, :len(v)] = v - all_outsegs.sort(axis=0) - self.outsegs[per] = all_outsegs - # create a dictionary listing outlets associated with each segment - # outlet is the last value in each row of outseg array that is != 0 or 999999 - # self.outlets[per] = {i + 1: r[(r != 0) & (r != 999999)][-1] - # if len(r[(r != 0) & (r != 999999)]) > 0 - # else i + 1 - # for i, r in enumerate(all_outsegs.T)} - self.outlets[per] = {k: self.paths[k][-1] if k in self.paths - else k for k in self.segment_data[per].nseg} - return txt - - def reset_reaches(self): - self.reach_data.sort(order=['iseg', 'ireach']) - reach_data = self.reach_data - segment_data = list(set(self.reach_data.iseg))# self.segment_data[0] - reach_counts = np.bincount(reach_data.iseg)[1:] - reach_counts = dict(zip(range(1, len(reach_counts) + 1), - reach_counts)) - ireach = [list(range(1, reach_counts[s] + 1)) - for s in segment_data] - ireach = np.concatenate(ireach) - self.reach_data['ireach'] = ireach - - def set_outreaches(self): - """ - Determine the outreach for each SFR reach (requires a reachID - column in reach_data). Uses the segment routing specified for the - first stress period to route reaches between segments. - """ - self.reach_data.sort(order=['iseg', 'ireach']) - # ensure that each segment starts with reach 1 - self.reset_reaches() - # ensure that all outsegs are segments, outlets, or negative (lakes) - self.repair_outsegs() - rd = self.reach_data - outseg = self.graph - reach1IDs = dict(zip(rd[rd.ireach == 1].iseg, - rd[rd.ireach == 1].reachID)) - outreach = [] - for i in range(len(rd)): - # if at the end of reach data or current segment - if i + 1 == len(rd) or rd.ireach[i + 1] == 1: - nextseg = outseg[rd.iseg[i]] # get next segment - if nextseg > 0: # current reach is not an outlet - nextrchid = reach1IDs[ - nextseg] # get reach 1 of next segment - else: - nextrchid = 0 - else: # otherwise, it's the next reachID - nextrchid = rd.reachID[i + 1] - outreach.append(nextrchid) - self.reach_data['outreach'] = outreach - - def get_slopes(self, default_slope=0.001, minimum_slope=0.0001, - maximum_slope=1.): - """ - Compute slopes by reach using values in strtop (streambed top) - and rchlen (reach length) columns of reach_data. The slope for a - reach n is computed as strtop(n+1) - strtop(n) / rchlen(n). - Slopes for outlet reaches are set equal to a default value - (default_slope). Populates the slope column in reach_data. - - Parameters - ---------- - default_slope : float - Slope value applied to outlet reaches - (where water leaves the model). 
Default value is 0.001.
-        minimum_slope : float
-            Assigned to reaches with computed slopes less than this value.
-            This ensures that Manning's equation won't produce unreasonable
-            values of stage (in other words, that stage is consistent with
-            the assumption that streamflow is primarily driven by the
-            streambed gradient). Default value is 0.0001.
-        maximum_slope : float
-            Assigned to reaches with computed slopes greater than this value.
-            Default value is 1.
-
-        """
-        # compute outreaches if they aren't there already
-        if np.diff(self.reach_data.outreach).max() == 0:
-            self.set_outreaches()
-        rd = self.reach_data
-        elev = dict(zip(rd.reachID, rd.strtop))
-        dist = dict(zip(rd.reachID, rd.rchlen))
-        dnelev = {rid: elev[rd.outreach[i]] if rd.outreach[i] != 0
-                  else -9999 for i, rid in enumerate(rd.reachID)}
-        slopes = np.array(
-            [(elev[i] - dnelev[i]) / dist[i] if dnelev[i] != -9999
-             else default_slope for i in rd.reachID])
-        slopes[slopes < minimum_slope] = minimum_slope
-        slopes[slopes > maximum_slope] = maximum_slope
-        self.reach_data['slope'] = slopes
-
-    def get_upsegs(self):
-        """
-        From segment_data, returns nested dict of all upstream segments by
-        segment, by stress period.
-
-        Returns
-        -------
-        all_upsegs : dict
-            Nested dictionary of form
-            {stress period: {segment: [list of upsegs]}}
-
-        Notes
-        -----
-        This method will not work if there are instances of circular routing.
-
-        """
-        all_upsegs = {}
-        for per in range(self.nper):
-            if per > 0 > self.dataset_5[per][
-                0]:  # skip stress periods where seg data not defined
-                continue
-            segment_data = self.segment_data[per]
-
-            # make a list of adjacent upsegments keyed to outseg list in Mat2
-            upsegs = {o: segment_data.nseg[segment_data.outseg == o].tolist()
-                      for o in np.unique(segment_data.outseg)}
-
-            outsegs = [k for k in list(upsegs.keys()) if
-                       k > 0]  # exclude 0, which is the outlet designator
-
-            # for each outseg key, for each upseg, check for more upsegs,
-            # append until headwaters have been reached
-            for outseg in outsegs:
-
-                up = True
-                upsegslist = upsegs[outseg]
-                while up:
-                    added_upsegs = []
-                    for us in upsegslist:
-                        if us in outsegs:
-                            added_upsegs += upsegs[us]
-                    if len(added_upsegs) == 0:
-                        up = False
-                        break
-                    else:
-                        upsegslist = added_upsegs
-                        upsegs[outseg] += added_upsegs
-
-            # the above algorithm is recursive, so lower-order streams
-            # get duplicated many times; use a set to get unique upsegs
-            all_upsegs[per] = {u: list(set(upsegs[u])) for u in outsegs}
-        return all_upsegs
-
-    def get_variable_by_stress_period(self, varname):
-
-        dtype = []
-        all_data = np.zeros((self.nss, self.nper), dtype=float)
-        for per in range(self.nper):
-            inds = self.segment_data[per].nseg - 1
-            all_data[inds, per] = self.segment_data[per][varname]
-            dtype.append(('{}{}'.format(varname, per), float))
-        isvar = all_data.sum(axis=1) != 0
-        ra = np.core.records.fromarrays(all_data[isvar].transpose().copy(),
-                                        dtype=dtype)
-        segs = self.segment_data[0].nseg[isvar]
-        isseg = np.array(
-            [True if s in segs else False for s in self.reach_data.iseg])
-        isinlet = isseg & (self.reach_data.ireach == 1)
-        rd = np.array(self.reach_data[isinlet])[
-            ['k', 'i', 'j', 'iseg', 'ireach']]
-        ra = recfunctions.merge_arrays([rd, ra], flatten=True, usemask=False)
-        return ra.view(np.recarray)
-
-    def repair_outsegs(self):
-        isasegment = np.in1d(self.segment_data[0].outseg,
-                             self.segment_data[0].nseg)
-        isasegment = isasegment | (self.segment_data[0].outseg < 0)
-        self.segment_data[0]['outseg'][~isasegment] = 0.
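# Worked example for repair_outsegs() above (illustrative numbers): with
# nseg = [1, 2, 3] and outseg = [2, 999999, -1], segment 2's outseg is
# neither an existing segment nor negative (a lake), so it is reset to 0
# and segment 2 becomes an outlet; the routing graph is then rebuilt
# lazily because self._graph is cleared below.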
- self._graph = None - - def renumber_segments(self): - """ - Renumber segments so that segment numbering is continuous and always - increases in the downstream direction. This may speed convergence of - the NWT solver in some situations. - - Returns - ------- - r : dictionary mapping old segment numbers to new - """ - - nseg = sorted(list(self.graph.keys())) - outseg = [self.graph[k] for k in nseg] - - # explicitly fix any gaps in the numbering - # (i.e. from removing segments) - nseg2 = np.arange(1, len(nseg) + 1) - # intermediate mapping that - r1 = dict(zip(nseg, nseg2)) - r1[0] = 0 - outseg2 = np.array([r1[s] for s in outseg]) - - # function re-assigning upseg numbers consecutively at one level - # relative to outlet(s). Counts down from the number of segments - def reassign_upsegs(r, nexts, upsegs): - nextupsegs = [] - for u in upsegs: - r[u] = nexts if u > 0 else u # handle lakes - nexts -= 1 - nextupsegs += list(nseg2[outseg2 == u]) - return r, nexts, nextupsegs - - ns = len(nseg) - - # start at outlets with nss; - # renumber upsegs consecutively at each level - # until all headwaters have been reached - nexts = ns - r2 = {0: 0} - nextupsegs = nseg2[outseg2 == 0] - for _ in range(ns): - r2, nexts, nextupsegs = reassign_upsegs(r2, nexts, nextupsegs) - if len(nextupsegs) == 0: - break - # map original segment numbers to new numbers - r = {k: r2.get(v, v) for k, v in r1.items()} - - # renumber segments in all stress period data - for per in self.segment_data.keys(): - self.segment_data[per]['nseg'] = [r.get(s, s) for s in - self.segment_data[per].nseg] - self.segment_data[per]['outseg'] = [r.get(s, s) for s in - self.segment_data[per].outseg] - self.segment_data[per].sort(order='nseg') - nseg = self.segment_data[per].nseg - outseg = self.segment_data[per].outseg - inds = (outseg > 0) & (nseg > outseg) - assert not np.any(inds) - assert len(self.segment_data[per]['nseg']) == \ - self.segment_data[per]['nseg'].max() - self._graph = None # reset routing dict - - # renumber segments in reach_data - self.reach_data['iseg'] = [r.get(s, s) for s in self.reach_data.iseg] - self.reach_data.sort(order=['iseg', 'ireach']) - self.reach_data['reachID'] = np.arange(1, len(self.reach_data) + 1) - self.set_outreaches() # reset the outreaches to ensure continuity - - # renumber segments in other datasets - def renumber_channel_data(d): - if d is not None: - d2 = {} - for k, v in d.items(): - d2[k] = {} - for s, vv in v.items(): - d2[k][r[s]] = vv - else: - d2 = None - return d2 - - self.channel_geometry_data = renumber_channel_data( - self.channel_geometry_data) - self.channel_flow_data = renumber_channel_data(self.channel_flow_data) - return r - - def plot_path(self, start_seg=None, end_seg=0, plot_segment_lines=True): - """ - Plot a profile of streambed elevation and model top - along a path of segments. - - Parameters - ---------- - start_seg : int - Number of first segment in path. - end_seg : int - Number of last segment in path (defaults to 0/outlet). - plot_segment_lines : bool - Controls plotting of segment end locations along profile. 
- (default True) - - Returns - ------- - ax : matplotlib.axes._subplots.AxesSubplot object - """ - import matplotlib.pyplot as plt - if not pd: - msg = 'ModflowSfr2.plot_path: pandas not available' - raise ImportError(msg) - - df = self.df - m = self.parent - mfunits = m.sr.model_length_units - - to_miles = {'feet': 1 / 5280., 'meters': 1 / (.3048 * 5280.)} - - # slice the path - path = np.array(self.paths[start_seg]) - endidx = np.where(path == end_seg)[0] - endidx = endidx if len(endidx) > 0 else None - path = path[:np.squeeze(endidx)] - path = [s for s in path if s > 0] # skip lakes for now - - # get the values - groups = df.groupby('iseg') - tmp = pd.concat([groups.get_group(s) for s in path]) - tops = m.dis.top.array[tmp.i, tmp.j] - dist = np.cumsum(tmp.rchlen.values) * to_miles.get(mfunits, 1.) - - # segment starts - starts = dist[np.where(tmp.ireach.values == 1)[0]] - - ax = plt.subplots(figsize=(11, 8.5))[-1] - ax.plot(dist, tops, label='Model top') - ax.plot(dist, tmp.strtop, label='Streambed top') - ax.set_xlabel('Distance along path, in miles') - ax.set_ylabel('Elevation, in {}'.format(mfunits)) - ymin, ymax = ax.get_ylim() - plt.autoscale(False) - - if plot_segment_lines: # plot segment ends as vertical lines - ax.vlines(x=starts, ymin=ymin, ymax=ymax, lw=.1, alpha=.1, - label='Gray lines indicate\nsegment ends.') - ax.legend() - - # plot selected segment numbers along path - stride = np.floor(len(dist) / 10) - stride = 1 if stride < 1 else stride - inds = np.arange(0, len(dist), stride, dtype=int) - plot_segnumbers = tmp.iseg.values[inds] - xlocs = dist[inds] - pad = 0.04 * (ymax - ymin) - for x, sn in zip(xlocs, plot_segnumbers): - ax.text(x, ymin + pad, '{}'.format(sn), va='top') - ax.text(xlocs[0], ymin + pad * 1.2, 'Segment numbers:', va='bottom', - fontweight='bold') - ax.text(dist[-1], ymin + pad, '{}'.format(end_seg), ha='center', - va='top') - return ax - - def _get_headwaters(self, per=0): - """ - List all segments that are not outsegs (that do not have any - segments upstream). - - Parameters - ---------- - per : int - Stress period for which to list headwater segments (default 0) - - Returns - ------- - headwaters : np.ndarray (1-D) - One dimensional array listing all headwater segments. - """ - upsegs = [self.segment_data[per].nseg[ - self.segment_data[per].outseg == s].tolist() - for s in self.segment_data[0].nseg] - return self.segment_data[per].nseg[ - np.array([i for i, u in enumerate(upsegs) if len(u) == 0])] - - def _interpolate_to_reaches(self, segvar1, segvar2, per=0): - """ - Interpolate values in datasets 6b and 6c to each reach in - stream segment - - Parameters - ---------- - segvar1 : str - Column/variable name in segment_data array for representing start - of segment (e.g. hcond1 for hydraulic conductivity) - For segments with icalc=2 (specified channel geometry); if width1 - is given, the eighth distance point (XCPT8) from dataset 6d will - be used as the stream width. - For icalc=3, an arbitrary width of 5 is assigned. - For icalc=4, the mean value for width given in item 6e is used. - segvar2 : str - Column/variable name in segment_data array for representing start - of segment (e.g. hcond2 for hydraulic conductivity) - per : int - Stress period with segment data to interpolate - - Returns - ------- - reach_values : 1D array - One dimensional array of interpolated values of same length as - reach_data array. For example, hcond1 and hcond2 could be entered - as inputs to get values for the strhc1 (hydraulic conductivity) - column in reach_data. 
- - """ - reach_data = self.reach_data - segment_data = self.segment_data[per] - segment_data.sort(order='nseg') - reach_data.sort(order=['iseg', 'ireach']) - reach_values = [] - for seg in segment_data.nseg: - reaches = reach_data[reach_data.iseg == seg] - dist = np.cumsum(reaches.rchlen) - 0.5 * reaches.rchlen - icalc = segment_data.icalc[segment_data.nseg == seg] - # get width from channel cross section length - if 'width' in segvar1 and icalc == 2: - channel_geometry_data = self.channel_geometry_data[per] - reach_values += list( - np.ones(len(reaches)) * channel_geometry_data[seg][0][-1]) - # assign arbitrary width since width is based on flow - elif 'width' in segvar1 and icalc == 3: - reach_values += list(np.ones(len(reaches)) * 5) - # assume width to be mean from streamflow width/flow table - elif 'width' in segvar1 and icalc == 4: - channel_flow_data = self.channel_flow_data[per] - reach_values += list( - np.ones(len(reaches)) * np.mean(channel_flow_data[seg][2])) - else: - fp = [segment_data[segment_data['nseg'] == seg][segvar1][0], - segment_data[segment_data['nseg'] == seg][segvar2][0]] - xp = [dist[0], dist[-1]] - reach_values += np.interp(dist, xp, fp).tolist() - return np.array(reach_values) - - def _write_1c(self, f_sfr): - - # NSTRM NSS NSFRPAR NPARSEG CONST DLEAK ipakcb ISTCB2 - # [ISFROPT] [NSTRAIL] [ISUZN] [NSFRSETS] [IRTFLG] [NUMTIM] [WEIGHT] [FLWTOL] - f_sfr.write('{:.0f} {:.0f} {:.0f} {:.0f} {:.8f} {:.8f} {:.0f} {:.0f} ' - .format(self.nstrm, self.nss, self.nsfrpar, self.nparseg, - self.const, self.dleak, self.ipakcb, self.istcb2)) - if self.reachinput: - self.nstrm = abs( - self.nstrm) # see explanation for dataset 1c in online guide - f_sfr.write('{:.0f} '.format(self.isfropt)) - if self.isfropt > 1: - f_sfr.write('{:.0f} {:.0f} {:.0f} '.format(self.nstrail, - self.isuzn, - self.nsfrsets)) - if self.nstrm < 0: - f_sfr.write('{:.0f} '.format(self.isfropt)) - if self.isfropt > 1: - f_sfr.write('{:.0f} {:.0f} {:.0f} '.format(self.nstrail, - self.isuzn, - self.nsfrsets)) - if self.nstrm < 0 or self.transroute: - f_sfr.write('{:.0f} '.format(self.irtflg)) - if self.irtflg > 0: - f_sfr.write('{:.0f} {:.8f} {:.8f} '.format(self.numtim, - self.weight, - self.flwtol)) - f_sfr.write('\n') - - def _write_reach_data(self, f_sfr): - - # Write the recarray (data) to the file (or file handle) f - assert isinstance(self.reach_data, - np.recarray), "MfList.__tofile() data arg " + \ - "not a recarray" - - # decide which columns to write - # columns = self._get_item2_names() - columns = _get_item2_names(self.nstrm, self.reachinput, self.isfropt, - structured=self.parent.structured) - - # Add one to the kij indices - # names = self.reach_data.dtype.names - # lnames = [] - # [lnames.append(name.lower()) for name in names] - # --make copy of data for multiple calls - d = np.array(self.reach_data) - for idx in ['k', 'i', 'j', 'node']: - if (idx in columns): - d[idx] += 1 - d = d[columns] # data columns sorted - formats = _fmt_string(d) + '\n' - for rec in d: - f_sfr.write(formats.format(*rec)) - - def _write_segment_data(self, i, j, f_sfr): - cols = ['nseg', 'icalc', 'outseg', 'iupseg', 'iprior', 'nstrpts', - 'flow', 'runoff', - 'etsw', 'pptsw', 'roughch', 'roughbk', 'cdpth', 'fdpth', - 'awdth', 'bwdth'] - seg_dat = np.array(self.segment_data[i])[cols][j] - fmts = _fmt_string_list(seg_dat) - - nseg, icalc, outseg, iupseg, iprior, nstrpts, flow, runoff, etsw, \ - pptsw, roughch, roughbk, cdpth, fdpth, awdth, bwdth = \ - [0 if v == self.default_value else v for v in seg_dat] - - 
f_sfr.write( - ' '.join(fmts[0:4]).format(nseg, icalc, outseg, iupseg) + ' ') - - if iupseg > 0: - f_sfr.write(fmts[4].format(iprior) + ' ') - if icalc == 4: - f_sfr.write(fmts[5].format(nstrpts) + ' ') - - f_sfr.write( - ' '.join(fmts[6:10]).format(flow, runoff, etsw, pptsw) + ' ') - - if icalc in [1, 2]: - f_sfr.write(fmts[10].format(roughch) + ' ') - if icalc == 2: - f_sfr.write(fmts[11].format(roughbk) + ' ') - - if icalc == 3: - f_sfr.write( - ' '.join(fmts[12:16]).format(cdpth, fdpth, awdth, bwdth) + ' ') - f_sfr.write('\n') - - self._write_6bc(i, j, f_sfr, - cols=['hcond1', 'thickm1', 'elevup', 'width1', - 'depth1', 'thts1', 'thti1', - 'eps1', 'uhc1']) - self._write_6bc(i, j, f_sfr, - cols=['hcond2', 'thickm2', 'elevdn', 'width2', - 'depth2', 'thts2', 'thti2', - 'eps2', 'uhc2']) - - def _write_6bc(self, i, j, f_sfr, cols=()): - cols = list(cols) - icalc = self.segment_data[i][j][1] - seg_dat = np.array(self.segment_data[i])[cols][j] - fmts = _fmt_string_list(seg_dat) - hcond, thickm, elevupdn, width, depth, thts, thti, eps, uhc = \ - [0 if v == self.default_value else v for v in seg_dat] - - if self.isfropt in [0, 4, 5] and icalc <= 0: - f_sfr.write( - ' '.join(fmts[0:5]).format(hcond, thickm, elevupdn, width, - depth) + ' ') - - elif self.isfropt in [0, 4, 5] and icalc == 1: - f_sfr.write(fmts[0].format(hcond) + ' ') - - if i == 0: - f_sfr.write( - ' '.join(fmts[1:4]).format(thickm, elevupdn, width) + ' ') - if self.isfropt in [4, 5]: - f_sfr.write( - ' '.join(fmts[5:8]).format(thts, thti, eps) + ' ') - - if self.isfropt == 5: - f_sfr.write(fmts[8].format(uhc) + ' ') - - elif i > 0 and self.isfropt == 0: - f_sfr.write( - ' '.join(fmts[1:4]).format(thickm, elevupdn, width) + ' ') - - elif self.isfropt in [0, 4, 5] and icalc >= 2: - f_sfr.write(fmts[0].format(hcond) + ' ') - - if self.isfropt in [4, 5] and i > 0 and icalc == 2: - pass - else: - f_sfr.write(' '.join(fmts[1:3]).format(thickm, elevupdn) + ' ') - - if self.isfropt in [4, 5] and icalc == 2 and i == 0: - f_sfr.write( - ' '.join(fmts[3:6]).format(thts, thti, eps) + ' ') - - if self.isfropt == 5: - f_sfr.write(fmts[8].format(uhc) + ' ') - else: - pass - elif self.isfropt == 1 and icalc <= 1: - f_sfr.write(fmts[3].format(width) + ' ') - if icalc <= 0: - f_sfr.write(fmts[4].format(depth) + ' ') - elif self.isfropt in [2, 3]: - if icalc <= 0: - f_sfr.write(fmts[3].format(width) + ' ') - f_sfr.write(fmts[4].format(depth) + ' ') - elif icalc == 1: - if i > 0: - pass - else: - f_sfr.write(fmts[3].format(width) + ' ') - else: - pass - - else: - return - f_sfr.write('\n') - - def write_file(self, filename=None): - """ - Write the package file. 
- - Returns - ------- - None - - """ - - # tabfiles = False - # tabfiles_dict = {} - # transroute = False - # reachinput = False - if filename is not None: - self.fn_path = filename - - f_sfr = open(self.fn_path, 'w') - - # Item 0 -- header - f_sfr.write('{0}\n'.format(self.heading)) - - # Item 1 - if isinstance(self.options, - OptionBlock) and self.parent.version == "mfnwt": - self.options.update_from_package(self) - self.options.write_options(f_sfr) - elif isinstance(self.options, OptionBlock): - self.options.update_from_package(self) - self.options.block = False - self.options.write_options(f_sfr) - else: - pass - - self._write_1c(f_sfr) - - # item 2 - self._write_reach_data(f_sfr) - - # items 3 and 4 are skipped (parameters not supported) - - for i in range(0, self.nper): - - # item 5 - itmp = self.dataset_5[i][0] - f_sfr.write(' '.join(map(str, self.dataset_5[i])) + '\n') - if itmp > 0: - - # Item 6 - for j in range(itmp): - - # write datasets 6a, 6b and 6c - self._write_segment_data(i, j, f_sfr) - - icalc = self.segment_data[i].icalc[j] - nseg = self.segment_data[i].nseg[j] - if icalc == 2: - # or isfropt <= 1: - if i == 0 or self.nstrm > 0 and \ - not self.reachinput or self.isfropt <=1: - for k in range(2): - for d in self.channel_geometry_data[i][nseg][ - k]: - f_sfr.write('{:.2f} '.format(d)) - f_sfr.write('\n') - - if icalc == 4: - # nstrpts = self.segment_data[i][j][5] - for k in range(3): - for d in self.channel_flow_data[i][nseg][k]: - f_sfr.write('{:.2f} '.format(d)) - f_sfr.write('\n') - if self.tabfiles and i == 0: - for j in sorted(self.tabfiles_dict.keys()): - f_sfr.write('{:.0f} {:.0f} {:.0f}\n'.format(j, - self.tabfiles_dict[ - j][ - 'numval'], - self.tabfiles_dict[ - j][ - 'inuit'])) - else: - continue - f_sfr.close() - - def export(self, f, **kwargs): - if isinstance(f, str) and f.lower().endswith(".shp"): - from flopy.utils.geometry import Polygon - from flopy.export.shapefile_utils import recarray2shp - geoms = [] - for ix, i in enumerate(self.reach_data.i): - verts = self.parent.modelgrid.get_cell_vertices( - i, self.reach_data.j[ix]) - geoms.append(Polygon(verts)) - recarray2shp(self.reach_data, geoms, shpname=f, **kwargs) - else: - from flopy import export - return export.utils.package_export(f, self, **kwargs) - - def export_linkages(self, f, **kwargs): - """ - Export linework shapefile showing all routing connections between - SFR reaches. A length field containing the distance between connected - reaches can be used to filter for the longest connections in a GIS. 
- - """ - from flopy.utils.geometry import LineString - from flopy.export.shapefile_utils import recarray2shp - rd = self.reach_data.copy() - m = self.parent - rd.sort(order=['reachID']) - - # get the cell centers for each reach - mg = m.modelgrid - x0 = mg.xcellcenters[rd.i, rd.j] - y0 = mg.ycellcenters[rd.i, rd.j] - loc = dict(zip(rd.reachID, zip(x0, y0))) - - # make lines of the reach connections between cell centers - geoms = [] - lengths = [] - for r in rd.reachID: - x0, y0 = loc[r] - outreach = rd.outreach[r - 1] - if outreach == 0: - x1, y1 = x0, y0 - else: - x1, y1 = loc[outreach] - geoms.append(LineString([(x0, y0), (x1, y1)])) - lengths.append(np.sqrt((x1 - x0) ** 2 + (y1 - y0) ** 2)) - lengths = np.array(lengths) - - # append connection lengths for filtering in GIS - rd = recfunctions.append_fields(rd, - names=['length'], - data=[lengths], - usemask=False, - asrecarray=True) - recarray2shp(rd, geoms, f, **kwargs) - - def export_outlets(self, f, **kwargs): - """ - Export point shapefile showing locations where streamflow is leaving - the model (outset=0). - - """ - from flopy.utils.geometry import Point - from flopy.export.shapefile_utils import recarray2shp - rd = self.reach_data - if np.min(rd.outreach) == np.max(rd.outreach): - self.set_outreaches() - rd = self.reach_data[self.reach_data.outreach == 0].copy() - m = self.parent - rd.sort(order=['iseg', 'ireach']) - - # get the cell centers for each reach - mg = m.modelgrid - x0 = mg.xcellcenters[rd.i, rd.j] - y0 = mg.ycellcenters[rd.i, rd.j] - geoms = [Point(x, y) for x, y in zip(x0, y0)] - recarray2shp(rd, geoms, f, **kwargs) - - def export_transient_variable(self, f, varname, **kwargs): - """ - Export point shapefile showing locations with a given segment_data - variable applied. For example, segments where streamflow is entering - or leaving the upstream end of a stream segment (FLOW) or where RUNOFF - is applied. Cell centroids of the first reach of segments with non-zero - terms of varname are exported; values of varname are exported by stress - period in the attribute fields (e.g. flow0, flow1, flow2... for FLOW - in stress periods 0, 1, 2... - - Parameters - ---------- - f : str, filename - varname : str - Variable in SFR Package dataset 6a (see SFR package documentation) - - """ - from flopy.utils.geometry import Point - from flopy.export.shapefile_utils import recarray2shp - - rd = self.reach_data - if np.min(rd.outreach) == np.max(rd.outreach): - self.set_outreaches() - ra = self.get_variable_by_stress_period(varname.lower()) - - # get the cell centers for each reach - m = self.parent - mg = m.modelgrid - x0 = mg.xcellcenters[ra.i, ra.j] - y0 = mg.ycellcenters[ra.i, ra.j] - geoms = [Point(x, y) for x, y in zip(x0, y0)] - recarray2shp(ra, geoms, f, **kwargs) - - @staticmethod - def ftype(): - return 'SFR' - - @staticmethod - def defaultunit(): - return 17 - - -class check: - """ - Check SFR2 package for common errors - - Parameters - ---------- - sfrpackage : object - Instance of Flopy ModflowSfr2 class. - verbose : bool - Boolean flag used to determine if check method results are - written to the screen - level : int - Check method analysis level. If level=0, summary checks are - performed. If level=1, full checks are performed. - - Notes - ----- - - Daniel Feinstein's top 10 SFR problems (7/16/2014): - 1) cell gaps btw adjacent reaches in a single segment - 2) cell gaps btw routed segments. 
possibly because of re-entry problems at domain edge - 3) adjacent reaches with STOP sloping the wrong way - 4) routed segments with end/start sloping the wrong way - 5) STOP>TOP1 violations, i.e.,floaters - 6) STOP< col2, subsets array to only include rows where - col1 is greater. Creates another column with differences - (col1-col2), and prints the array sorted by the differences - column (diff). - - Parameters - ---------- - array : record array - Array with columns to compare. - col1 : string - Column name in array. - col2 : string - Column name in array. - sort_ascending : T/F; default True - If True, printed array will be sorted by differences in - ascending order. - print_delimiter : str - Delimiter for printed array. - - Returns - ------- - txt : str - Error messages and printed array (if .level attribute of - checker is set to 1). Returns an empty string if no - values in col1 are greater than col2. - - Notes - ----- - info about appending to record arrays (views vs. copies and upcoming - changes to numpy): - http://stackoverflow.com/questions/22865877/how-do-i-write-to-multiple-fields-of-a-structured-array - """ - txt = '' - array = array.view(np.recarray).copy() - if isinstance(col1, np.ndarray): - array = recfunctions.append_fields(array, names='tmp1', data=col1, - asrecarray=True) - col1 = 'tmp1' - if isinstance(col2, np.ndarray): - array = recfunctions.append_fields(array, names='tmp2', data=col2, - asrecarray=True) - col2 = 'tmp2' - if isinstance(col1, tuple): - array = recfunctions.append_fields(array, names=col1[0], - data=col1[1], - asrecarray=True) - col1 = col1[0] - if isinstance(col2, tuple): - array = recfunctions.append_fields(array, names=col2[0], - data=col2[1], - asrecarray=True) - col2 = col2[0] - - failed = array[col1] > array[col2] - if np.any(failed): - failed_info = np.array(array)[failed] - txt += level0txt.format(len(failed_info)) + '\n' - if self.level == 1: - diff = failed_info[col2] - failed_info[col1] - cols = [c for c in failed_info.dtype.names if - failed_info[c].sum() != 0 - and c != 'diff' - and 'tmp' not in c] - failed_info = recfunctions.append_fields( - failed_info[cols].copy(), names='diff', data=diff, - usemask=False, asrecarray=False) - failed_info.sort(order='diff', axis=0) - if not sort_ascending: - failed_info = failed_info[::-1] - txt += level1txt + '\n' - txt += _print_rec_array(failed_info, delimiter=print_delimiter) - txt += '\n' - return txt - - def _txt_footer(self, headertxt, txt, testname, passed=False, - warning=True): - if len(txt) == 0 or passed: - txt += 'passed.' 
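# _txt_footer() sorts each named test into one of three bins kept on the
# checker instance (self.passed, self.warnings, or self.errors) and appends
# the test's text to the running self.txt report. After a clean run one
# might see, e.g. (hypothetical output):
#
#   chk.passed   -> ['nan values', 'circular routing', ...]
#   chk.warnings -> []
#   chk.errors   -> []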
-            self.passed.append(testname)
-        elif warning:
-            self.warnings.append(testname)
-        else:
-            self.errors.append(testname)
-        if self.verbose:
-            print(txt + '\n')
-        self.txt += headertxt + txt + '\n'
-
-    def for_nans(self):
-        """
-        Check for nans in reach or segment data
-
-        """
-        headertxt = 'Checking for nan values...\n'
-        txt = ''
-        passed = False
-        isnan = np.any(np.isnan(np.array(self.reach_data.tolist())), axis=1)
-        nanreaches = self.reach_data[isnan]
-        if np.any(isnan):
-            txt += 'Found {} reaches with nans:\n'.format(len(nanreaches))
-            if self.level == 1:
-                txt += _print_rec_array(nanreaches, delimiter=' ')
-        for per, sd in self.segment_data.items():
-            isnan = np.any(np.isnan(np.array(sd.tolist())), axis=1)
-            nansd = sd[isnan]
-            if np.any(isnan):
-                txt += 'Per {}: found {} segments with nans:\n'.format(
-                    per, len(nansd))
-                if self.level == 1:
-                    txt += _print_rec_array(nansd, delimiter=' ')
-        if len(txt) == 0:
-            passed = True
-        self._txt_footer(headertxt, txt, 'nan values', passed)
-
-    def run_all(self):
-        return self.sfr.check()
-
-    def numbering(self):
-        """
-        Checks for continuity in segment and reach numbering
-        """
-
-        headertxt = 'Checking for continuity in segment and reach numbering...\n'
-        if self.verbose:
-            print(headertxt.strip())
-        txt = ''
-        passed = False
-
-        sd = self.segment_data[0]
-        # check segment numbering
-        txt += _check_numbers(self.sfr.nss,
-                              sd['nseg'],
-                              level=self.level,
-                              datatype='segment')
-
-        # check reach numbering
-        for segment in np.arange(1, self.sfr.nss + 1):
-            reaches = self.reach_data.ireach[self.reach_data.iseg == segment]
-            t = _check_numbers(len(reaches),
-                               reaches,
-                               level=self.level,
-                               datatype='reach')
-            if len(t) > 0:
-                txt += 'Segment {} has {}'.format(segment, t)
-        if txt == '':
-            passed = True
-        self._txt_footer(headertxt, txt,
-                         'continuity in segment and reach numbering', passed,
-                         warning=False)
-
-        headertxt = 'Checking for increasing segment numbers in downstream direction...\n'
-        txt = ''
-        passed = False
-        if self.verbose:
-            print(headertxt.strip())
-        # for per, segment_data in self.segment_data.items():
-
-        inds = (sd.outseg < sd.nseg) & (sd.outseg != 0)
-
-        if len(txt) == 0 and np.any(inds):
-            decreases = np.array(sd[inds])[['nseg', 'outseg']]
-            txt += 'Found {} segment numbers decreasing in the downstream direction.\n'.format(
-                len(decreases))
-            txt += 'MODFLOW will run but convergence may be slowed:\n'
-            if self.level == 1:
-                txt += 'nseg outseg\n'
-                t = ''
-                for nseg, outseg in decreases:
-                    t += '{} {}\n'.format(nseg, outseg)
-                txt += t  # '\n'.join(textwrap.wrap(t, width=10))
-        if len(txt) == 0:
-            passed = True
-        self._txt_footer(headertxt, txt, 'segment numbering order', passed)
-
-    def routing(self):
-        """
-        Checks for breaks in routing and does comprehensive check for
-        circular routing
-
-        """
-        headertxt = 'Checking for circular routing...\n'
-        txt = ''
-        if self.verbose:
-            print(headertxt.strip())
-
-        # txt += self.sfr.get_outlets(level=self.level, verbose=False)  # will print twice if verbose=True
-        # simpler check method using paths from routing graph
-        circular_segs = [k for k, v in self.sfr.paths.items() if v is None]
-        if len(circular_segs) > 0:
-            txt += '{0} instances where an outlet was not found after {1} consecutive segments!\n' \
-                .format(len(circular_segs), self.sfr.nss)
-            if self.level == 1:
-                txt += ' '.join(map(str, circular_segs)) + '\n'
-            else:
-                f = os.path.join(self.sfr.parent._model_ws,
-                                 'circular_routing.chk.csv')
-                np.savetxt(f, circular_segs, fmt='%d', delimiter=',',
-                           header=txt)
-                txt += 'See {} for details.'.format(f)
-            if self.verbose:
-                print(txt)
-        self._txt_footer(headertxt, txt, 'circular routing', warning=False)
-
-        # check reach connections for proximity
-        if self.mg is not None:
-            rd = self.sfr.reach_data.copy()
-            rd.sort(order=['reachID'])
-            try:
-                xcentergrid, ycentergrid, zc = self.mg.get_cellcenters()
-                del zc
-            except AttributeError:
-                xcentergrid = self.mg.xcellcenters
-                ycentergrid = self.mg.ycellcenters
-
-            x0 = xcentergrid[rd.i, rd.j]
-            y0 = ycentergrid[rd.i, rd.j]
-            loc = dict(zip(rd.reachID, zip(x0, y0)))
-
-            # compute distances between node centers of connected reaches
-            headertxt = 'Checking reach connections for proximity...\n'
-            txt = ''
-            if self.verbose:
-                print(headertxt.strip())
-            dist = []
-            for r in rd.reachID:
-                x0, y0 = loc[r]
-                outreach = rd.outreach[r - 1]
-                if outreach == 0:
-                    dist.append(0)
-                else:
-                    x1, y1 = loc[outreach]
-                    dist.append(np.sqrt((x1 - x0) ** 2 + (y1 - y0) ** 2))
-            dist = np.array(dist)
-
-            # compute max width of reach nodes (hypotenuse for rectangular nodes)
-            delr = self.mg.delr
-            delc = self.mg.delc
-
-            dx = delr[rd.j]  # (delr * self.sr.length_multiplier)[rd.j]
-            dy = delc[rd.i]  # (delc * self.sr.length_multiplier)[rd.i]
-            hyp = np.sqrt(dx ** 2 + dy ** 2)
-
-            # breaks are when the connection distance is greater than
-            # max node width * a tolerance;
-            # 1.25 * hyp is greater than the distance of two diagonally
-            # adjacent nodes where one is 1.5x larger than the other
-            breaks = np.where(dist > hyp * 1.25)[0]
-            breaks_reach_data = rd[breaks]
-            segments_with_breaks = set(breaks_reach_data.iseg)
-            if len(breaks) > 0:
-                txt += '{0} segments '.format(len(segments_with_breaks)) + \
-                       'with non-adjacent reaches found.\n'
-                if self.level == 1:
-                    txt += 'At segments:\n'
-                    txt += ' '.join(map(str, segments_with_breaks)) + '\n'
-                else:
-                    f = os.path.join(self.sfr.parent._model_ws,
-                                     'reach_connection_gaps.chk.csv')
-                    rd.tofile(f, sep='\t')
-                    txt += 'See {} for details.'.format(f)
-                if self.verbose:
-                    print(txt)
-            self._txt_footer(headertxt, txt, 'reach connections',
-                             warning=False)
-        else:
-            txt += 'No DIS package or SpatialReference object; cannot ' + \
-                   'check reach proximities.'
-            self._txt_footer(headertxt, txt, '')
-
-    def overlapping_conductance(self, tol=1e-6):
-        """
-        Checks for multiple SFR reaches in one cell; and whether more than
-        one reach has Cond > 0
-
-        """
-        headertxt = 'Checking for model cells with multiple non-zero ' + \
-                    'SFR conductances...\n'
-        txt = ''
-        if self.verbose:
-            print(headertxt.strip())
-
-        # make nreach vectors of each conductance parameter
-        reach_data = np.array(self.reach_data)
-        # if no dis file was supplied, can't compute node numbers
-        # make nodes based on unique row, col pairs
-        # if np.diff(reach_data.node).max() == 0:
-        # always use unique rc, since flopy assigns nodes by k, i, j
-        uniquerc = {}
-        for i, (r, c) in enumerate(reach_data[['i', 'j']]):
-            if (r, c) not in uniquerc:
-                uniquerc[(r, c)] = i + 1
-        reach_data['node'] = [uniquerc[(r, c)] for r, c in
-                              reach_data[['i', 'j']]]
-
-        K = reach_data['strhc1']
-        if K.max() == 0:
-            K = self.sfr._interpolate_to_reaches('hcond1', 'hcond2')
-        b = reach_data['strthick']
-        if b.max() == 0:
-            b = self.sfr._interpolate_to_reaches('thickm1', 'thickm2')
-        L = reach_data['rchlen']
-        w = self.sfr._interpolate_to_reaches('width1', 'width2')
-
-        # Calculate SFR conductance for each reach
-        binv = np.zeros(b.shape, dtype=b.dtype)
-        idx = b > 0.
-        binv[idx] = 1.
/ b[idx] - Cond = K * w * L * binv - - shared_cells = _get_duplicates(reach_data['node']) - - nodes_with_multiple_conductance = set() - for node in shared_cells: - - # select the collocated reaches for this cell - conductances = Cond[reach_data['node'] == node].copy() - conductances.sort() - - # list nodes with multiple non-zero SFR reach conductances - if (conductances[-1] != 0.0 and - (conductances[0] / conductances[-1] > tol)): - nodes_with_multiple_conductance.update({node}) - - if len(nodes_with_multiple_conductance) > 0: - txt += '{} model cells with multiple non-zero SFR conductances found.\n' \ - 'This may lead to circular routing between collocated reaches.\n' \ - .format(len(nodes_with_multiple_conductance)) - if self.level == 1: - txt += 'Nodes with overlapping conductances:\n' - - reach_data['strthick'] = b - reach_data['strhc1'] = K - - cols = [c for c in reach_data.dtype.names if c in \ - ['k', 'i', 'j', 'iseg', 'ireach', 'rchlen', 'strthick', - 'strhc1', 'width', 'conductance']] - - reach_data = recfunctions.append_fields( - reach_data, - names=['width', 'conductance'], data=[w, Cond], - usemask=False, asrecarray=False) - has_multiple = np.array( - [True if n in nodes_with_multiple_conductance - else False for n in reach_data['node']]) - reach_data = reach_data[has_multiple] - reach_data = reach_data[cols] - txt += _print_rec_array(reach_data, delimiter='\t') - - self._txt_footer(headertxt, txt, 'overlapping conductance') - - def elevations(self, min_strtop=-10, max_strtop=15000): - """ - Checks streambed elevations for downstream rises and inconsistencies - with model grid - - """ - headertxt = 'Checking for streambed tops of less ' + \ - 'than {}...\n'.format(min_strtop) - txt = '' - if self.verbose: - print(headertxt.strip()) - - passed = False - if self.sfr.isfropt in [1, 2, 3]: - if np.diff(self.reach_data.strtop).max() == 0: - txt += 'isfropt setting of 1,2 or 3 requires strtop information!\n' - else: - is_less = self.reach_data.strtop < min_strtop - if np.any(is_less): - below_minimum = self.reach_data[is_less] - txt += '{} instances of streambed top below minimum found.\n'.format( - len(below_minimum)) - if self.level == 1: - txt += 'Reaches with low strtop:\n' - txt += _print_rec_array(below_minimum, delimiter='\t') - if len(txt) == 0: - passed = True - else: - txt += 'strtop not specified for isfropt={}\n'.format( - self.sfr.isfropt) - passed = True - self._txt_footer(headertxt, txt, 'minimum streambed top', passed) - - headertxt = 'Checking for streambed tops of ' + \ - 'greater than {}...\n'.format(max_strtop) - txt = '' - if self.verbose: - print(headertxt.strip()) - - passed = False - if self.sfr.isfropt in [1, 2, 3]: - if np.diff(self.reach_data.strtop).max() == 0: - txt += 'isfropt setting of 1,2 or 3 ' + \ - 'requires strtop information!\n' - else: - is_greater = self.reach_data.strtop > max_strtop - if np.any(is_greater): - above_max = self.reach_data[is_greater] - txt += '{} instances '.format(len(above_max)) + \ - 'of streambed top above the maximum found.\n' - if self.level == 1: - txt += 'Reaches with high strtop:\n' - txt += _print_rec_array(above_max, delimiter='\t') - if len(txt) == 0: - passed = True - else: - txt += 'strtop not specified for isfropt={}\n'.format( - self.sfr.isfropt) - passed = True - self._txt_footer(headertxt, txt, 'maximum streambed top', passed) - - headertxt = 'Checking segment_data for ' + \ - 'downstream rises in streambed elevation...\n' - txt = '' - if self.verbose: - print(headertxt.strip()) - - # decide whether to check 
elevup and elevdn from items 6b/c - # (see online guide to SFR input; Data Set 6b description) - passed = False - if self.sfr.isfropt in [0, 4, 5]: - pers = sorted(self.segment_data.keys()) - for per in pers: - segment_data = self.segment_data[per][ - self.segment_data[per].elevup > -999999] - - # enforce consecutive increasing segment numbers (for indexing) - segment_data.sort(order='nseg') - t = _check_numbers(len(segment_data), segment_data.nseg, - level=1, datatype='Segment') - if len(t) > 0: - txt += 'Elevation check requires ' + \ - 'consecutive segment numbering.' - self._txt_footer(headertxt, txt, '') - return - - # first check for segments where elevdn > elevup - d_elev = segment_data.elevdn - segment_data.elevup - segment_data = recfunctions.append_fields(segment_data, - names='d_elev', - data=d_elev, - asrecarray=True) - txt += self._boolean_compare( - np.array(segment_data)[['nseg', 'outseg', 'elevup', - 'elevdn', 'd_elev']], - col1='d_elev', col2=np.zeros(len(segment_data)), - level0txt='Stress Period {}: '.format(per + 1) + \ - '{} segments encountered with elevdn > elevup.', - level1txt='Backwards segments:', - ) - - # next check for rises between segments - non_outlets = segment_data.outseg > 0 - non_outlets_seg_data = segment_data[ - non_outlets] # lake outsegs are < 0 - outseg_elevup = np.array( - [segment_data.elevup[o - 1] for o in segment_data.outseg if - o > 0]) - d_elev2 = outseg_elevup - segment_data.elevdn[non_outlets] - non_outlets_seg_data = recfunctions.append_fields( - non_outlets_seg_data, - names=['outseg_elevup', 'd_elev2'], - data=[outseg_elevup, d_elev2], - usemask=False, asrecarray=False) - - txt += self._boolean_compare( - non_outlets_seg_data[['nseg', 'outseg', 'elevdn', - 'outseg_elevup', 'd_elev2']], - col1='d_elev2', col2=np.zeros(len(non_outlets_seg_data)), - level0txt='Stress Period {}: '.format(per + 1) + \ - '{} segments encountered with segments encountered ' \ - 'with outseg elevup > elevdn.', - level1txt='Backwards segment connections:', - ) - - if len(txt) == 0: - passed = True - else: - txt += 'Segment elevup and elevdn not ' + \ - 'specified for nstrm=' + \ - '{} and isfropt={}\n'.format(self.sfr.nstrm, - self.sfr.isfropt) - passed = True - self._txt_footer(headertxt, txt, 'segment elevations', passed) - - headertxt = 'Checking reach_data for ' + \ - 'downstream rises in streambed elevation...\n' - txt = '' - if self.verbose: - print(headertxt.strip()) - passed = False - if self.sfr.nstrm < 0 or self.sfr.reachinput and self.sfr.isfropt in [ - 1, 2, 3]: # see SFR input instructions - - # compute outreaches if they aren't there already - if np.diff(self.sfr.reach_data.outreach).max() == 0: - self.sfr.set_outreaches() - - # compute changes in elevation - rd = self.reach_data.copy() - elev = dict(zip(rd.reachID, rd.strtop)) - dnelev = {rid: elev[rd.outreach[i]] if rd.outreach[i] != 0 - else -9999 for i, rid in enumerate(rd.reachID)} - strtopdn = np.array([dnelev[r] for r in rd.reachID]) - diffs = np.array([(dnelev[i] - elev[i]) if dnelev[i] != -9999 - else -.001 for i in rd.reachID]) - - reach_data = self.sfr.reach_data # inconsistent with other checks that work with - # reach_data attribute of check class. Want to have get_outreaches as a method of sfr class - # (for other uses). Not sure if other check methods should also copy reach_data directly from - # SFR package instance for consistency. 
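The dictionary-based downstream lookup above is the heart of this check. A standalone sketch of the same pattern, with a fabricated three-reach network standing in for a real reach_data recarray (the reachID, outreach, and strtop values here are invented for illustration):

    import numpy as np

    # fabricated network: reach 1 -> 2 -> 3 -> outlet (outreach 0)
    reachID = np.array([1, 2, 3])
    outreach = np.array([2, 3, 0])
    strtop = np.array([10.0, 10.5, 9.0])  # reach 2 sits above reach 1

    elev = dict(zip(reachID, strtop))
    # streambed top of the downstream reach; -9999 flags outlets, as above
    dnelev = {rid: elev[outreach[i]] if outreach[i] != 0 else -9999
              for i, rid in enumerate(reachID)}
    # a positive difference means the downstream reach is higher (a rise)
    diffs = np.array([dnelev[r] - elev[r] if dnelev[r] != -9999 else -0.001
                      for r in reachID])
    print(reachID[diffs > 0])  # -> [1]: reach 1 routes uphill into reach 2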
- - # use outreach values to get downstream elevations - # non_outlets = reach_data[reach_data.outreach != 0] - # outreach_elevdn = np.array([reach_data.strtop[o - 1] for o in reach_data.outreach]) - # d_strtop = outreach_elevdn[reach_data.outreach != 0] - non_outlets.strtop - rd = recfunctions.append_fields( - rd, names=['strtopdn', 'd_strtop'], data=[strtopdn, diffs], - usemask=False, asrecarray=False) - - txt += self._boolean_compare( - rd[['k', 'i', 'j', 'iseg', 'ireach', 'strtop', 'strtopdn', - 'd_strtop', 'reachID']], - col1='d_strtop', col2=np.zeros(len(rd)), - level0txt='{} reaches encountered with strtop < strtop of downstream reach.', - level1txt='Elevation rises:', - ) - if len(txt) == 0: - passed = True - else: - txt += 'Reach strtop not specified for nstrm={}, reachinput={} and isfropt={}\n' \ - .format(self.sfr.nstrm, self.sfr.reachinput, self.sfr.isfropt) - passed = True - self._txt_footer(headertxt, txt, 'reach elevations', passed) - - headertxt = 'Checking reach_data for inconsistencies between streambed elevations and the model grid...\n' - if self.verbose: - print(headertxt.strip()) - txt = '' - if self.sfr.parent.dis is None: - txt += 'No DIS file supplied; cannot check SFR elevations against model grid.' - self._txt_footer(headertxt, txt, '') - return - passed = False - warning = True - if (self.sfr.nstrm < 0 or self.sfr.reachinput and - self.sfr.isfropt in [1, 2, 3]): # see SFR input instructions - reach_data = np.array(self.reach_data) - i, j, k = reach_data['i'], reach_data['j'], reach_data['k'] - - # check streambed bottoms in relation to respective cell bottoms - bots = self.sfr.parent.dis.botm.array[k, i, j] - streambed_bots = reach_data['strtop'] - reach_data['strthick'] - reach_data = recfunctions.append_fields( - reach_data, names=['layerbot', 'strbot'], - data=[bots, streambed_bots], usemask=False, asrecarray=False) - - txt += self._boolean_compare( - reach_data[['k', 'i', 'j', 'iseg', 'ireach', 'strtop', - 'strthick', 'strbot', 'layerbot', 'reachID']], - col1='layerbot', col2='strbot', - level0txt='{} reaches encountered with streambed bottom below layer bottom.', - level1txt='Layer bottom violations:', - ) - if len(txt) > 0: - warning = False # this constitutes an error (MODFLOW won't run) - # check streambed elevations in relation to model top - tops = self.sfr.parent.dis.top.array[i, j] - reach_data = recfunctions.append_fields( - reach_data, names='modeltop', data=tops, - usemask=False, asrecarray=False) - - txt += self._boolean_compare( - reach_data[['k', 'i', 'j', 'iseg', 'ireach', - 'strtop', 'modeltop', 'strhc1', 'reachID']], - col1='strtop', col2='modeltop', - level0txt='{} reaches encountered with streambed above model top.', - level1txt='Model top violations:', - ) - - if len(txt) == 0: - passed = True - else: - txt += 'Reach strtop, strthick not specified for nstrm={}, reachinput={} and isfropt={}\n' \ - .format(self.sfr.nstrm, self.sfr.reachinput, self.sfr.isfropt) - passed = True - self._txt_footer(headertxt, txt, - 'reach elevations vs. grid elevations', passed, - warning=warning) - - # In cases where segment end elevations/thicknesses are used, - # do these need to be checked for consistency with layer bottoms? 
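Both grid-consistency tests above reduce to vectorized comparisons against the DIS arrays. A self-contained sketch of the streambed-bottom test, assuming a one-layer model; the botm values and reach properties are made up:

    import numpy as np

    botm = np.full((1, 2, 2), 5.0)     # fabricated (nlay, nrow, ncol) bottoms
    k = np.array([0, 0])               # layer, row, column of two reaches
    i = np.array([0, 1])
    j = np.array([0, 1])
    strtop = np.array([8.0, 5.5])
    strthick = np.array([1.0, 1.0])

    strbot = strtop - strthick         # streambed bottom
    layerbot = botm[k, i, j]           # bottom of each reach's host cell
    # any True here is an error condition: MODFLOW will not run
    print(np.nonzero(strbot < layerbot)[0])  # -> [1]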
- - headertxt = 'Checking segment_data for inconsistencies ' + \ - 'between segment end elevations and the model grid...\n' - txt = '' - if self.verbose: - print(headertxt.strip()) - passed = False - if self.sfr.isfropt in [0, 4, 5]: - reach_data = self.reach_data - pers = sorted(self.segment_data.keys()) - for per in pers: - segment_data = self.segment_data[per][ - self.segment_data[per].elevup > -999999] - - # enforce consecutive increasing segment numbers (for indexing) - segment_data.sort(order='nseg') - t = _check_numbers(len(segment_data), segment_data.nseg, - level=1, datatype='Segment') - if len(t) > 0: - raise Exception( - 'Elevation check requires consecutive segment numbering.') - - first_reaches = reach_data[reach_data.ireach == 1].copy() - last_reaches = reach_data[ - np.append((np.diff(reach_data.iseg) == 1), True)].copy() - segment_ends = recfunctions.stack_arrays( - [first_reaches, last_reaches], - asrecarray=True, usemask=False) - segment_ends['strtop'] = np.append(segment_data['elevup'], - segment_data['elevdn']) - i, j = segment_ends.i, segment_ends.j - tops = self.sfr.parent.dis.top.array[i, j] - diff = tops - segment_ends.strtop - segment_ends = recfunctions.append_fields( - segment_ends, - names=['modeltop', 'diff'], data=[tops, diff], - usemask=False, asrecarray=False) - - txt += self._boolean_compare(segment_ends[['k', 'i', 'j', 'iseg', - 'strtop', 'modeltop', - 'diff', - 'reachID']].copy(), - col1=np.zeros(len(segment_ends)), - col2='diff', - level0txt='{} reaches encountered with streambed above model top.', - level1txt='Model top violations:', - ) - - if len(txt) == 0: - passed = True - else: - txt += 'Segment elevup and elevdn not specified for nstrm={} and isfropt={}\n' \ - .format(self.sfr.nstrm, self.sfr.isfropt) - passed = True - self._txt_footer(headertxt, txt, 'segment elevations vs. model grid', - passed) - - def slope(self, minimum_slope=1e-4, maximum_slope=1.0): - """Checks that streambed slopes are greater than or equal to a specified minimum value. - Low slope values can cause "backup" or unrealistic stream stages with icalc options - where stage is computed. 
- """ - headertxt = 'Checking for streambed slopes of less than {}...\n'.format( - minimum_slope) - txt = '' - if self.verbose: - print(headertxt.strip()) - - passed = False - if self.sfr.isfropt in [1, 2, 3]: - if np.diff(self.reach_data.slope).max() == 0: - txt += 'isfropt setting of 1,2 or 3 requires slope information!\n' - else: - is_less = self.reach_data.slope < minimum_slope - if np.any(is_less): - below_minimum = self.reach_data[is_less] - txt += '{} instances of streambed slopes below minimum found.\n'.format( - len(below_minimum)) - if self.level == 1: - txt += 'Reaches with low slopes:\n' - txt += _print_rec_array(below_minimum, delimiter='\t') - if len(txt) == 0: - passed = True - else: - txt += 'slope not specified for isfropt={}\n'.format( - self.sfr.isfropt) - passed = True - self._txt_footer(headertxt, txt, 'minimum slope', passed) - - headertxt = 'Checking for streambed slopes of greater than {}...\n'.format( - maximum_slope) - txt = '' - if self.verbose: - print(headertxt.strip()) - - passed = False - if self.sfr.isfropt in [1, 2, 3]: - if np.diff(self.reach_data.slope).max() == 0: - txt += 'isfropt setting of 1,2 or 3 requires slope information!\n' - else: - is_greater = self.reach_data.slope > maximum_slope - - if np.any(is_greater): - above_max = self.reach_data[is_greater] - txt += '{} instances of streambed slopes above maximum found.\n'.format( - len(above_max)) - if self.level == 1: - txt += 'Reaches with high slopes:\n' - txt += _print_rec_array(above_max, delimiter='\t') - if len(txt) == 0: - passed = True - else: - txt += 'slope not specified for isfropt={}\n'.format( - self.sfr.isfropt) - passed = True - self._txt_footer(headertxt, txt, 'maximum slope', passed) - - -def _check_numbers(n, numbers, level=1, datatype='reach'): - """ - Check that a sequence of numbers is consecutive - (that the sequence is equal to the range from 1 to n+1, where n is - the expected length of the sequence). - - Parameters - ---------- - n : int - Expected length of the sequence (i.e. number of stream segments) - numbers : array - Sequence of numbers (i.e. 'nseg' column from the segment_data array) - level : int - Check method analysis level. If level=0, summary checks are - performed. If level=1, full checks are performed. - datatype : str, optional - Only used for reporting. - """ - txt = '' - num_range = np.arange(1, n + 1) - if not np.array_equal(num_range, numbers): - txt += 'Invalid {} numbering\n'.format(datatype) - if level == 1: - # consistent dimension for boolean array - non_consecutive = np.append(np.diff(numbers) != 1, - False) - gaps = num_range[non_consecutive] + 1 - if len(gaps) > 0: - gapstr = ' '.join(map(str, gaps)) - txt += 'Gaps in numbering at positions {}\n'.format(gapstr) - return txt - - -def _isnumeric(s): - try: - float(s) - return True - except: - return False - - -def _markitzero(recarray, inds): - """ - Subtracts 1 from columns specified in inds argument, to convert from - 1 to 0-based indexing - - """ - lnames = [n.lower() for n in recarray.dtype.names] - for idx in inds: - if (idx in lnames): - recarray[idx] -= 1 - - -def _pop_item(line): - try: - return float(line.pop(0)) - except: - return 0. - - -def _get_dataset(line, dataset): - # interpret number supplied with decimal points as floats, rest as ints - # this could be a bad idea (vs. 
explicitly formatting values for each dataset)
-    for i, s in enumerate(line_parse(line)):
-        try:
-            n = int(s)
-        except:
-            try:
-                n = float(s)
-            except:
-                break
-        dataset[i] = n
-    return dataset
-
-
-def _get_duplicates(a):
-    """
-    Returns duplicate values in an array, similar to the pandas .duplicated()
-    method.
-    http://stackoverflow.com/questions/11528078/determining-duplicate-values-in-an-array
-    """
-    s = np.sort(a, axis=None)
-    equal_to_previous_item = np.append(s[1:] == s[:-1],
-                                       False)  # maintain same dimension for boolean array
-    return np.unique(s[equal_to_previous_item])
-
-
-def _get_item2_names(nstrm, reachinput, isfropt, structured=False):
-    """
-    Determine which variables should be in item 2, based on model grid type,
-    reachinput specification, and isfropt.
-
-    Returns
-    -------
-    names : list of str
-        List of names (same as variables in SFR Package input instructions) of
-        columns to assign (upon load) or retain (upon write) in reach_data
-        array.
-
-    Notes
-    -----
-    Lowercase is used for all variable names.
-
-    """
-    names = []
-    if structured:
-        names += ['k', 'i', 'j']
-    else:
-        names += ['node']
-    names += ['iseg', 'ireach', 'rchlen']
-    if nstrm < 0 or reachinput:
-        if isfropt in [1, 2, 3]:
-            names += ['strtop', 'slope', 'strthick', 'strhc1']
-            if isfropt in [2, 3]:
-                names += ['thts', 'thti', 'eps']
-                if isfropt == 3:
-                    names += ['uhc']
-    return names
-
-
-def _fmt_string_list(array, float_format=default_float_format):
-    fmt_list = []
-    for name in array.dtype.names:
-        vtype = array.dtype[name].str[1].lower()
-        if vtype == 'v':
-            continue
-        if vtype == 'i':
-            fmt_list.append('{:d}')
-        elif vtype == 'f':
-            fmt_list.append(float_format)
-        elif vtype == 'o':
-            # use the default string representation for object columns
-            fmt_list.append('{!s}')
-        elif vtype == 's':
-            raise ValueError(
-                "'str' type found in dtype for {!r}. "
-                "This gives unpredictable results when writing a "
-                "recarray to file - change to 'object' type".format(name))
-        else:
-            raise ValueError(
-                "unknown dtype for {!r}: {!r}".format(name, vtype))
-    return fmt_list
-
-
-def _fmt_string(array, float_format=default_float_format):
-    return ' '.join(_fmt_string_list(array, float_format))
-
-
-def _print_rec_array(array, cols=None, delimiter=' ',
-                     float_format=default_float_format):
-    """
-    Print out a numpy record array to string, with column names.
-
-    Parameters
-    ----------
-    cols : list of strings
-        List of columns to print.
-    delimiter : string
-        Delimiter to use.
-
-    Returns
-    -------
-    txt : string
-        Text string of array.
-
-    """
-    txt = ''
-    if cols is not None:
-        cols = [c for c in array.dtype.names if c in cols]
-    else:
-        cols = list(array.dtype.names)
-    # drop columns with no data
-    if np.shape(array)[0] > 1:
-        cols = [c for c in cols if array[c].min() > -999999]
-    array = np.array(array)[cols]
-    fmts = _fmt_string_list(array, float_format=float_format)
-    txt += delimiter.join(cols) + '\n'
-    txt += '\n'.join(
-        [delimiter.join(fmts).format(*r) for r in array.tolist()])
-    return txt
-
-
-def _parse_1c(line, reachinput, transroute):
-    """
-    Parse Data Set 1c for SFR2 package.
-    See http://water.usgs.gov/nrp/gwsoftware/modflow2000/MFDOC/index.html?sfr.htm for more info
-
-    Parameters
-    ----------
-    line : str
-        line read from SFR package input file
-
-    Returns
-    -------
-    a tuple of the 17 Data Set 1c variables (nstrm through option)
-
-    """
-    na = 0
-    # line = _get_dataset(line, [0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 1, 30, 1, 2, 0.75, 0.0001, []])
-    # line = line.strip().split()
-    line = line_parse(line)
-
-    nstrm = int(line.pop(0))
-    nss = int(line.pop(0))
-    nsfrpar = int(line.pop(0))
-    nparseg = int(line.pop(0))
-    const = float(line.pop(0))
-    dleak = float(line.pop(0))
-    ipakcb = int(line.pop(0))
-    istcb2 = int(line.pop(0))
-
-    isfropt, nstrail, isuzn, nsfrsets = na, na, na, na
-    if reachinput:
-        nstrm = abs(nstrm)  # see explanation for dataset 1c in online guide
-        isfropt = int(line.pop(0))
-        if isfropt > 1:
-            nstrail = int(line.pop(0))
-            isuzn = int(line.pop(0))
-            nsfrsets = int(line.pop(0))
-    if nstrm < 0:
-        isfropt = int(line.pop(0))
-        if isfropt > 1:
-            nstrail = int(line.pop(0))
-            isuzn = int(line.pop(0))
-            nsfrsets = int(line.pop(0))
-
-    irtflg, numtim, weight, flwtol = na, na, na, na
-    if nstrm < 0 or transroute:
-        irtflg = int(_pop_item(line))
-        if irtflg > 0:
-            numtim = int(line.pop(0))
-            weight = float(line.pop(0))
-            flwtol = float(line.pop(0))
-
-    # auxiliary variables (MODFLOW-LGR)
-    option = [line[i] for i in np.arange(1, len(line)) if
-              'aux' in line[i - 1].lower()]
-
-    return nstrm, nss, nsfrpar, nparseg, const, dleak, ipakcb, istcb2, \
-           isfropt, nstrail, isuzn, nsfrsets, irtflg, numtim, weight, flwtol, \
-           option
-
-
-def _parse_6a(line, option):
-    """
-    Parse Data Set 6a for SFR2 package.
-    See http://water.usgs.gov/nrp/gwsoftware/modflow2000/MFDOC/index.html?sfr.htm for more info
-
-    Parameters
-    ----------
-    line : str
-        line read from SFR package input file
-
-    Returns
-    -------
-    a tuple of the Data Set 6a variables (nseg through bwdth), followed by
-    any auxiliary values (xyz)
-    """
-    # line = line.strip().split()
-    line = line_parse(line)
-
-    xyz = []
-    # handle any aux variables at end of line
-    for s in line:
-        if s.lower() in option:
-            xyz.append(s.lower())
-
-    na = 0
-    nseg = int(_pop_item(line))
-    icalc = int(_pop_item(line))
-    outseg = int(_pop_item(line))
-    iupseg = int(_pop_item(line))
-    iprior = na
-    nstrpts = na
-
-    if iupseg > 0:
-        iprior = int(_pop_item(line))
-    if icalc == 4:
-        nstrpts = int(_pop_item(line))
-
-    flow = _pop_item(line)
-    runoff = _pop_item(line)
-    etsw = _pop_item(line)
-    pptsw = _pop_item(line)
-    roughch = na
-    roughbk = na
-
-    if icalc in [1, 2]:
-        roughch = _pop_item(line)
-        if icalc == 2:
-            roughbk = _pop_item(line)
-
-    cdpth, fdpth, awdth, bwdth = na, na, na, na
-    if icalc == 3:
-        cdpth, fdpth, awdth, bwdth = map(float, line)
-    return nseg, icalc, outseg, iupseg, iprior, nstrpts, flow, runoff, etsw, \
-           pptsw, roughch, roughbk, cdpth, fdpth, awdth, bwdth, xyz
-
-
-def _parse_6bc(line, icalc, nstrm, isfropt, reachinput, per=0):
-    """
-    Parse Data Set 6b or 6c for SFR2 package.
- See http://water.usgs.gov/nrp/gwsoftware/modflow2000/MFDOC/index.html?sfr.htm for more info - - Parameters - ---------- - line : str - line read from SFR package input file - - Returns - ------- - a list of length 9 containing all variables for Data Set 6b - - """ - nvalues = sum([_isnumeric(s) for s in line_parse(line)]) - line = _get_dataset(line, [0] * nvalues) - - hcond, thickm, elevupdn, width, depth, thts, thti, eps, uhc = [0.0] * 9 - - if isfropt in [0, 4, 5] and icalc <= 0: - hcond = line.pop(0) - thickm = line.pop(0) - elevupdn = line.pop(0) - width = line.pop(0) - depth = line.pop(0) - elif isfropt in [0, 4, 5] and icalc == 1: - hcond = line.pop(0) - if isfropt in [4, 5] and per > 0: - pass - else: - thickm = line.pop(0) - elevupdn = line.pop(0) - # depth is not read if icalc == 1; see table in online guide - width = line.pop(0) - thts = _pop_item(line) - thti = _pop_item(line) - eps = _pop_item(line) - if isfropt == 5 and per == 0: - uhc = line.pop(0) - elif isfropt in [0, 4, 5] and icalc >= 2: - hcond = line.pop(0) - if isfropt in [4, 5] and per > 0 and icalc == 2: - pass - else: - thickm = line.pop(0) - elevupdn = line.pop(0) - if isfropt in [4, 5] and per == 0: - # table in online guide suggests that the following items should be present in this case - # but in the example - thts = _pop_item(line) - thti = _pop_item(line) - eps = _pop_item(line) - if isfropt == 5: - uhc = _pop_item(line) - else: - pass - elif isfropt == 1 and icalc <= 1: - width = line.pop(0) - if icalc <= 0: - depth = line.pop(0) - elif isfropt in [2, 3]: - if icalc <= 0: - width = line.pop(0) - depth = line.pop(0) - - elif icalc == 1: - if per > 0: - pass - else: - width = line.pop(0) - - else: - pass - else: - pass - return hcond, thickm, elevupdn, width, depth, thts, thti, eps, uhc - - -def find_path(graph, start, end=0, path=()): - - graph = graph.copy() - path = list(path) + [start] - if start == end: - return path - if start not in graph: - return None - if not isinstance(graph[start], list): - graph[start] = [graph[start]] - for node in graph[start]: - if node not in path: - newpath = find_path(graph, node, end, path) - if newpath: return newpath - return None +__author__ = 'aleaf' + +import sys +import os +import numpy as np +import warnings +import copy +from numpy.lib import recfunctions +from ..pakbase import Package +from ..utils import MfList +from ..utils.flopy_io import line_parse +from ..utils.recarray_utils import create_empty_recarray +from ..utils.optionblock import OptionBlock +from collections import OrderedDict + +try: + import pandas as pd +except: + pd = False + +try: + from numpy.lib import NumpyVersion + numpy114 = NumpyVersion(np.__version__) >= '1.14.0' +except ImportError: + numpy114 = False +if numpy114: + # use numpy's floating-point formatter (Dragon4) + default_float_format = '{!s}' +else: + # single-precision floats have ~7.2 decimal digits + default_float_format = '{:.8g}' + + +class ModflowSfr2(Package): + """ + Streamflow-Routing (SFR2) Package Class + + Parameters + ---------- + model : model object + The model object (of type :class:'flopy.modflow.mf.Modflow') to which + this package will be added. + nstrm : integer + An integer value that can be specified to be positive or negative. The + absolute value of NSTRM is equal to the number of stream reaches + (finite-difference cells) that are active during the simulation and + the number of lines of data to be included in Item 2, described below. 
+        When NSTRM is specified to be a negative integer, it is also used as a
+        flag for changing the format of the data input, for simulating
+        unsaturated flow beneath streams, and (or) for simulating transient
+        streamflow routing (for MODFLOW-2005 simulations only), depending
+        on the values specified for variables ISFROPT and IRTFLG, as described
+        below. When NSTRM is negative, NSFRPAR must be set to zero, which means
+        that parameters cannot be specified. By default, nstrm is set to
+        negative.
+    nss : integer
+        An integer value equal to the number of stream segments (consisting of
+        one or more reaches) that are used to define the complete stream
+        network. The value of NSS represents the number of segments that must
+        be defined through a combination of parameters and variables in Item 4
+        or variables in Item 6.
+    nparseg : integer
+        An integer value equal to (or exceeding) the number of stream-segment
+        definitions associated with all parameters. This number can be more
+        than the total number of segments (NSS) in the stream network because
+        the same segment can be defined in multiple parameters, and because
+        parameters can be time-varying. NPARSEG must equal or exceed the sum
+        of NLST x N for all parameters, where N is the greater of 1 and
+        NUMINST; that is, NPARSEG must equal or exceed the total number of
+        repetitions of item 4b. This variable must be zero when NSTRM is
+        negative.
+    const : float
+        A real value (or conversion factor) used in calculating stream depth
+        for each stream reach. If stream depth is not calculated using
+        Manning's equation for any stream segment (that is, ICALC does not
+        equal 1 or 2), then a value of zero can be entered. If Manning's
+        equation is used, a constant of 1.486 is used for flow units of cubic
+        feet per second, and a constant of 1.0 is used for units of cubic
+        meters per second. The constant must be multiplied by 86,400 when
+        using time units of days in the simulation. An explanation of time
+        units used in MODFLOW is given by Harbaugh and others (2000, p. 10).
+    dleak : float
+        A real value equal to the tolerance level of stream depth used in
+        computing leakage between each stream reach and active model cell.
+        Value is in units of length. Usually a value of 0.0001 is sufficient
+        when units of feet or meters are used in the model.
+    ipakcb : integer
+        An integer value used as a flag for writing stream-aquifer leakage
+        values. If ipakcb > 0, unformatted leakage between each stream reach
+        and corresponding model cell will be saved to the main cell-by-cell
+        budget file whenever a cell-by-cell budget has been specified in
+        Output Control (see Harbaugh and others, 2000, pages 52-55). If
+        ipakcb = 0, leakage values will not be printed or saved. Printing to
+        the listing file (ipakcb < 0) is not supported.
+    istcb2 : integer
+        An integer value used as a flag for writing to a separate formatted
+        file all information on inflows and outflows from each reach; on
+        stream depth, width, and streambed conductance; and on head difference
+        and gradient across the streambed. If ISTCB2 > 0, then ISTCB2 also
+        represents the unit number to which all information for each stream
+        reach will be saved to a separate file when a cell-by-cell budget has
+        been specified in Output Control. If ISTCB2 < 0, it is the unit number
+        to which unformatted streamflow out of each reach will be saved to a
+        file whenever the cell-by-cell budget has been specified in Output
+        Control. Unformatted output will be saved to <model name>.sfq.
+ isfropt : integer + An integer value that defines the format of the input data and whether + or not unsaturated flow is simulated beneath streams. Values of ISFROPT + are defined as follows + + 0 No vertical unsaturated flow beneath streams. Streambed elevations, + stream slope, streambed thickness, and streambed hydraulic + conductivity are read for each stress period using variables + defined in Items 6b and 6c; the optional variables in Item 2 are + not used. + 1 No vertical unsaturated flow beneath streams. Streambed elevation, + stream slope, streambed thickness, and streambed hydraulic + conductivity are read for each reach only once at the beginning of + the simulation using optional variables defined in Item 2; Items 6b + and 6c are used to define stream width and depth for ICALC = 0 and + stream width for ICALC = 1. + 2 Streambed and unsaturated-zone properties are read for each reach + only once at the beginning of the simulation using optional + variables defined in Item 2; Items 6b and 6c are used to define + stream width and depth for ICALC = 0 and stream width for + ICALC = 1. When using the LPF Package, saturated vertical + hydraulic conductivity for the unsaturated zone is the same as + the vertical hydraulic conductivity of the corresponding layer in + LPF and input variable UHC is not read. + 3 Same as 2 except saturated vertical hydraulic conductivity for the + unsaturated zone (input variable UHC) is read for each reach. + 4 Streambed and unsaturated-zone properties are read for the + beginning and end of each stream segment using variables defined + in Items 6b and 6c; the optional variables in Item 2 are not used. + Streambed properties can vary each stress period. When using the + LPF Package, saturated vertical hydraulic conductivity for the + unsaturated zone is the same as the vertical hydraulic conductivity + of the corresponding layer in LPF and input variable UHC1 is not + read. + 5 Same as 4 except saturated vertical hydraulic conductivity for the + unsaturated zone (input variable UHC1) is read for each segment at + the beginning of the first stress period only. + + nstrail : integer + An integer value that is the number of trailing wave increments used to + represent a trailing wave. Trailing waves are used to represent a + decrease in the surface infiltration rate. The value can be increased + to improve mass balance in the unsaturated zone. Values between 10 and + 20 work well and result in unsaturated-zone mass balance errors beneath + streams ranging between 0.001 and 0.01 percent. Please see Smith (1983) + for further details. (default is 10; for MODFLOW-2005 simulations only + when isfropt > 1) + isuzn : integer + An integer value that is the maximum number of vertical cells used to + define the unsaturated zone beneath a stream reach. If ICALC is 1 for + all segments then ISUZN should be set to 1. (default is 1; for + MODFLOW-2005 simulations only when isfropt > 1) + nsfrsets : integer + An integer value that is the maximum number of different sets of + trailing waves used to allocate arrays. Arrays are allocated by + multiplying NSTRAIL by NSFRSETS. A value of 30 is sufficient for + problems where the stream depth varies often. NSFRSETS does not affect + model run time. (default is 30; for MODFLOW-2005 simulations only + when isfropt > 1) + irtflg : integer + An integer value that indicates whether transient streamflow routing is + active. IRTFLG must be specified if NSTRM < 0. 
If IRTFLG > 0, + streamflow will be routed using the kinematic-wave equation (see USGS + Techniques and Methods 6-D1, p. 68-69); otherwise, IRTFLG should be + specified as 0. Transient streamflow routing is only available for + MODFLOW-2005; IRTFLG can be left blank for MODFLOW-2000 simulations. + (default is 1) + numtim : integer + An integer value equal to the number of sub time steps used to route + streamflow. The time step that will be used to route streamflow will + be equal to the MODFLOW time step divided by NUMTIM. (default is 2; + for MODFLOW-2005 simulations only when irtflg > 0) + weight : float + A real number equal to the time weighting factor used to calculate the + change in channel storage. WEIGHT has a value between 0.5 and 1. Please + refer to equation 83 in USGS Techniques and Methods 6-D1 for further + details. (default is 0.75; for MODFLOW-2005 simulations only when + irtflg > 0) + flwtol : float + A real number equal to the streamflow tolerance for convergence of the + kinematic wave equation used for transient streamflow routing. A value + of 0.00003 cubic meters per second has been used successfully in test + simulations (and would need to be converted to whatever units are being + used in the particular simulation). (default is 0.0001; for + MODFLOW-2005 simulations only when irtflg > 0) + reach_data : recarray + Numpy record array of length equal to nstrm, with columns for each + variable entered in item 2 (see SFR package input instructions). In + following flopy convention, layer, row, column and node number + (for unstructured grids) are zero-based; segment and reach are + one-based. + segment_data : recarray + Numpy record array of length equal to nss, with columns for each + variable entered in items 6a, 6b and 6c (see SFR package input + instructions). Segment numbers are one-based. + dataset_5 : dict of lists + Optional; will be built automatically from segment_data unless + specified. Dict of lists, with key for each stress period. Each list + contains the variables [itmp, irdflag, iptflag]. (see SFR documentation + for more details): + itmp : list of integers (len = NPER) + For each stress period, an integer value for reusing or reading stream + segment data that can change each stress period. If ITMP = 0 then all + stream segment data are defined by Item 4 (NSFRPAR > 0; number of + stream parameters is greater than 0). If ITMP > 0, then stream segment + data are not defined in Item 4 and must be defined in Item 6 below for + a number of segments equal to the value of ITMP. If ITMP < 0, then + stream segment data not defined in Item 4 will be reused from the last + stress period (Item 6 is not read for the current stress period). ITMP + must be defined >= 0 for the first stress period of a simulation. + irdflag : int or list of integers (len = NPER) + For each stress period, an integer value for printing input data + specified for this stress period. If IRDFLG = 0, input data for this + stress period will be printed. If IRDFLG > 0, then input data for this + stress period will not be printed. + iptflag : int or list of integers (len = NPER) + For each stress period, an integer value for printing streamflow- + routing results during this stress period. If IPTFLG = 0, or whenever + the variable ICBCFL or "Save Budget" is specified in Output Control, + the results for specified time steps during this stress period will be + printed. If IPTFLG > 0, then the results during this stress period will + not be printed. 
+ extension : string + Filename extension (default is 'sfr') + unit_number : int + File unit number (default is None). + filenames : str or list of str + Filenames to use for the package and the output files. If + filenames=None the package name will be created using the model name + and package extension and the cbc output and sfr output name will be + created using the model name and .cbc the .sfr.bin/.sfr.out extensions + (for example, modflowtest.cbc, and modflowtest.sfr.bin), if ipakcbc and + istcb2 are numbers greater than zero. If a single string is passed the + package name will be set to the string and other uzf output files will + be set to the model name with the appropriate output file extensions. + To define the names for all package files (input and output) the + length of the list of strings should be 3. Default is None. + + Attributes + ---------- + outlets : nested dictionary + Contains the outlet for each SFR segment; format is + {per: {segment: outlet}} This attribute is created by the + get_outlets() method. + outsegs : dictionary of arrays + Each array is of shape nss rows x maximum of nss columns. The first + column contains the SFR segments, the second column contains the + outsegs of those segments; the third column the outsegs of the outsegs, + and so on, until all outlets have been encountered, or nss is reached. + The latter case indicates circular routing. This attribute is created + by the get_outlets() method. + + Methods + ------- + + See Also + -------- + + Notes + ----- + Parameters are not supported in FloPy. + + MODFLOW-OWHM is not supported. + + The Ground-Water Transport (GWT) process is not supported. + + Limitations on which features are supported... + + Examples + -------- + + >>> import flopy + >>> ml = flopy.modflow.Modflow() + >>> sfr2 = flopy.modflow.ModflowSfr2(ml, ...) + + """ + _options = OrderedDict([("reachinput", + OptionBlock.simple_flag), + ("transroute", + OptionBlock.simple_flag), + ("tabfiles", + OptionBlock.simple_tabfile), + ("lossfactor", {OptionBlock.dtype: np.bool_, + OptionBlock.nested: True, + OptionBlock.n_nested: 1, + OptionBlock.vars: + {"factor": + OptionBlock.simple_float}}), + ("strhc1kh", {OptionBlock.dtype: np.bool_, + OptionBlock.nested: True, + OptionBlock.n_nested: 1, + OptionBlock.vars: + {"factorkh": + OptionBlock.simple_float}}), + ("strhc1kv", {OptionBlock.dtype: np.bool_, + OptionBlock.nested: True, + OptionBlock.n_nested: 1, + OptionBlock.vars: + {"factorkv": + OptionBlock.simple_float}})]) + + nsfrpar = 0 + heading = '# Streamflow-Routing (SFR2) file for MODFLOW, generated by Flopy' + default_value = 0. 
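Before reading the constructor that follows, a concrete call helps. A minimal sketch of building the package from empty recarrays; the grid dimensions and the two-reach, one-segment geometry are invented, and the values are placeholders only:

    import flopy

    m = flopy.modflow.Modflow()
    dis = flopy.modflow.ModflowDis(m, nlay=1, nrow=3, ncol=3, nper=1)

    # two reaches in one segment; get_empty_reach_data fills defaults
    rd = flopy.modflow.ModflowSfr2.get_empty_reach_data(2)
    rd['k'], rd['i'], rd['j'] = 0, 1, [1, 2]
    rd['iseg'], rd['ireach'], rd['rchlen'] = 1, [1, 2], 100.

    # one segment routing to an outlet (outseg=0)
    sd = flopy.modflow.ModflowSfr2.get_empty_segment_data(1)
    sd['nseg'], sd['icalc'], sd['outseg'] = 1, 1, 0

    sfr = flopy.modflow.ModflowSfr2(m, reach_data=rd, segment_data={0: sd})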
+ # LENUNI = {"u": 0, "f": 1, "m": 2, "c": 3} + len_const = {1: 1.486, 2: 1.0, 3: 100.} + # {"u": 0, "s": 1, "m": 2, "h": 3, "d": 4, "y": 5} + time_const = {1: 1., 2: 60., 3: 3600., 4: 86400., 5: 31557600.} + + def __init__(self, model, nstrm=-2, nss=1, nsfrpar=0, nparseg=0, + const=None, dleak=0.0001, ipakcb=None, istcb2=None, + isfropt=0, + nstrail=10, isuzn=1, nsfrsets=30, irtflg=0, numtim=2, + weight=0.75, flwtol=0.0001, + reach_data=None, + segment_data=None, + channel_geometry_data=None, + channel_flow_data=None, + dataset_5=None, irdflag=0, iptflag=0, + reachinput=False, transroute=False, + tabfiles=False, tabfiles_dict=None, + extension='sfr', unit_number=None, + filenames=None, options=None): + + """ + Package constructor + """ + # set default unit number of one is not specified + if unit_number is None: + unit_number = ModflowSfr2.defaultunit() + + # set filenames + if filenames is None: + filenames = [None, None, None] + elif isinstance(filenames, str): + filenames = [filenames, None, None] + elif isinstance(filenames, list): + if len(filenames) < 3: + for _ in range(len(filenames), 3): + filenames.append(None) + + # update external file information with cbc output, if necessary + if ipakcb is not None: + fname = filenames[1] + model.add_output_file(ipakcb, fname=fname, + package=ModflowSfr2.ftype()) + else: + ipakcb = 0 + + # add sfr flow output file + if istcb2 is not None: + if abs(istcb2) > 0: + binflag = False + ext = 'out' + if istcb2 < 0: + binflag = True + ext = 'bin' + fname = filenames[2] + if fname is None: + fname = model.name + '.sfr.{}'.format(ext) + model.add_output_file(abs(istcb2), fname=fname, + binflag=binflag, + package=ModflowSfr2.ftype()) + else: + istcb2 = 0 + + # Fill namefile items + name = [ModflowSfr2.ftype()] + units = [unit_number] + extra = [''] + + # set package name + fname = [filenames[0]] + + # Call ancestor's init to set self.parent, extension, name and unit number + Package.__init__(self, model, extension=extension, name=name, + unit_number=units, extra=extra, filenames=fname) + + self.url = 'sfr2.htm' + self._graph = None # dict of routing connections + + # Dataset 0 + self.heading = '# {} package for '.format(self.name[0]) + \ + ' {}, '.format(model.version_types[model.version]) + \ + 'generated by Flopy.' + + # Dataset 1a and 1b + self.reachinput = reachinput + self.transroute = transroute + self.tabfiles = tabfiles + self.tabfiles_dict = tabfiles_dict + self.numtab = 0 if not tabfiles else len(tabfiles_dict) + self.maxval = np.max([tb['numval'] for tb in + tabfiles_dict.values()]) if self.numtab > 0 else 0 + + if options is None: + if (reachinput, transroute, tabfiles) != (False, False, False): + options = OptionBlock("", ModflowSfr2, block=False) + + self.options = options + + # Dataset 1c. + # number of reaches, negative value is flag for unsat. 
+ # flow beneath streams and/or transient routing + self._nstrm = np.sign(nstrm) * len( + reach_data) if reach_data is not None else nstrm + if segment_data is not None: + # segment_data is a zero-d array + if not isinstance(segment_data, dict): + if len(segment_data.shape) == 0: + segment_data = np.atleast_1d(segment_data) + nss = len(segment_data) + segment_data = {0: segment_data} + nss = len(set(reach_data["iseg"])) + else: + pass + # use atleast_1d for length since segment_data might be a 0D array + # this seems to be OK, because self.segment_data is produced by the constructor (never 0D) + self.nsfrpar = nsfrpar + self.nparseg = nparseg + # conversion factor used in calculating stream depth for stream reach (icalc = 1 or 2) + self._const = const if const is not None else None + self.dleak = dleak # tolerance level of stream depth used in computing leakage + + self.ipakcb = ipakcb + # flag; unit number for writing table of SFR output to text file + self.istcb2 = istcb2 + + # if nstrm < 0 + # defines the format of the input data and whether or not unsaturated flow is simulated + self.isfropt = isfropt + + # if isfropt > 1 + # number of trailing wave increments + self.nstrail = nstrail + # max number of vertical cells used to define unsat. zone + self.isuzn = isuzn + # max number trailing waves sets + self.nsfrsets = nsfrsets + + # if nstrm < 0 (MF-2005 only) + # switch for transient streamflow routing (> 0 = kinematic wave) + self.irtflg = irtflg + # if irtflg > 0 + # number of subtimesteps used for routing + self.numtim = numtim + # time weighting factor used to calculate the change in channel storage + self.weight = weight + # streamflow tolerance for convergence of the kinematic wave equation + self.flwtol = flwtol + + # Dataset 2. + self.reach_data = self.get_empty_reach_data(np.abs(self._nstrm)) + if reach_data is not None: + for n in reach_data.dtype.names: + self.reach_data[n] = reach_data[n] + + # assign node numbers if there are none (structured grid) + if np.diff( + self.reach_data.node).max() == 0 and self.parent.has_package( + 'DIS'): + # first make kij list + lrc = np.array(self.reach_data)[['k', 'i', 'j']].tolist() + self.reach_data['node'] = self.parent.dis.get_node(lrc) + # assign unique ID and outreach columns to each reach + self.reach_data.sort(order=['iseg', 'ireach']) + new_cols = {'reachID': np.arange(1, len(self.reach_data) + 1), + 'outreach': np.zeros(len(self.reach_data))} + for k, v in new_cols.items(): + if k not in self.reach_data.dtype.names: + recfunctions.append_fields(self.reach_data, names=k, data=v, + asrecarray=True) + # create a stress_period_data attribute to enable parent functions (e.g. plot) + self.stress_period_data = MfList(self, self.reach_data, + dtype=self.reach_data.dtype) + + # Datasets 4 and 6. 
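The node numbers assigned just above come from dis.get_node; for a structured grid this amounts to row-major flattening of (layer, row, column). A sketch of that arithmetic with invented grid dimensions and 0-based indices (a sketch of the idea, not flopy's exact implementation):

    import numpy as np

    nlay, nrow, ncol = 2, 10, 12           # invented grid dimensions
    k = np.array([0, 1])
    i = np.array([3, 3])
    j = np.array([4, 4])

    node = k * nrow * ncol + i * ncol + j  # row-major (k, i, j) -> cell index
    print(node)                            # -> [ 40 160]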
+ + # list of values that indicate segments outside of the model + # (depending on how SFR package was constructed) + self.not_a_segment_values = [999999] + + self._segments = None + self.segment_data = {0: self.get_empty_segment_data(nss)} + if segment_data is not None: + for i in segment_data.keys(): + nseg = len(segment_data[i]) + self.segment_data[i] = self.get_empty_segment_data(nseg) + for n in segment_data[i].dtype.names: + # inds = (segment_data[i]['nseg'] -1).astype(int) + self.segment_data[i][n] = segment_data[i][n] + # compute outreaches if nseg and outseg columns have non-default values + if np.diff(self.reach_data.iseg).max() != 0 and \ + np.max(list(set(self.graph.keys()))) != 0 \ + and np.max(list(set(self.graph.values()))) != 0: + if len(self.graph) == 1: + self.segment_data[0]['nseg'] = 1 + self.reach_data['iseg'] = 1 + + consistent_seg_numbers = len(set(self.reach_data.iseg).difference( + set(self.graph.keys()))) == 0 + if not consistent_seg_numbers: + warnings.warn( + "Inconsistent segment numbers of reach_data and segment_data") + + # first convert any not_a_segment_values to 0 + for v in self.not_a_segment_values: + self.segment_data[0].outseg[ + self.segment_data[0].outseg == v] = 0 + self.set_outreaches() + self.channel_geometry_data = channel_geometry_data + self.channel_flow_data = channel_flow_data + + # Dataset 5 + # set by property from segment_data unless specified manually + self._dataset_5 = dataset_5 + self.irdflag = irdflag + self.iptflag = iptflag + + # Attributes not included in SFR package input + # dictionary of arrays; see Attributes section of documentation + self.outsegs = {} + # nested dictionary of format {per: {segment: outlet}} + self.outlets = {} + # input format checks: + assert isfropt in [0, 1, 2, 3, 4, 5] + + # derived attributes + self._paths = None + + self.parent.add_package(self) + + def __setattr__(self, key, value): + if key == "nstrm": + super(ModflowSfr2, self). \ + __setattr__("_nstrm", value) + elif key == "dataset_5": + super(ModflowSfr2, self). \ + __setattr__("_dataset_5", value) + elif key == "segment_data": + super(ModflowSfr2, self). \ + __setattr__("segment_data", value) + self._dataset_5 = None + elif key == "const": + super(ModflowSfr2, self). \ + __setattr__("_const", value) + else: # return to default behavior of pakbase + super(ModflowSfr2, self).__setattr__(key, value) + + @property + def const(self): + if self._const is None: + const = self.len_const[self.parent.dis.lenuni] * \ + self.time_const[self.parent.dis.itmuni] + else: + const = self._const + return const + + @property + def nss(self): + # number of stream segments + return len(set(self.reach_data["iseg"])) + + @property + def nstrm(self): + return np.sign(self._nstrm) * len(self.reach_data) + + @property + def nper(self): + nper = self.parent.nrow_ncol_nlay_nper[-1] + nper = 1 if nper == 0 else nper # otherwise iterations from 0, nper won't run + return nper + + @property + def dataset_5(self): + """ + auto-update itmp so it is consistent with segment_data. 
+ """ + ds5 = self._dataset_5 + nss = self.nss + if ds5 is None: + irdflag = self._get_flag('irdflag') + iptflag = self._get_flag('iptflag') + ds5 = {0: [nss, irdflag[0], iptflag[0]]} + for per in range(1, self.nper): + sd = self.segment_data.get(per, None) + if sd is None: + ds5[per] = [-nss, irdflag[per], iptflag[per]] + else: + ds5[per] = [len(sd), irdflag[per], iptflag[per]] + return ds5 + + @property + def graph(self): + """Dictionary of routing connections between segments.""" + if self._graph is None: + self._graph = self._make_graph() + return self._graph + + @property + def paths(self): + if self._paths is None: + self._set_paths() + return self._paths + # check to see if routing in segment data was changed + nseg = np.array(sorted(self._paths.keys()), dtype=int) + nseg = nseg[nseg > 0].copy() + outseg = np.array([self._paths[k][1] for k in nseg]) + existing_nseg = sorted(list(self.graph.keys())) + existing_outseg = [self.graph[k] for k in existing_nseg] + if not np.array_equal(nseg, existing_nseg) or \ + not np.array_equal(outseg, existing_outseg): + self._set_paths() + return self._paths + + @property + def df(self): + if pd: + return pd.DataFrame(self.reach_data) + else: + msg = 'ModflowSfr2.df: pandas not available' + raise ImportError(msg) + + def _make_graph(self): + # get all segments and their outseg + graph = {} + for recarray in self.segment_data.values(): + graph.update(dict(zip(recarray['nseg'], recarray['outseg']))) + + outlets = set(graph.values()).difference( + set(graph.keys())) # including lakes + graph.update({o: 0 for o in outlets if o != 0}) + return graph + + def _set_paths(self): + graph = self.graph + self._paths = {seg: find_path(graph, seg) for seg in graph.keys()} + + def _get_flag(self, flagname): + """ + populate values for each stress period + """ + flg = self.__dict__[flagname] + flg = [flg] if np.isscalar(flg) else flg + if len(flg) < self.nper: + return flg + [flg[-1]] * (self.nper - len(flg)) + return flg + + @staticmethod + def get_empty_reach_data(nreaches=0, aux_names=None, structured=True, + default_value=0.): + # get an empty recarray that corresponds to dtype + dtype = ModflowSfr2.get_default_reach_dtype(structured=structured) + if aux_names is not None: + dtype = Package.add_to_dtype(dtype, aux_names, np.float32) + d = create_empty_recarray(nreaches, dtype, default_value=default_value) + d['reachID'] = np.arange(1, nreaches + 1) + return d + + @staticmethod + def get_empty_segment_data(nsegments=0, aux_names=None, default_value=0.): + # get an empty recarray that corresponds to dtype + dtype = ModflowSfr2.get_default_segment_dtype() + if aux_names is not None: + dtype = Package.add_to_dtype(dtype, aux_names, np.float32) + d = create_empty_recarray(nsegments, dtype, + default_value=default_value) + return d + + @staticmethod + def get_default_reach_dtype(structured=True): + if structured: + # include node column for structured grids (useful for indexing) + return np.dtype([('node', np.int), + ('k', np.int), + ('i', np.int), + ('j', np.int), + ('iseg', np.int), + ('ireach', np.int), + ('rchlen', np.float32), + ('strtop', np.float32), + ('slope', np.float32), + ('strthick', np.float32), + ('strhc1', np.float32), + ('thts', np.float32), + ('thti', np.float32), + ('eps', np.float32), + ('uhc', np.float32), + ('reachID', np.int), + ('outreach', np.int)]) + else: + return np.dtype([('node', np.int), + ('iseg', np.int), + ('ireach', np.int), + ('rchlen', np.float32), + ('strtop', np.float32), + ('slope', np.float32), + ('strthick', np.float32), + 
('strhc1', np.float32),
+                         ('thts', np.float32),
+                         ('thti', np.float32),
+                         ('eps', np.float32),
+                         ('uhc', np.float32),
+                         ('reachID', np.int),
+                         ('outreach', np.int)])
+
+    @staticmethod
+    def get_default_segment_dtype():
+        return np.dtype([('nseg', np.int),
+                         ('icalc', np.int),
+                         ('outseg', np.int),
+                         ('iupseg', np.int),
+                         ('iprior', np.int),
+                         ('nstrpts', np.int),
+                         ('flow', np.float32),
+                         ('runoff', np.float32),
+                         ('etsw', np.float32),
+                         ('pptsw', np.float32),
+                         ('roughch', np.float32),
+                         ('roughbk', np.float32),
+                         ('cdpth', np.float32),
+                         ('fdpth', np.float32),
+                         ('awdth', np.float32),
+                         ('bwdth', np.float32),
+                         ('hcond1', np.float32),
+                         ('thickm1', np.float32),
+                         ('elevup', np.float32),
+                         ('width1', np.float32),
+                         ('depth1', np.float32),
+                         ('thts1', np.float32),
+                         ('thti1', np.float32),
+                         ('eps1', np.float32),
+                         ('uhc1', np.float32),
+                         ('hcond2', np.float32),
+                         ('thickm2', np.float32),
+                         ('elevdn', np.float32),
+                         ('width2', np.float32),
+                         ('depth2', np.float32),
+                         ('thts2', np.float32),
+                         ('thti2', np.float32),
+                         ('eps2', np.float32),
+                         ('uhc2', np.float32)])
+
+    @staticmethod
+    def load(f, model, nper=None, gwt=False, nsol=1, ext_unit_dict=None):
+
+        if model.verbose:
+            sys.stdout.write('loading sfr2 package file...\n')
+
+        tabfiles = False
+        tabfiles_dict = {}
+        transroute = False
+        reachinput = False
+        structured = model.structured
+        if nper is None:
+            nper = model.nper
+            nper = 1 if nper == 0 else nper  # otherwise iterations from 0, nper won't run
+
+        openfile = not hasattr(f, 'read')
+        if openfile:
+            filename = f
+            f = open(filename, 'r')
+
+        # Item 0 -- header
+        while True:
+            line = f.readline()
+            if line[0] != '#':
+                break
+
+        options = None
+        if model.version == "mfnwt" and "options" in line.lower():
+            options = OptionBlock.load_options(f, ModflowSfr2)
+
+        else:
+            query = ("reachinput", "transroute", "tabfiles",
+                     "lossfactor", "strhc1kh", "strhc1kv")
+            for i in query:
+                if i in line.lower():
+                    options = OptionBlock(line.lower().strip(),
+                                          ModflowSfr2, block=False)
+                    break
+
+        if options is not None:
+            line = f.readline()
+            # check for 1b in modflow-2005
+            if "tabfile" in line.lower():
+                t = line.strip().split()
+                options.tabfiles = True
+                options.numtab = int(t[1])
+                options.maxval = int(t[2])
+                line = f.readline()
+
+            # set variables to be passed to class args
+            transroute = options.transroute
+            reachinput = options.reachinput
+            tabfiles = isinstance(options.tabfiles, np.ndarray)
+            numtab = options.numtab if tabfiles else 0
+
+        # item 1c
+        nstrm, nss, nsfrpar, nparseg, const, dleak, ipakcb, istcb2, \
+        isfropt, nstrail, isuzn, nsfrsets, \
+        irtflg, numtim, weight, flwtol, option = _parse_1c(line,
+                                                           reachinput=reachinput,
+                                                           transroute=transroute)
+
+        # item 2
+        # set column names, dtypes
+        names = _get_item2_names(nstrm, reachinput, isfropt, structured)
+        dtypes = [d for d in ModflowSfr2.get_default_reach_dtype().descr
+                  if d[0] in names]
+
+        lines = []
+        for i in range(abs(nstrm)):
+            line = f.readline()
+            line = line_parse(line)
+            ireach = tuple(map(float, line[:len(dtypes)]))
+            lines.append(ireach)
+
+        tmp = np.array(lines, dtype=dtypes)
+        # initialize full reach_data array with all possible columns
+        reach_data = ModflowSfr2.get_empty_reach_data(len(lines))
+        for n in names:
+            reach_data[n] = tmp[
+                n]  # not sure if there's a way to assign multiple columns
+
+        # zero-based convention
+        inds = ['k', 'i', 'j'] if structured else ['node']
+        _markitzero(reach_data, inds)
+
+        # items 3 and 4 are skipped (parameters not supported)
+        # item 5
+        segment_data = {}
+        channel_geometry_data = {}
+        channel_flow_data = {}
+        dataset_5 = {}
+        aux_variables = {}  # not sure where the auxiliary variables are supposed to go
+        for i in range(0, nper):
+            # Dataset 5
+            dataset_5[i] = _get_dataset(f.readline(), [-1, 0, 0, 0])
+            itmp = dataset_5[i][0]
+            if itmp > 0:
+                # Item 6
+                current = ModflowSfr2.get_empty_segment_data(nsegments=itmp,
+                                                             aux_names=option)
+                # container to hold any auxiliary variables
+                current_aux = {}
+                # these could also be implemented as structured arrays with a column for segment number
+                current_6d = {}
+                current_6e = {}
+                for j in range(itmp):
+                    dataset_6a = _parse_6a(f.readline(), option)
+                    current_aux[j] = dataset_6a[-1]
+                    dataset_6a = dataset_6a[:-1]  # drop xyz
+                    icalc = dataset_6a[1]
+                    # link dataset 6d, 6e by nseg of dataset_6a
+                    temp_nseg = dataset_6a[0]
+                    # datasets 6b and 6c aren't read under the conditions below;
+                    # see the table under the description of dataset 6c
+                    # in the MODFLOW Online Guide for a description of this logic
+                    # https://water.usgs.gov/ogw/modflow-nwt/MODFLOW-NWT-Guide/sfr.htm
+                    dataset_6b, dataset_6c = (0,) * 9, (0,) * 9
+                    if not (isfropt in [2, 3] and icalc == 1 and i > 1) and \
+                            not (isfropt in [1, 2, 3] and icalc >= 2):
+                        dataset_6b = _parse_6bc(f.readline(), icalc, nstrm,
+                                                isfropt,
+                                                reachinput, per=i)
+                        dataset_6c = _parse_6bc(f.readline(), icalc, nstrm,
+                                                isfropt,
+                                                reachinput, per=i)
+                    current[j] = dataset_6a + dataset_6b + dataset_6c
+
+                    if icalc == 2:
+                        # ATL: not sure exactly how isfropt logic functions for this;
+                        # the dataset 6d description suggests that this line isn't read for isfropt > 1,
+                        # but the description of icalc suggests that icalc=2 (8-point channel) can be used with any isfropt
+                        if i == 0 or nstrm > 0 and not reachinput or isfropt <= 1:
+                            dataset_6d = []
+                            for _ in range(2):
+                                dataset_6d.append(
+                                    _get_dataset(f.readline(), [0.0] * 8))
+                            current_6d[temp_nseg] = dataset_6d
+                    if icalc == 4:
+                        nstrpts = dataset_6a[5]
+                        dataset_6e = []
+                        for _ in range(3):
+                            dataset_6e.append(
+                                _get_dataset(f.readline(), [0.0] * nstrpts))
+                        current_6e[temp_nseg] = dataset_6e
+
+                segment_data[i] = current
+                aux_variables[j + 1] = current_aux
+                if len(current_6d) > 0:
+                    channel_geometry_data[i] = current_6d
+                if len(current_6e) > 0:
+                    channel_flow_data[i] = current_6e
+
+                if tabfiles and i == 0:
+                    for j in range(numtab):
+                        segnum, numval, iunit = map(int,
+                                                    f.readline().strip().split())
+                        tabfiles_dict[segnum] = {'numval': numval,
+                                                 'iunit': iunit}
+
+            else:
+                continue
+
+        if openfile:
+            f.close()
+
+        # determine specified unit number
+        unitnumber = None
+        filenames = [None, None, None]
+        if ext_unit_dict is not None:
+            for key, value in ext_unit_dict.items():
+                if value.filetype == ModflowSfr2.ftype():
+                    unitnumber = key
+                    filenames[0] = os.path.basename(value.filename)
+
+                if ipakcb > 0:
+                    if key == ipakcb:
+                        filenames[1] = os.path.basename(value.filename)
+                        model.add_pop_key_list(key)
+
+                if abs(istcb2) > 0:
+                    if key == abs(istcb2):
+                        filenames[2] = os.path.basename(value.filename)
+                        model.add_pop_key_list(key)
+
+        return ModflowSfr2(model, nstrm=nstrm, nss=nss, nsfrpar=nsfrpar,
+                           nparseg=nparseg, const=const, dleak=dleak,
+                           ipakcb=ipakcb, istcb2=istcb2,
+                           isfropt=isfropt, nstrail=nstrail, isuzn=isuzn,
+                           nsfrsets=nsfrsets, irtflg=irtflg,
+                           numtim=numtim, weight=weight, flwtol=flwtol,
+                           reach_data=reach_data,
+                           segment_data=segment_data,
+                           dataset_5=dataset_5,
+                           channel_geometry_data=channel_geometry_data,
+ channel_flow_data=channel_flow_data, + reachinput=reachinput, transroute=transroute, + tabfiles=tabfiles, tabfiles_dict=tabfiles_dict, + unit_number=unitnumber, filenames=filenames, + options=options) + + def check(self, f=None, verbose=True, level=1, checktype=None): + """ + Check sfr2 package data for common errors. + + Parameters + ---------- + f : str or file handle + String defining file name or file handle for summary file + of check method output. If a string is passed a file handle + is created. If f is None, check method does not write + results to a summary file. (default is None) + verbose : bool + Boolean flag used to determine if check method results are + written to the screen + level : int + Check method analysis level. If level=0, summary checks are + performed. If level=1, full checks are performed. + + Returns + ------- + None + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow.load('model.nam') + >>> m.sfr2.check() + """ + self._graph = None # remake routing graph from segment data + chk = check(self, verbose=verbose, level=level) + chk.for_nans() + chk.numbering() + chk.routing() + chk.overlapping_conductance() + chk.elevations() + chk.slope() + + if f is not None: + if isinstance(f, str): + pth = os.path.join(self.parent.model_ws, f) + f = open(pth, 'w') + f.write('{}\n'.format(chk.txt)) + # f.close() + return chk + + def assign_layers(self, adjust_botms=False, pad=1.): + """ + Assigns the appropriate layer for each SFR reach, + based on cell bottoms at location of reach. + + Parameters + ---------- + adjust_botms : bool + Streambed bottom elevations below the model bottom + will cause an error in MODFLOW. If True, adjust + bottom elevations in lowest layer of the model + so they are at least pad distance below any co-located + streambed elevations. + pad : scalar + Minimum distance below streambed bottom to set + any conflicting model bottom elevations. + + Notes + ----- + Streambed bottom = strtop - strthick + This routine updates the elevations in the botm array + of the flopy.model.ModflowDis instance. To produce a + new DIS package file, model.write() or flopy.model.ModflowDis.write() + must be run. + + """ + streambotms = self.reach_data.strtop - self.reach_data.strthick + i, j = self.reach_data.i, self.reach_data.j + layers = self.parent.dis.get_layer(i, j, streambotms) + + # check against model bottom + logfile = 'sfr_botm_conflicts.chk' + mbotms = self.parent.dis.botm.array[-1, i, j] + below = streambotms <= mbotms + below_i = self.reach_data.i[below] + below_j = self.reach_data.j[below] + l = [] + header = '' + if np.any(below): + print('Warning: SFR streambed elevations below model bottom. 
' + 'See sfr_botm_conflicts.chk') + if not adjust_botms: + l += [below_i, + below_j, + mbotms[below], + streambotms[below]] + header += 'i,j,model_botm,streambed_botm' + else: + print('Fixing elevation conflicts...') + botm = self.parent.dis.botm.array.copy() + for ib, jb in zip(below_i, below_j): + inds = (self.reach_data.i == ib) & ( + self.reach_data.j == jb) + botm[-1, ib, jb] = streambotms[inds].min() - pad + # l.append(botm[-1, ib, jb]) + # botm[-1, below_i, below_j] = streambotms[below] - pad + l.append(botm[-1, below_i, below_j]) + header += ',new_model_botm' + self.parent.dis.botm = botm + mbotms = self.parent.dis.botm.array[-1, i, j] + assert not np.any(streambotms <= mbotms) + print('New bottom array assigned to Flopy DIS package ' + 'instance.\nRun flopy.model.write() or ' + 'flopy.model.ModflowDis.write() to write new DIS file.') + header += '\n' + + with open(logfile, 'w') as log: + log.write(header) + a = np.array(l).transpose() + for line in a: + log.write(','.join(map(str, line)) + '\n') + self.reach_data['k'] = layers + + def deactivate_ibound_above(self): + """ + Sets ibound to 0 for all cells above active SFR cells. + + Parameters + ---------- + none + + Notes + ----- + This routine updates the ibound array of the flopy.model.ModflowBas6 + instance. To produce a new BAS6 package file, model.write() or + flopy.model.ModflowBas6.write() must be run. + + """ + ib = self.parent.bas6.ibound.array + deact_lays = [list(range(i)) for i in self.reach_data.k] + for ks, i, j in zip(deact_lays, self.reach_data.i, self.reach_data.j): + for k in ks: + ib[k, i, j] = 0 + self.parent.bas6.ibound = ib + + def get_outlets(self, level=0, verbose=True): + """ + Traces all routing connections from each headwater to the outlet. + """ + txt = '' + for per in range(self.nper): + if per > 0 > self.dataset_5[per][ + 0]: # skip stress periods where seg data not defined + continue + # segments = self.segment_data[per].nseg + # outsegs = self.segment_data[per].outseg + # + # all_outsegs = np.vstack([segments, outsegs]) + # max_outseg = all_outsegs[-1].max() + # knt = 1 + # while max_outseg > 0: + # + # nextlevel = np.array([outsegs[s - 1] if s > 0 and s < 999999 else 0 + # for s in all_outsegs[-1]]) + # + # all_outsegs = np.vstack([all_outsegs, nextlevel]) + # max_outseg = nextlevel.max() + # if max_outseg == 0: + # break + # knt += 1 + # if knt > self.nss: + # # subset outsegs map to only include rows with outseg number > 0 in last column + # circular_segs = all_outsegs.T[all_outsegs[-1] > 0] + # + # # only retain one instance of each outseg number at iteration=nss + # vals = [] # append outseg values to vals after they've appeared once + # mask = [(True, vals.append(v))[0] + # if v not in vals + # else False for v in circular_segs[-1]] + # circular_segs = circular_segs[:, np.array(mask)] + # + # # cull the circular segments array to remove duplicate instances of routing circles + # circles = [] + # duplicates = [] + # for i in range(np.shape(circular_segs)[0]): + # # find where values in the row equal the last value; + # # record the index of the second to last instance of last value + # repeat_start_ind = np.where(circular_segs[i] == circular_segs[i, -1])[0][-2:][0] + # # use that index to slice out the repeated segment sequence + # circular_seq = circular_segs[i, repeat_start_ind:].tolist() + # # keep track of unique sequences of repeated segments + # if set(circular_seq) not in circles: + # circles.append(set(circular_seq)) + # duplicates.append(False) + # else: + # duplicates.append(True) + # 
circular_segs = circular_segs[~np.array(duplicates), :] + # + txt += '{0} instances where an outlet was not found after {1} consecutive segments!\n' \ + # .format(len(circular_segs), self.nss) + # if level == 1: + # txt += '\n'.join([' '.join(map(str, row)) for row in circular_segs]) + '\n' + # else: + # f = 'circular_routing.csv' + # np.savetxt(f, circular_segs, fmt='%d', delimiter=',', header=txt) + # txt += 'See {} for details.'.format(f) + # if verbose: + # print(txt) + # break + # # the array of segment sequence is useful for other operations, + # # such as plotting elevation profiles + # self.outsegs[per] = all_outsegs + # + # use graph instead of above loop + nrow = len(self.segment_data[per].nseg) + ncol = np.max( + [len(v) if v is not None else 0 for v in self.paths.values()]) + all_outsegs = np.zeros((nrow, ncol), dtype=int) + for i, (k, v) in enumerate(self.paths.items()): + if k > 0: + all_outsegs[i, :len(v)] = v + all_outsegs.sort(axis=0) + self.outsegs[per] = all_outsegs + # create a dictionary listing outlets associated with each segment + # outlet is the last value in each row of outseg array that is != 0 or 999999 + # self.outlets[per] = {i + 1: r[(r != 0) & (r != 999999)][-1] + # if len(r[(r != 0) & (r != 999999)]) > 0 + # else i + 1 + # for i, r in enumerate(all_outsegs.T)} + self.outlets[per] = {k: self.paths[k][-1] if k in self.paths + else k for k in self.segment_data[per].nseg} + return txt + + def reset_reaches(self): + self.reach_data.sort(order=['iseg', 'ireach']) + reach_data = self.reach_data + segment_data = list(set(self.reach_data.iseg))  # self.segment_data[0] + reach_counts = np.bincount(reach_data.iseg)[1:] + reach_counts = dict(zip(range(1, len(reach_counts) + 1), + reach_counts)) + ireach = [list(range(1, reach_counts[s] + 1)) + for s in segment_data] + ireach = np.concatenate(ireach) + self.reach_data['ireach'] = ireach + + def set_outreaches(self): + """ + Determine the outreach for each SFR reach (requires a reachID + column in reach_data). Uses the segment routing specified for the + first stress period to route reaches between segments. + """ + self.reach_data.sort(order=['iseg', 'ireach']) + # ensure that each segment starts with reach 1 + self.reset_reaches() + # ensure that all outsegs are segments, outlets, or negative (lakes) + self.repair_outsegs() + rd = self.reach_data + outseg = self.graph + reach1IDs = dict(zip(rd[rd.ireach == 1].iseg, + rd[rd.ireach == 1].reachID)) + outreach = [] + for i in range(len(rd)): + # if at the end of reach data or current segment + if i + 1 == len(rd) or rd.ireach[i + 1] == 1: + nextseg = outseg[rd.iseg[i]] # get next segment + if nextseg > 0: # current reach is not an outlet + nextrchid = reach1IDs[ + nextseg] # get reach 1 of next segment + else: + nextrchid = 0 + else: # otherwise, it's the next reachID + nextrchid = rd.reachID[i + 1] + outreach.append(nextrchid) + self.reach_data['outreach'] = outreach + + def get_slopes(self, default_slope=0.001, minimum_slope=0.0001, + maximum_slope=1.): + """ + Compute slopes by reach using values in strtop (streambed top) + and rchlen (reach length) columns of reach_data. The slope for a + reach n is computed as (strtop(n) - strtop(n+1)) / rchlen(n). + Slopes for outlet reaches are set equal to a default value + (default_slope). Populates the slope column in reach_data. + + Parameters + ---------- + default_slope : float + Slope value applied to outlet reaches + (where water leaves the model).
Default value is 0.001. + minimum_slope : float + Assigned to reaches with computed slopes less than this value. + This ensures that Manning's equation won't produce unreasonable + values of stage (in other words, that stage is consistent with + the assumption that streamflow is primarily driven by the streambed + gradient). Default value is 0.0001. + maximum_slope : float + Assigned to reaches with computed slopes greater than this value. + Default value is 1. + + """ + # compute outreaches if they aren't there already + if np.diff(self.reach_data.outreach).max() == 0: + self.set_outreaches() + rd = self.reach_data + elev = dict(zip(rd.reachID, rd.strtop)) + dist = dict(zip(rd.reachID, rd.rchlen)) + dnelev = {rid: elev[rd.outreach[i]] if rd.outreach[i] != 0 + else -9999 for i, rid in enumerate(rd.reachID)} + slopes = np.array( + [(elev[i] - dnelev[i]) / dist[i] if dnelev[i] != -9999 + else default_slope for i in rd.reachID]) + slopes[slopes < minimum_slope] = minimum_slope + slopes[slopes > maximum_slope] = maximum_slope + self.reach_data['slope'] = slopes + + def get_upsegs(self): + """ + From segment_data, returns nested dict of all upstream segments by + segment, by stress period. + + Returns + ------- + all_upsegs : dict + Nested dictionary of form + {stress period: {segment: [list of upsegs]}} + + Notes + ----- + This method will not work if there are instances of circular routing. + + """ + all_upsegs = {} + for per in range(self.nper): + if per > 0 > self.dataset_5[per][ + 0]: # skip stress periods where seg data not defined + continue + segment_data = self.segment_data[per] + + # make a list of adjacent upsegments keyed to outseg list in Mat2 + upsegs = {o: segment_data.nseg[segment_data.outseg == o].tolist() + for o in np.unique(segment_data.outseg)} + + outsegs = [k for k in list(upsegs.keys()) if + k > 0] # exclude 0, which is the outlet designator + + # for each outseg key, for each upseg, check for more upsegs, + # append until the headwaters have been reached + for outseg in outsegs: + + up = True + upsegslist = upsegs[outseg] + while up: + added_upsegs = [] + for us in upsegslist: + if us in outsegs: + added_upsegs += upsegs[us] + if len(added_upsegs) == 0: + up = False + break + else: + upsegslist = added_upsegs + upsegs[outseg] += added_upsegs + + # the above algorithm is recursive, so lower order streams + # get duplicated many times; use a set to get unique upsegs + all_upsegs[per] = {u: list(set(upsegs[u])) for u in outsegs} + return all_upsegs + + def get_variable_by_stress_period(self, varname): + + dtype = [] + all_data = np.zeros((self.nss, self.nper), dtype=float) + for per in range(self.nper): + inds = self.segment_data[per].nseg - 1 + all_data[inds, per] = self.segment_data[per][varname] + dtype.append(('{}{}'.format(varname, per), float)) + isvar = all_data.sum(axis=1) != 0 + ra = np.core.records.fromarrays(all_data[isvar].transpose().copy(), + dtype=dtype) + segs = self.segment_data[0].nseg[isvar] + isseg = np.array( + [True if s in segs else False for s in self.reach_data.iseg]) + isinlet = isseg & (self.reach_data.ireach == 1) + rd = np.array(self.reach_data[isinlet])[ + ['k', 'i', 'j', 'iseg', 'ireach']] + ra = recfunctions.merge_arrays([rd, ra], flatten=True, usemask=False) + return ra.view(np.recarray) + + def repair_outsegs(self): + isasegment = np.in1d(self.segment_data[0].outseg, + self.segment_data[0].nseg) + isasegment = isasegment | (self.segment_data[0].outseg < 0) + self.segment_data[0]['outseg'][~isasegment] = 0.
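+        # invalidate the cached routing graph so it is rebuilt from the repaired outseg values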
+ self._graph = None + + def renumber_segments(self): + """ + Renumber segments so that segment numbering is continuous and always + increases in the downstream direction. This may speed convergence of + the NWT solver in some situations. + + Returns + ------- + r : dictionary mapping old segment numbers to new + """ + + nseg = sorted(list(self.graph.keys())) + outseg = [self.graph[k] for k in nseg] + + # explicitly fix any gaps in the numbering + # (i.e. from removing segments) + nseg2 = np.arange(1, len(nseg) + 1) + # intermediate mapping that closes any gaps in the numbering + r1 = dict(zip(nseg, nseg2)) + r1[0] = 0 + outseg2 = np.array([r1[s] for s in outseg]) + + # function re-assigning upseg numbers consecutively at one level + # relative to outlet(s). Counts down from the number of segments + def reassign_upsegs(r, nexts, upsegs): + nextupsegs = [] + for u in upsegs: + r[u] = nexts if u > 0 else u # handle lakes + nexts -= 1 + nextupsegs += list(nseg2[outseg2 == u]) + return r, nexts, nextupsegs + + ns = len(nseg) + + # start at outlets with nss; + # renumber upsegs consecutively at each level + # until all headwaters have been reached + nexts = ns + r2 = {0: 0} + nextupsegs = nseg2[outseg2 == 0] + for _ in range(ns): + r2, nexts, nextupsegs = reassign_upsegs(r2, nexts, nextupsegs) + if len(nextupsegs) == 0: + break + # map original segment numbers to new numbers + r = {k: r2.get(v, v) for k, v in r1.items()} + + # renumber segments in all stress period data + for per in self.segment_data.keys(): + self.segment_data[per]['nseg'] = [r.get(s, s) for s in + self.segment_data[per].nseg] + self.segment_data[per]['outseg'] = [r.get(s, s) for s in + self.segment_data[per].outseg] + self.segment_data[per].sort(order='nseg') + nseg = self.segment_data[per].nseg + outseg = self.segment_data[per].outseg + inds = (outseg > 0) & (nseg > outseg) + assert not np.any(inds) + assert len(self.segment_data[per]['nseg']) == \ + self.segment_data[per]['nseg'].max() + self._graph = None # reset routing dict + + # renumber segments in reach_data + self.reach_data['iseg'] = [r.get(s, s) for s in self.reach_data.iseg] + self.reach_data.sort(order=['iseg', 'ireach']) + self.reach_data['reachID'] = np.arange(1, len(self.reach_data) + 1) + self.set_outreaches() # reset the outreaches to ensure continuity + + # renumber segments in other datasets + def renumber_channel_data(d): + if d is not None: + d2 = {} + for k, v in d.items(): + d2[k] = {} + for s, vv in v.items(): + d2[k][r[s]] = vv + else: + d2 = None + return d2 + + self.channel_geometry_data = renumber_channel_data( + self.channel_geometry_data) + self.channel_flow_data = renumber_channel_data(self.channel_flow_data) + return r + + def plot_path(self, start_seg=None, end_seg=0, plot_segment_lines=True): + """ + Plot a profile of streambed elevation and model top + along a path of segments. + + Parameters + ---------- + start_seg : int + Number of first segment in path. + end_seg : int + Number of last segment in path (defaults to 0/outlet). + plot_segment_lines : bool + Controls plotting of segment end locations along profile.
+ + (default True) + + Returns + ------- + ax : matplotlib.axes._subplots.AxesSubplot object + """ + import matplotlib.pyplot as plt + if not pd: + msg = 'ModflowSfr2.plot_path: pandas not available' + raise ImportError(msg) + + df = self.df + m = self.parent + mfunits = m.sr.model_length_units + + to_miles = {'feet': 1 / 5280., 'meters': 1 / (.3048 * 5280.)} + + # slice the path + path = np.array(self.paths[start_seg]) + endidx = np.where(path == end_seg)[0] + endidx = endidx if len(endidx) > 0 else None + path = path[:np.squeeze(endidx)] + path = [s for s in path if s > 0] # skip lakes for now + + # get the values + groups = df.groupby('iseg') + tmp = pd.concat([groups.get_group(s) for s in path]) + tops = m.dis.top.array[tmp.i, tmp.j] + dist = np.cumsum(tmp.rchlen.values) * to_miles.get(mfunits, 1.) + + # segment starts + starts = dist[np.where(tmp.ireach.values == 1)[0]] + + ax = plt.subplots(figsize=(11, 8.5))[-1] + ax.plot(dist, tops, label='Model top') + ax.plot(dist, tmp.strtop, label='Streambed top') + ax.set_xlabel('Distance along path, in miles') + ax.set_ylabel('Elevation, in {}'.format(mfunits)) + ymin, ymax = ax.get_ylim() + plt.autoscale(False) + + if plot_segment_lines: # plot segment ends as vertical lines + ax.vlines(x=starts, ymin=ymin, ymax=ymax, lw=.1, alpha=.1, + label='Gray lines indicate\nsegment ends.') + ax.legend() + + # plot selected segment numbers along path + stride = np.floor(len(dist) / 10) + stride = 1 if stride < 1 else stride + inds = np.arange(0, len(dist), stride, dtype=int) + plot_segnumbers = tmp.iseg.values[inds] + xlocs = dist[inds] + pad = 0.04 * (ymax - ymin) + for x, sn in zip(xlocs, plot_segnumbers): + ax.text(x, ymin + pad, '{}'.format(sn), va='top') + ax.text(xlocs[0], ymin + pad * 1.2, 'Segment numbers:', va='bottom', + fontweight='bold') + ax.text(dist[-1], ymin + pad, '{}'.format(end_seg), ha='center', + va='top') + return ax + + def _get_headwaters(self, per=0): + """ + List all segments that are not outsegs of any other segment + (i.e., that do not have any segments upstream). + + Parameters + ---------- + per : int + Stress period for which to list headwater segments (default 0) + + Returns + ------- + headwaters : np.ndarray (1-D) + One dimensional array listing all headwater segments. + """ + upsegs = [self.segment_data[per].nseg[ + self.segment_data[per].outseg == s].tolist() + for s in self.segment_data[0].nseg] + return self.segment_data[per].nseg[ + np.array([i for i, u in enumerate(upsegs) if len(u) == 0])] + + def _interpolate_to_reaches(self, segvar1, segvar2, per=0): + """ + Interpolate values in datasets 6b and 6c to each reach in a + stream segment + + Parameters + ---------- + segvar1 : str + Column/variable name in segment_data array for representing start + of segment (e.g. hcond1 for hydraulic conductivity) + For segments with icalc=2 (specified channel geometry); if width1 + is given, the eighth distance point (XCPT8) from dataset 6d will + be used as the stream width. + For icalc=3, an arbitrary width of 5 is assigned. + For icalc=4, the mean value for width given in item 6e is used. + segvar2 : str + Column/variable name in segment_data array for representing end + of segment (e.g. hcond2 for hydraulic conductivity) + per : int + Stress period with segment data to interpolate + + Returns + ------- + reach_values : 1D array + One dimensional array of interpolated values of same length as + reach_data array. For example, hcond1 and hcond2 could be entered + as inputs to get values for the strhc1 (hydraulic conductivity) + column in reach_data.
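+
+        Examples
+        --------
+        A minimal usage sketch (assuming sfr is an existing ModflowSfr2
+        instance with hcond1/hcond2 defined in its segment_data):
+
+        >>> strhc1 = sfr._interpolate_to_reaches('hcond1', 'hcond2')
+        >>> sfr.reach_data['strhc1'] = strhc1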
+ + """ + reach_data = self.reach_data + segment_data = self.segment_data[per] + segment_data.sort(order='nseg') + reach_data.sort(order=['iseg', 'ireach']) + reach_values = [] + for seg in segment_data.nseg: + reaches = reach_data[reach_data.iseg == seg] + dist = np.cumsum(reaches.rchlen) - 0.5 * reaches.rchlen + icalc = segment_data.icalc[segment_data.nseg == seg] + # get width from channel cross section length + if 'width' in segvar1 and icalc == 2: + channel_geometry_data = self.channel_geometry_data[per] + reach_values += list( + np.ones(len(reaches)) * channel_geometry_data[seg][0][-1]) + # assign arbitrary width since width is based on flow + elif 'width' in segvar1 and icalc == 3: + reach_values += list(np.ones(len(reaches)) * 5) + # assume width to be mean from streamflow width/flow table + elif 'width' in segvar1 and icalc == 4: + channel_flow_data = self.channel_flow_data[per] + reach_values += list( + np.ones(len(reaches)) * np.mean(channel_flow_data[seg][2])) + else: + fp = [segment_data[segment_data['nseg'] == seg][segvar1][0], + segment_data[segment_data['nseg'] == seg][segvar2][0]] + xp = [dist[0], dist[-1]] + reach_values += np.interp(dist, xp, fp).tolist() + return np.array(reach_values) + + def _write_1c(self, f_sfr): + + # NSTRM NSS NSFRPAR NPARSEG CONST DLEAK ipakcb ISTCB2 + # [ISFROPT] [NSTRAIL] [ISUZN] [NSFRSETS] [IRTFLG] [NUMTIM] [WEIGHT] [FLWTOL] + f_sfr.write('{:.0f} {:.0f} {:.0f} {:.0f} {:.8f} {:.8f} {:.0f} {:.0f} ' + .format(self.nstrm, self.nss, self.nsfrpar, self.nparseg, + self.const, self.dleak, self.ipakcb, self.istcb2)) + if self.reachinput: + self.nstrm = abs( + self.nstrm) # see explanation for dataset 1c in online guide + f_sfr.write('{:.0f} '.format(self.isfropt)) + if self.isfropt > 1: + f_sfr.write('{:.0f} {:.0f} {:.0f} '.format(self.nstrail, + self.isuzn, + self.nsfrsets)) + if self.nstrm < 0: + f_sfr.write('{:.0f} '.format(self.isfropt)) + if self.isfropt > 1: + f_sfr.write('{:.0f} {:.0f} {:.0f} '.format(self.nstrail, + self.isuzn, + self.nsfrsets)) + if self.nstrm < 0 or self.transroute: + f_sfr.write('{:.0f} '.format(self.irtflg)) + if self.irtflg > 0: + f_sfr.write('{:.0f} {:.8f} {:.8f} '.format(self.numtim, + self.weight, + self.flwtol)) + f_sfr.write('\n') + + def _write_reach_data(self, f_sfr): + + # Write the recarray (data) to the file (or file handle) f + assert isinstance(self.reach_data, + np.recarray), "MfList.__tofile() data arg " + \ + "not a recarray" + + # decide which columns to write + # columns = self._get_item2_names() + columns = _get_item2_names(self.nstrm, self.reachinput, self.isfropt, + structured=self.parent.structured) + + # Add one to the kij indices + # names = self.reach_data.dtype.names + # lnames = [] + # [lnames.append(name.lower()) for name in names] + # --make copy of data for multiple calls + d = np.array(self.reach_data) + for idx in ['k', 'i', 'j', 'node']: + if (idx in columns): + d[idx] += 1 + d = d[columns] # data columns sorted + formats = _fmt_string(d) + '\n' + for rec in d: + f_sfr.write(formats.format(*rec)) + + def _write_segment_data(self, i, j, f_sfr): + cols = ['nseg', 'icalc', 'outseg', 'iupseg', 'iprior', 'nstrpts', + 'flow', 'runoff', + 'etsw', 'pptsw', 'roughch', 'roughbk', 'cdpth', 'fdpth', + 'awdth', 'bwdth'] + seg_dat = np.array(self.segment_data[i])[cols][j] + fmts = _fmt_string_list(seg_dat) + + nseg, icalc, outseg, iupseg, iprior, nstrpts, flow, runoff, etsw, \ + pptsw, roughch, roughbk, cdpth, fdpth, awdth, bwdth = \ + [0 if v == self.default_value else v for v in seg_dat] + + 
f_sfr.write( + ' '.join(fmts[0:4]).format(nseg, icalc, outseg, iupseg) + ' ') + + if iupseg > 0: + f_sfr.write(fmts[4].format(iprior) + ' ') + if icalc == 4: + f_sfr.write(fmts[5].format(nstrpts) + ' ') + + f_sfr.write( + ' '.join(fmts[6:10]).format(flow, runoff, etsw, pptsw) + ' ') + + if icalc in [1, 2]: + f_sfr.write(fmts[10].format(roughch) + ' ') + if icalc == 2: + f_sfr.write(fmts[11].format(roughbk) + ' ') + + if icalc == 3: + f_sfr.write( + ' '.join(fmts[12:16]).format(cdpth, fdpth, awdth, bwdth) + ' ') + f_sfr.write('\n') + + self._write_6bc(i, j, f_sfr, + cols=['hcond1', 'thickm1', 'elevup', 'width1', + 'depth1', 'thts1', 'thti1', + 'eps1', 'uhc1']) + self._write_6bc(i, j, f_sfr, + cols=['hcond2', 'thickm2', 'elevdn', 'width2', + 'depth2', 'thts2', 'thti2', + 'eps2', 'uhc2']) + + def _write_6bc(self, i, j, f_sfr, cols=()): + cols = list(cols) + icalc = self.segment_data[i][j][1] + seg_dat = np.array(self.segment_data[i])[cols][j] + fmts = _fmt_string_list(seg_dat) + hcond, thickm, elevupdn, width, depth, thts, thti, eps, uhc = \ + [0 if v == self.default_value else v for v in seg_dat] + + if self.isfropt in [0, 4, 5] and icalc <= 0: + f_sfr.write( + ' '.join(fmts[0:5]).format(hcond, thickm, elevupdn, width, + depth) + ' ') + + elif self.isfropt in [0, 4, 5] and icalc == 1: + f_sfr.write(fmts[0].format(hcond) + ' ') + + if i == 0: + f_sfr.write( + ' '.join(fmts[1:4]).format(thickm, elevupdn, width) + ' ') + if self.isfropt in [4, 5]: + f_sfr.write( + ' '.join(fmts[5:8]).format(thts, thti, eps) + ' ') + + if self.isfropt == 5: + f_sfr.write(fmts[8].format(uhc) + ' ') + + elif i > 0 and self.isfropt == 0: + f_sfr.write( + ' '.join(fmts[1:4]).format(thickm, elevupdn, width) + ' ') + + elif self.isfropt in [0, 4, 5] and icalc >= 2: + f_sfr.write(fmts[0].format(hcond) + ' ') + + if self.isfropt in [4, 5] and i > 0 and icalc == 2: + pass + else: + f_sfr.write(' '.join(fmts[1:3]).format(thickm, elevupdn) + ' ') + + if self.isfropt in [4, 5] and icalc == 2 and i == 0: + f_sfr.write( + ' '.join(fmts[3:6]).format(thts, thti, eps) + ' ') + + if self.isfropt == 5: + f_sfr.write(fmts[8].format(uhc) + ' ') + else: + pass + elif self.isfropt == 1 and icalc <= 1: + f_sfr.write(fmts[3].format(width) + ' ') + if icalc <= 0: + f_sfr.write(fmts[4].format(depth) + ' ') + elif self.isfropt in [2, 3]: + if icalc <= 0: + f_sfr.write(fmts[3].format(width) + ' ') + f_sfr.write(fmts[4].format(depth) + ' ') + elif icalc == 1: + if i > 0: + pass + else: + f_sfr.write(fmts[3].format(width) + ' ') + else: + pass + + else: + return + f_sfr.write('\n') + + def write_file(self, filename=None): + """ + Write the package file. 
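+
+        Parameters
+        ----------
+        filename : str, optional
+            Package file name; if given, it replaces the current fn_path
+            before the file is written.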
+ + Returns + ------- + None + + """ + + # tabfiles = False + # tabfiles_dict = {} + # transroute = False + # reachinput = False + if filename is not None: + self.fn_path = filename + + f_sfr = open(self.fn_path, 'w') + + # Item 0 -- header + f_sfr.write('{0}\n'.format(self.heading)) + + # Item 1 + if isinstance(self.options, + OptionBlock) and self.parent.version == "mfnwt": + self.options.update_from_package(self) + self.options.write_options(f_sfr) + elif isinstance(self.options, OptionBlock): + self.options.update_from_package(self) + self.options.block = False + self.options.write_options(f_sfr) + else: + pass + + self._write_1c(f_sfr) + + # item 2 + self._write_reach_data(f_sfr) + + # items 3 and 4 are skipped (parameters not supported) + + for i in range(0, self.nper): + + # item 5 + itmp = self.dataset_5[i][0] + f_sfr.write(' '.join(map(str, self.dataset_5[i])) + '\n') + if itmp > 0: + + # Item 6 + for j in range(itmp): + + # write datasets 6a, 6b and 6c + self._write_segment_data(i, j, f_sfr) + + icalc = self.segment_data[i].icalc[j] + nseg = self.segment_data[i].nseg[j] + if icalc == 2: + # or isfropt <= 1: + if i == 0 or self.nstrm > 0 and \ + not self.reachinput or self.isfropt <=1: + for k in range(2): + for d in self.channel_geometry_data[i][nseg][ + k]: + f_sfr.write('{:.2f} '.format(d)) + f_sfr.write('\n') + + if icalc == 4: + # nstrpts = self.segment_data[i][j][5] + for k in range(3): + for d in self.channel_flow_data[i][nseg][k]: + f_sfr.write('{:.2f} '.format(d)) + f_sfr.write('\n') + if self.tabfiles and i == 0: + for j in sorted(self.tabfiles_dict.keys()): + f_sfr.write('{:.0f} {:.0f} {:.0f}\n'.format(j, + self.tabfiles_dict[ + j][ + 'numval'], + self.tabfiles_dict[ + j][ + 'inuit'])) + else: + continue + f_sfr.close() + + def export(self, f, **kwargs): + if isinstance(f, str) and f.lower().endswith(".shp"): + from flopy.utils.geometry import Polygon + from flopy.export.shapefile_utils import recarray2shp + geoms = [] + for ix, i in enumerate(self.reach_data.i): + verts = self.parent.modelgrid.get_cell_vertices( + i, self.reach_data.j[ix]) + geoms.append(Polygon(verts)) + recarray2shp(self.reach_data, geoms, shpname=f, **kwargs) + else: + from flopy import export + return export.utils.package_export(f, self, **kwargs) + + def export_linkages(self, f, **kwargs): + """ + Export linework shapefile showing all routing connections between + SFR reaches. A length field containing the distance between connected + reaches can be used to filter for the longest connections in a GIS. 
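+
+        Parameters
+        ----------
+        f : str
+            Shapefile name (e.g. 'sfr_connections.shp'; the name here is
+            only illustrative).
+        **kwargs : keyword arguments
+            Passed to flopy.export.shapefile_utils.recarray2shp.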
+ + """ + from flopy.utils.geometry import LineString + from flopy.export.shapefile_utils import recarray2shp + rd = self.reach_data.copy() + m = self.parent + rd.sort(order=['reachID']) + + # get the cell centers for each reach + mg = m.modelgrid + x0 = mg.xcellcenters[rd.i, rd.j] + y0 = mg.ycellcenters[rd.i, rd.j] + loc = dict(zip(rd.reachID, zip(x0, y0))) + + # make lines of the reach connections between cell centers + geoms = [] + lengths = [] + for r in rd.reachID: + x0, y0 = loc[r] + outreach = rd.outreach[r - 1] + if outreach == 0: + x1, y1 = x0, y0 + else: + x1, y1 = loc[outreach] + geoms.append(LineString([(x0, y0), (x1, y1)])) + lengths.append(np.sqrt((x1 - x0) ** 2 + (y1 - y0) ** 2)) + lengths = np.array(lengths) + + # append connection lengths for filtering in GIS + rd = recfunctions.append_fields(rd, + names=['length'], + data=[lengths], + usemask=False, + asrecarray=True) + recarray2shp(rd, geoms, f, **kwargs) + + def export_outlets(self, f, **kwargs): + """ + Export point shapefile showing locations where streamflow is leaving + the model (outset=0). + + """ + from flopy.utils.geometry import Point + from flopy.export.shapefile_utils import recarray2shp + rd = self.reach_data + if np.min(rd.outreach) == np.max(rd.outreach): + self.set_outreaches() + rd = self.reach_data[self.reach_data.outreach == 0].copy() + m = self.parent + rd.sort(order=['iseg', 'ireach']) + + # get the cell centers for each reach + mg = m.modelgrid + x0 = mg.xcellcenters[rd.i, rd.j] + y0 = mg.ycellcenters[rd.i, rd.j] + geoms = [Point(x, y) for x, y in zip(x0, y0)] + recarray2shp(rd, geoms, f, **kwargs) + + def export_transient_variable(self, f, varname, **kwargs): + """ + Export point shapefile showing locations with a given segment_data + variable applied. For example, segments where streamflow is entering + or leaving the upstream end of a stream segment (FLOW) or where RUNOFF + is applied. Cell centroids of the first reach of segments with non-zero + terms of varname are exported; values of varname are exported by stress + period in the attribute fields (e.g. flow0, flow1, flow2... for FLOW + in stress periods 0, 1, 2... + + Parameters + ---------- + f : str, filename + varname : str + Variable in SFR Package dataset 6a (see SFR package documentation) + + """ + from flopy.utils.geometry import Point + from flopy.export.shapefile_utils import recarray2shp + + rd = self.reach_data + if np.min(rd.outreach) == np.max(rd.outreach): + self.set_outreaches() + ra = self.get_variable_by_stress_period(varname.lower()) + + # get the cell centers for each reach + m = self.parent + mg = m.modelgrid + x0 = mg.xcellcenters[ra.i, ra.j] + y0 = mg.ycellcenters[ra.i, ra.j] + geoms = [Point(x, y) for x, y in zip(x0, y0)] + recarray2shp(ra, geoms, f, **kwargs) + + @staticmethod + def ftype(): + return 'SFR' + + @staticmethod + def defaultunit(): + return 17 + + +class check: + """ + Check SFR2 package for common errors + + Parameters + ---------- + sfrpackage : object + Instance of Flopy ModflowSfr2 class. + verbose : bool + Boolean flag used to determine if check method results are + written to the screen + level : int + Check method analysis level. If level=0, summary checks are + performed. If level=1, full checks are performed. + + Notes + ----- + + Daniel Feinstein's top 10 SFR problems (7/16/2014): + 1) cell gaps btw adjacent reaches in a single segment + 2) cell gaps btw routed segments. 
possibly because of re-entry problems at domain edge + 3) adjacent reaches with STOP sloping the wrong way + 4) routed segments with end/start sloping the wrong way + 5) STOP>TOP1 violations, i.e., floaters + 6) STOP<<TOP10 violations, i.e., exaggerated incisions + + """ + + def _boolean_compare(self, array, col1, col2, + level0txt='', level1txt='', + sort_ascending=True, print_delimiter=' '): + """ + Compare two columns in a record array. For each row, tests whether + the value in col1 is greater than the value in col2. If any values + in col1 are greater than col2, subsets array to only include rows where + col1 is greater. Creates another column with differences + (col1-col2), and prints the array sorted by the differences + column (diff). + + Parameters + ---------- + array : record array + Array with columns to compare. + col1 : string + Column name in array. + col2 : string + Column name in array. + level0txt : str + Format string for the summary message; {} is filled with the + number of failing rows. + level1txt : str + Header printed above the detailed array when level=1. + sort_ascending : bool; default True + If True, printed array will be sorted by differences in + ascending order. + print_delimiter : str + Delimiter for printed array. + + Returns + ------- + txt : str + Error messages and printed array (if .level attribute of + checker is set to 1). Returns an empty string if no + values in col1 are greater than col2. + + Notes + ----- + info about appending to record arrays (views vs. copies and upcoming + changes to numpy): + http://stackoverflow.com/questions/22865877/how-do-i-write-to-multiple-fields-of-a-structured-array + """ + txt = '' + array = array.view(np.recarray).copy() + if isinstance(col1, np.ndarray): + array = recfunctions.append_fields(array, names='tmp1', data=col1, + asrecarray=True) + col1 = 'tmp1' + if isinstance(col2, np.ndarray): + array = recfunctions.append_fields(array, names='tmp2', data=col2, + asrecarray=True) + col2 = 'tmp2' + if isinstance(col1, tuple): + array = recfunctions.append_fields(array, names=col1[0], + data=col1[1], + asrecarray=True) + col1 = col1[0] + if isinstance(col2, tuple): + array = recfunctions.append_fields(array, names=col2[0], + data=col2[1], + asrecarray=True) + col2 = col2[0] + + failed = array[col1] > array[col2] + if np.any(failed): + failed_info = np.array(array)[failed] + txt += level0txt.format(len(failed_info)) + '\n' + if self.level == 1: + diff = failed_info[col2] - failed_info[col1] + cols = [c for c in failed_info.dtype.names if + failed_info[c].sum() != 0 + and c != 'diff' + and 'tmp' not in c] + failed_info = recfunctions.append_fields( + failed_info[cols].copy(), names='diff', data=diff, + usemask=False, asrecarray=False) + failed_info.sort(order='diff', axis=0) + if not sort_ascending: + failed_info = failed_info[::-1] + txt += level1txt + '\n' + txt += _print_rec_array(failed_info, delimiter=print_delimiter) + txt += '\n' + return txt + + def _txt_footer(self, headertxt, txt, testname, passed=False, + warning=True): + if len(txt) == 0 or passed: + txt += 'passed.'
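+            # record each test name under passed/warnings/errors so check
+            # results can be summarized by category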
+ self.passed.append(testname) + elif warning: + self.warnings.append(testname) + else: + self.errors.append(testname) + if self.verbose: + print(txt + '\n') + self.txt += headertxt + txt + '\n' + + def for_nans(self): + """ + Check for nans in reach or segment data + + """ + headertxt = 'Checking for nan values...\n' + txt = '' + passed = False + isnan = np.any(np.isnan(np.array(self.reach_data.tolist())), axis=1) + nanreaches = self.reach_data[isnan] + if np.any(isnan): + txt += 'Found {} reaches with nans:\n'.format(len(nanreaches)) + if self.level == 1: + txt += _print_rec_array(nanreaches, delimiter=' ') + for per, sd in self.segment_data.items(): + isnan = np.any(np.isnan(np.array(sd.tolist())), axis=1) + nansd = sd[isnan] + if np.any(isnan): + txt += 'Per {}: found {} segments with nans:\n'.format(per, + len(nansd)) + if self.level == 1: + txt += _print_rec_array(nansd, delimiter=' ') + if len(txt) == 0: + passed = True + self._txt_footer(headertxt, txt, 'nan values', passed) + + def run_all(self): + return self.sfr.check() + + def numbering(self): + """ + Checks for continuity in segment and reach numbering + """ + + headertxt = 'Checking for continuity in segment and reach numbering...\n' + if self.verbose: + print(headertxt.strip()) + txt = '' + passed = False + + sd = self.segment_data[0] + # check segment numbering + txt += _check_numbers(self.sfr.nss, + sd['nseg'], + level=self.level, + datatype='segment') + + # check reach numbering + for segment in np.arange(1, self.sfr.nss + 1): + reaches = self.reach_data.ireach[self.reach_data.iseg == segment] + t = _check_numbers(len(reaches), + reaches, + level=self.level, + datatype='reach') + if len(t) > 0: + txt += 'Segment {} has {}'.format(segment, t) + if txt == '': + passed = True + self._txt_footer(headertxt, txt, + 'continuity in segment and reach numbering', passed, + warning=False) + + headertxt = 'Checking for increasing segment numbers in downstream direction...\n' + txt = '' + passed = False + if self.verbose: + print(headertxt.strip()) + # for per, segment_data in self.segment_data.items(): + + inds = (sd.outseg < sd.nseg) & (sd.outseg != 0) + + if len(txt) == 0 and np.any(inds): + decreases = np.array(sd[inds])[['nseg', 'outseg']] + txt += 'Found {} segment numbers decreasing in the downstream direction.\n'.format( + len(decreases)) + txt += 'MODFLOW will run but convergence may be slowed:\n' + if self.level == 1: + txt += 'nseg outseg\n' + t = '' + for nseg, outseg in decreases: + t += '{} {}\n'.format(nseg, outseg) + txt += t # '\n'.join(textwrap.wrap(t, width=10)) + if len(txt) == 0: + passed = True + self._txt_footer(headertxt, txt, 'segment numbering order', passed) + + def routing(self): + """ + Checks for breaks in routing and does comprehensive check for + circular routing + + """ + headertxt = 'Checking for circular routing...\n' + txt = '' + if self.verbose: + print(headertxt.strip()) + + # txt += self.sfr.get_outlets(level=self.level, verbose=False) # will print twice if verbose=True + # simpler check method using paths from routing graph + circular_segs = [k for k, v in self.sfr.paths.items() if v is None] + if len(circular_segs) > 0: + txt += '{0} instances where an outlet was not found after {1} consecutive segments!\n' \ + .format(len(circular_segs), self.sfr.nss) + if self.level == 1: + txt += ' '.join(map(str, circular_segs)) + '\n' + else: + f = os.path.join(self.sfr.parent._model_ws, + 'circular_routing.chk.csv') + np.savetxt(f, circular_segs, fmt='%d', delimiter=',', + header=txt) + txt += 'See {} for details.'.format(f)
+ if self.verbose: + print(txt) + self._txt_footer(headertxt, txt, 'circular routing', warning=False) + + # check reach connections for proximity + if self.mg is not None: + rd = self.sfr.reach_data.copy() + rd.sort(order=['reachID']) + try: + xcentergrid, ycentergrid, zc = self.mg.get_cellcenters() + del zc + except AttributeError: + xcentergrid = self.mg.xcellcenters + ycentergrid = self.mg.ycellcenters + + x0 = xcentergrid[rd.i, rd.j] + y0 = ycentergrid[rd.i, rd.j] + loc = dict(zip(rd.reachID, zip(x0, y0))) + + # compute distances between node centers of connected reaches + headertxt = 'Checking reach connections for proximity...\n' + txt = '' + if self.verbose: + print(headertxt.strip()) + dist = [] + for r in rd.reachID: + x0, y0 = loc[r] + outreach = rd.outreach[r - 1] + if outreach == 0: + dist.append(0) + else: + x1, y1 = loc[outreach] + dist.append(np.sqrt((x1 - x0) ** 2 + (y1 - y0) ** 2)) + dist = np.array(dist) + + # compute max width of reach nodes (hypotenuse for rectangular nodes) + delr = self.mg.delr + delc = self.mg.delc + + dx = delr[rd.j] # (delr * self.sr.length_multiplier)[rd.j] + dy = delc[rd.i] # (delc * self.sr.length_multiplier)[rd.i] + hyp = np.sqrt(dx ** 2 + dy ** 2) + + # breaks are when the connection distance is greater than + # max node width * a tolerance + # 1.25 * hyp is greater than distance of two diagonally adjacent nodes + # where one is 1.5x larger than the other + breaks = np.where(dist > hyp * 1.25)[0] + breaks_reach_data = rd[breaks] + segments_with_breaks = set(breaks_reach_data.iseg) + if len(breaks) > 0: + txt += '{0} segments '.format(len(segments_with_breaks)) + \ + 'with non-adjacent reaches found.\n' + if self.level == 1: + txt += 'At segments:\n' + txt += ' '.join(map(str, segments_with_breaks)) + '\n' + else: + f = os.path.join(self.sfr.parent._model_ws, + 'reach_connection_gaps.chk.csv') + rd.tofile(f, sep='\t') + txt += 'See {} for details.'.format(f) + if self.verbose: + print(txt) + self._txt_footer(headertxt, txt, 'reach connections', + warning=False) + else: + txt += 'No DIS package or SpatialReference object; cannot ' + \ + 'check reach proximities.' + self._txt_footer(headertxt, txt, '') + + def overlapping_conductance(self, tol=1e-6): + """ + Checks for multiple SFR reaches in one cell; and whether more than + one reach has Cond > 0 + + """ + headertxt = 'Checking for model cells with multiple non-zero ' + \ + 'SFR conductances...\n' + txt = '' + if self.verbose: + print(headertxt.strip()) + + # make nreach vectors of each conductance parameter + reach_data = np.array(self.reach_data) + # if no dis file was supplied, can't compute node numbers + # make nodes based on unique row, col pairs + # if np.diff(reach_data.node).max() == 0: + # always use unique rc, since flopy assigns nodes by k, i, j + uniquerc = {} + for i, (r, c) in enumerate(reach_data[['i', 'j']]): + if (r, c) not in uniquerc: + uniquerc[(r, c)] = i + 1 + reach_data['node'] = [uniquerc[(r, c)] for r, c in + reach_data[['i', 'j']]] + + K = reach_data['strhc1'] + if K.max() == 0: + K = self.sfr._interpolate_to_reaches('hcond1', 'hcond2') + b = reach_data['strthick'] + if b.max() == 0: + b = self.sfr._interpolate_to_reaches('thickm1', 'thickm2') + L = reach_data['rchlen'] + w = self.sfr._interpolate_to_reaches('width1', 'width2') + + # Calculate SFR conductance for each reach + binv = np.zeros(b.shape, dtype=b.dtype) + idx = b > 0. + binv[idx] = 1.
/ b[idx] + Cond = K * w * L * binv + + shared_cells = _get_duplicates(reach_data['node']) + + nodes_with_multiple_conductance = set() + for node in shared_cells: + + # select the collocated reaches for this cell + conductances = Cond[reach_data['node'] == node].copy() + conductances.sort() + + # list nodes with multiple non-zero SFR reach conductances + if (conductances[-1] != 0.0 and + (conductances[0] / conductances[-1] > tol)): + nodes_with_multiple_conductance.update({node}) + + if len(nodes_with_multiple_conductance) > 0: + txt += '{} model cells with multiple non-zero SFR conductances found.\n' \ + 'This may lead to circular routing between collocated reaches.\n' \ + .format(len(nodes_with_multiple_conductance)) + if self.level == 1: + txt += 'Nodes with overlapping conductances:\n' + + reach_data['strthick'] = b + reach_data['strhc1'] = K + + cols = [c for c in reach_data.dtype.names if c in \ + ['k', 'i', 'j', 'iseg', 'ireach', 'rchlen', 'strthick', + 'strhc1', 'width', 'conductance']] + + reach_data = recfunctions.append_fields( + reach_data, + names=['width', 'conductance'], data=[w, Cond], + usemask=False, asrecarray=False) + has_multiple = np.array( + [True if n in nodes_with_multiple_conductance + else False for n in reach_data['node']]) + reach_data = reach_data[has_multiple] + reach_data = reach_data[cols] + txt += _print_rec_array(reach_data, delimiter='\t') + + self._txt_footer(headertxt, txt, 'overlapping conductance') + + def elevations(self, min_strtop=-10, max_strtop=15000): + """ + Checks streambed elevations for downstream rises and inconsistencies + with model grid + + """ + headertxt = 'Checking for streambed tops of less ' + \ + 'than {}...\n'.format(min_strtop) + txt = '' + if self.verbose: + print(headertxt.strip()) + + passed = False + if self.sfr.isfropt in [1, 2, 3]: + if np.diff(self.reach_data.strtop).max() == 0: + txt += 'isfropt setting of 1,2 or 3 requires strtop information!\n' + else: + is_less = self.reach_data.strtop < min_strtop + if np.any(is_less): + below_minimum = self.reach_data[is_less] + txt += '{} instances of streambed top below minimum found.\n'.format( + len(below_minimum)) + if self.level == 1: + txt += 'Reaches with low strtop:\n' + txt += _print_rec_array(below_minimum, delimiter='\t') + if len(txt) == 0: + passed = True + else: + txt += 'strtop not specified for isfropt={}\n'.format( + self.sfr.isfropt) + passed = True + self._txt_footer(headertxt, txt, 'minimum streambed top', passed) + + headertxt = 'Checking for streambed tops of ' + \ + 'greater than {}...\n'.format(max_strtop) + txt = '' + if self.verbose: + print(headertxt.strip()) + + passed = False + if self.sfr.isfropt in [1, 2, 3]: + if np.diff(self.reach_data.strtop).max() == 0: + txt += 'isfropt setting of 1,2 or 3 ' + \ + 'requires strtop information!\n' + else: + is_greater = self.reach_data.strtop > max_strtop + if np.any(is_greater): + above_max = self.reach_data[is_greater] + txt += '{} instances '.format(len(above_max)) + \ + 'of streambed top above the maximum found.\n' + if self.level == 1: + txt += 'Reaches with high strtop:\n' + txt += _print_rec_array(above_max, delimiter='\t') + if len(txt) == 0: + passed = True + else: + txt += 'strtop not specified for isfropt={}\n'.format( + self.sfr.isfropt) + passed = True + self._txt_footer(headertxt, txt, 'maximum streambed top', passed) + + headertxt = 'Checking segment_data for ' + \ + 'downstream rises in streambed elevation...\n' + txt = '' + if self.verbose: + print(headertxt.strip()) + + # decide whether to check 
elevup and elevdn from items 6b/c + # (see online guide to SFR input; Data Set 6b description) + passed = False + if self.sfr.isfropt in [0, 4, 5]: + pers = sorted(self.segment_data.keys()) + for per in pers: + segment_data = self.segment_data[per][ + self.segment_data[per].elevup > -999999] + + # enforce consecutive increasing segment numbers (for indexing) + segment_data.sort(order='nseg') + t = _check_numbers(len(segment_data), segment_data.nseg, + level=1, datatype='Segment') + if len(t) > 0: + txt += 'Elevation check requires ' + \ + 'consecutive segment numbering.' + self._txt_footer(headertxt, txt, '') + return + + # first check for segments where elevdn > elevup + d_elev = segment_data.elevdn - segment_data.elevup + segment_data = recfunctions.append_fields(segment_data, + names='d_elev', + data=d_elev, + asrecarray=True) + txt += self._boolean_compare( + np.array(segment_data)[['nseg', 'outseg', 'elevup', + 'elevdn', 'd_elev']], + col1='d_elev', col2=np.zeros(len(segment_data)), + level0txt='Stress Period {}: '.format(per + 1) + \ + '{} segments encountered with elevdn > elevup.', + level1txt='Backwards segments:', + ) + + # next check for rises between segments + non_outlets = segment_data.outseg > 0 + non_outlets_seg_data = segment_data[ + non_outlets] # lake outsegs are < 0 + outseg_elevup = np.array( + [segment_data.elevup[o - 1] for o in segment_data.outseg if + o > 0]) + d_elev2 = outseg_elevup - segment_data.elevdn[non_outlets] + non_outlets_seg_data = recfunctions.append_fields( + non_outlets_seg_data, + names=['outseg_elevup', 'd_elev2'], + data=[outseg_elevup, d_elev2], + usemask=False, asrecarray=False) + + txt += self._boolean_compare( + non_outlets_seg_data[['nseg', 'outseg', 'elevdn', + 'outseg_elevup', 'd_elev2']], + col1='d_elev2', col2=np.zeros(len(non_outlets_seg_data)), + level0txt='Stress Period {}: '.format(per + 1) + \ + '{} segments encountered ' \ + 'with outseg elevup > elevdn.', + level1txt='Backwards segment connections:', + ) + + if len(txt) == 0: + passed = True + else: + txt += 'Segment elevup and elevdn not ' + \ + 'specified for nstrm=' + \ + '{} and isfropt={}\n'.format(self.sfr.nstrm, + self.sfr.isfropt) + passed = True + self._txt_footer(headertxt, txt, 'segment elevations', passed) + + headertxt = 'Checking reach_data for ' + \ + 'downstream rises in streambed elevation...\n' + txt = '' + if self.verbose: + print(headertxt.strip()) + passed = False + if self.sfr.nstrm < 0 or self.sfr.reachinput and self.sfr.isfropt in [ + 1, 2, 3]: # see SFR input instructions + + # compute outreaches if they aren't there already + if np.diff(self.sfr.reach_data.outreach).max() == 0: + self.sfr.set_outreaches() + + # compute changes in elevation + rd = self.reach_data.copy() + elev = dict(zip(rd.reachID, rd.strtop)) + dnelev = {rid: elev[rd.outreach[i]] if rd.outreach[i] != 0 + else -9999 for i, rid in enumerate(rd.reachID)} + strtopdn = np.array([dnelev[r] for r in rd.reachID]) + diffs = np.array([(dnelev[i] - elev[i]) if dnelev[i] != -9999 + else -.001 for i in rd.reachID]) + + reach_data = self.sfr.reach_data # inconsistent with other checks that work with + # reach_data attribute of check class. Want to have get_outreaches as a method of sfr class + # (for other uses). Not sure if other check methods should also copy reach_data directly from + # SFR package instance for consistency.
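+            # (in the diffs computed above, positive values flag reaches whose
+            # downstream reach is higher, i.e. an elevation rise)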
+ + # use outreach values to get downstream elevations + # non_outlets = reach_data[reach_data.outreach != 0] + # outreach_elevdn = np.array([reach_data.strtop[o - 1] for o in reach_data.outreach]) + # d_strtop = outreach_elevdn[reach_data.outreach != 0] - non_outlets.strtop + rd = recfunctions.append_fields( + rd, names=['strtopdn', 'd_strtop'], data=[strtopdn, diffs], + usemask=False, asrecarray=False) + + txt += self._boolean_compare( + rd[['k', 'i', 'j', 'iseg', 'ireach', 'strtop', 'strtopdn', + 'd_strtop', 'reachID']], + col1='d_strtop', col2=np.zeros(len(rd)), + level0txt='{} reaches encountered with strtop < strtop of downstream reach.', + level1txt='Elevation rises:', + ) + if len(txt) == 0: + passed = True + else: + txt += 'Reach strtop not specified for nstrm={}, reachinput={} and isfropt={}\n' \ + .format(self.sfr.nstrm, self.sfr.reachinput, self.sfr.isfropt) + passed = True + self._txt_footer(headertxt, txt, 'reach elevations', passed) + + headertxt = 'Checking reach_data for inconsistencies between streambed elevations and the model grid...\n' + if self.verbose: + print(headertxt.strip()) + txt = '' + if self.sfr.parent.dis is None: + txt += 'No DIS file supplied; cannot check SFR elevations against model grid.' + self._txt_footer(headertxt, txt, '') + return + passed = False + warning = True + if (self.sfr.nstrm < 0 or self.sfr.reachinput and + self.sfr.isfropt in [1, 2, 3]): # see SFR input instructions + reach_data = np.array(self.reach_data) + i, j, k = reach_data['i'], reach_data['j'], reach_data['k'] + + # check streambed bottoms in relation to respective cell bottoms + bots = self.sfr.parent.dis.botm.array[k, i, j] + streambed_bots = reach_data['strtop'] - reach_data['strthick'] + reach_data = recfunctions.append_fields( + reach_data, names=['layerbot', 'strbot'], + data=[bots, streambed_bots], usemask=False, asrecarray=False) + + txt += self._boolean_compare( + reach_data[['k', 'i', 'j', 'iseg', 'ireach', 'strtop', + 'strthick', 'strbot', 'layerbot', 'reachID']], + col1='layerbot', col2='strbot', + level0txt='{} reaches encountered with streambed bottom below layer bottom.', + level1txt='Layer bottom violations:', + ) + if len(txt) > 0: + warning = False # this constitutes an error (MODFLOW won't run) + # check streambed elevations in relation to model top + tops = self.sfr.parent.dis.top.array[i, j] + reach_data = recfunctions.append_fields( + reach_data, names='modeltop', data=tops, + usemask=False, asrecarray=False) + + txt += self._boolean_compare( + reach_data[['k', 'i', 'j', 'iseg', 'ireach', + 'strtop', 'modeltop', 'strhc1', 'reachID']], + col1='strtop', col2='modeltop', + level0txt='{} reaches encountered with streambed above model top.', + level1txt='Model top violations:', + ) + + if len(txt) == 0: + passed = True + else: + txt += 'Reach strtop, strthick not specified for nstrm={}, reachinput={} and isfropt={}\n' \ + .format(self.sfr.nstrm, self.sfr.reachinput, self.sfr.isfropt) + passed = True + self._txt_footer(headertxt, txt, + 'reach elevations vs. grid elevations', passed, + warning=warning) + + # In cases where segment end elevations/thicknesses are used, + # do these need to be checked for consistency with layer bottoms? 
+ + headertxt = 'Checking segment_data for inconsistencies ' + \ + 'between segment end elevations and the model grid...\n' + txt = '' + if self.verbose: + print(headertxt.strip()) + passed = False + if self.sfr.isfropt in [0, 4, 5]: + reach_data = self.reach_data + pers = sorted(self.segment_data.keys()) + for per in pers: + segment_data = self.segment_data[per][ + self.segment_data[per].elevup > -999999] + + # enforce consecutive increasing segment numbers (for indexing) + segment_data.sort(order='nseg') + t = _check_numbers(len(segment_data), segment_data.nseg, + level=1, datatype='Segment') + if len(t) > 0: + raise Exception( + 'Elevation check requires consecutive segment numbering.') + + first_reaches = reach_data[reach_data.ireach == 1].copy() + last_reaches = reach_data[ + np.append((np.diff(reach_data.iseg) == 1), True)].copy() + segment_ends = recfunctions.stack_arrays( + [first_reaches, last_reaches], + asrecarray=True, usemask=False) + segment_ends['strtop'] = np.append(segment_data['elevup'], + segment_data['elevdn']) + i, j = segment_ends.i, segment_ends.j + tops = self.sfr.parent.dis.top.array[i, j] + diff = tops - segment_ends.strtop + segment_ends = recfunctions.append_fields( + segment_ends, + names=['modeltop', 'diff'], data=[tops, diff], + usemask=False, asrecarray=False) + + txt += self._boolean_compare(segment_ends[['k', 'i', 'j', 'iseg', + 'strtop', 'modeltop', + 'diff', + 'reachID']].copy(), + col1=np.zeros(len(segment_ends)), + col2='diff', + level0txt='{} reaches encountered with streambed above model top.', + level1txt='Model top violations:', + ) + + if len(txt) == 0: + passed = True + else: + txt += 'Segment elevup and elevdn not specified for nstrm={} and isfropt={}\n' \ + .format(self.sfr.nstrm, self.sfr.isfropt) + passed = True + self._txt_footer(headertxt, txt, 'segment elevations vs. model grid', + passed) + + def slope(self, minimum_slope=1e-4, maximum_slope=1.0): + """Checks that streambed slopes are greater than or equal to a specified minimum value. + Low slope values can cause "backup" or unrealistic stream stages with icalc options + where stage is computed. 
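+
+        Parameters
+        ----------
+        minimum_slope : float
+            Slopes below this value are flagged (default 1e-4).
+        maximum_slope : float
+            Slopes above this value are flagged (default 1.0).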
+ """ + headertxt = 'Checking for streambed slopes of less than {}...\n'.format( + minimum_slope) + txt = '' + if self.verbose: + print(headertxt.strip()) + + passed = False + if self.sfr.isfropt in [1, 2, 3]: + if np.diff(self.reach_data.slope).max() == 0: + txt += 'isfropt setting of 1,2 or 3 requires slope information!\n' + else: + is_less = self.reach_data.slope < minimum_slope + if np.any(is_less): + below_minimum = self.reach_data[is_less] + txt += '{} instances of streambed slopes below minimum found.\n'.format( + len(below_minimum)) + if self.level == 1: + txt += 'Reaches with low slopes:\n' + txt += _print_rec_array(below_minimum, delimiter='\t') + if len(txt) == 0: + passed = True + else: + txt += 'slope not specified for isfropt={}\n'.format( + self.sfr.isfropt) + passed = True + self._txt_footer(headertxt, txt, 'minimum slope', passed) + + headertxt = 'Checking for streambed slopes of greater than {}...\n'.format( + maximum_slope) + txt = '' + if self.verbose: + print(headertxt.strip()) + + passed = False + if self.sfr.isfropt in [1, 2, 3]: + if np.diff(self.reach_data.slope).max() == 0: + txt += 'isfropt setting of 1,2 or 3 requires slope information!\n' + else: + is_greater = self.reach_data.slope > maximum_slope + + if np.any(is_greater): + above_max = self.reach_data[is_greater] + txt += '{} instances of streambed slopes above maximum found.\n'.format( + len(above_max)) + if self.level == 1: + txt += 'Reaches with high slopes:\n' + txt += _print_rec_array(above_max, delimiter='\t') + if len(txt) == 0: + passed = True + else: + txt += 'slope not specified for isfropt={}\n'.format( + self.sfr.isfropt) + passed = True + self._txt_footer(headertxt, txt, 'maximum slope', passed) + + +def _check_numbers(n, numbers, level=1, datatype='reach'): + """ + Check that a sequence of numbers is consecutive + (that the sequence is equal to the range from 1 to n+1, where n is + the expected length of the sequence). + + Parameters + ---------- + n : int + Expected length of the sequence (i.e. number of stream segments) + numbers : array + Sequence of numbers (i.e. 'nseg' column from the segment_data array) + level : int + Check method analysis level. If level=0, summary checks are + performed. If level=1, full checks are performed. + datatype : str, optional + Only used for reporting. + """ + txt = '' + num_range = np.arange(1, n + 1) + if not np.array_equal(num_range, numbers): + txt += 'Invalid {} numbering\n'.format(datatype) + if level == 1: + # consistent dimension for boolean array + non_consecutive = np.append(np.diff(numbers) != 1, + False) + gaps = num_range[non_consecutive] + 1 + if len(gaps) > 0: + gapstr = ' '.join(map(str, gaps)) + txt += 'Gaps in numbering at positions {}\n'.format(gapstr) + return txt + + +def _isnumeric(s): + try: + float(s) + return True + except: + return False + + +def _markitzero(recarray, inds): + """ + Subtracts 1 from columns specified in inds argument, to convert from + 1 to 0-based indexing + + """ + lnames = [n.lower() for n in recarray.dtype.names] + for idx in inds: + if (idx in lnames): + recarray[idx] -= 1 + + +def _pop_item(line): + try: + return float(line.pop(0)) + except: + return 0. + + +def _get_dataset(line, dataset): + # interpret number supplied with decimal points as floats, rest as ints + # this could be a bad idea (vs. 
explicitly formatting values for each dataset) + for i, s in enumerate(line_parse(line)): + try: + n = int(s) + except: + try: + n = float(s) + except: + break + dataset[i] = n + return dataset + + + def _get_duplicates(a): + """ + Returns duplicate values in an array, similar to pandas .duplicated() + method + http://stackoverflow.com/questions/11528078/determining-duplicate-values-in-an-array + """ + s = np.sort(a, axis=None) + equal_to_previous_item = np.append(s[1:] == s[:-1], + False) # maintain same dimension for boolean array + return np.unique(s[equal_to_previous_item]) + + + def _get_item2_names(nstrm, reachinput, isfropt, structured=False): + """ + Determine which variables should be in item 2, based on model grid type, + reachinput specification, and isfropt. + + Returns + ------- + names : list of str + List of names (same as variables in SFR Package input instructions) of + columns to assign (upon load) or retain (upon write) in reach_data + array. + + Notes + ----- + Lowercase is used for all variable names. + + """ + names = [] + if structured: + names += ['k', 'i', 'j'] + else: + names += ['node'] + names += ['iseg', 'ireach', 'rchlen'] + if nstrm < 0 or reachinput: + if isfropt in [1, 2, 3]: + names += ['strtop', 'slope', 'strthick', 'strhc1'] + if isfropt in [2, 3]: + names += ['thts', 'thti', 'eps'] + if isfropt == 3: + names += ['uhc'] + return names + + + def _fmt_string_list(array, float_format=default_float_format): + fmt_list = [] + for name in array.dtype.names: + vtype = array.dtype[name].str[1].lower() + if vtype == 'v': + continue + if vtype == 'i': + fmt_list.append('{:d}') + elif vtype == 'f': + fmt_list.append(float_format) + elif vtype == 'o': + fmt_list.append('{!s}') + elif vtype == 's': + raise ValueError( + "'str' type found in dtype for {!r}. " + "This gives unpredictable results when writing a " + "recarray to a file - change to 'object' type".format(name)) + else: + raise ValueError( + "unknown dtype for {!r}: {!r}".format(name, vtype)) + return fmt_list + + + def _fmt_string(array, float_format=default_float_format): + return ' '.join(_fmt_string_list(array, float_format)) + + + def _print_rec_array(array, cols=None, delimiter=' ', + float_format=default_float_format): + """ + Print out a numpy record array to string, with column names. + + Parameters + ---------- + cols : list of strings + List of columns to print. + delimiter : string + Delimiter to use. + + Returns + ------- + txt : string + Text string of array. + + """ + txt = '' + if cols is not None: + cols = [c for c in array.dtype.names if c in cols] + else: + cols = list(array.dtype.names) + # drop columns with no data + if np.shape(array)[0] > 1: + cols = [c for c in cols if array[c].min() > -999999] + # add _fmt_string call here + array = np.array(array)[cols] + fmts = _fmt_string_list(array, float_format=float_format) + txt += delimiter.join(cols) + '\n' + txt += '\n'.join( + [delimiter.join(fmts).format(*r) for r in array.tolist()]) + return txt + + + def _parse_1c(line, reachinput, transroute): + """ + Parse Data Set 1c for SFR2 package.
+ See http://water.usgs.gov/nrp/gwsoftware/modflow2000/MFDOC/index.html?sfr.htm for more info + + Parameters + ---------- + line : str + line read from SFR package input file + + Returns + ------- + a tuple containing all variables for Data Set 1c + + """ + na = 0 + # line = _get_dataset(line, [0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 1, 30, 1, 2, 0.75, 0.0001, []]) + # line = line.strip().split() + line = line_parse(line) + + nstrm = int(line.pop(0)) + nss = int(line.pop(0)) + nsfrpar = int(line.pop(0)) + nparseg = int(line.pop(0)) + const = float(line.pop(0)) + dleak = float(line.pop(0)) + ipakcb = int(line.pop(0)) + istcb2 = int(line.pop(0)) + + isfropt, nstrail, isuzn, nsfrsets = na, na, na, na + if reachinput: + nstrm = abs(nstrm) # see explanation for dataset 1c in online guide + isfropt = int(line.pop(0)) + if isfropt > 1: + nstrail = int(line.pop(0)) + isuzn = int(line.pop(0)) + nsfrsets = int(line.pop(0)) + if nstrm < 0: + isfropt = int(line.pop(0)) + if isfropt > 1: + nstrail = int(line.pop(0)) + isuzn = int(line.pop(0)) + nsfrsets = int(line.pop(0)) + + irtflg, numtim, weight, flwtol = na, na, na, na + if nstrm < 0 or transroute: + irtflg = int(_pop_item(line)) + if irtflg > 0: + numtim = int(line.pop(0)) + weight = float(line.pop(0)) + flwtol = float(line.pop(0)) + + # auxiliary variables (MODFLOW-LGR) + option = [line[i] for i in np.arange(1, len(line)) if + 'aux' in line[i - 1].lower()] + + return nstrm, nss, nsfrpar, nparseg, const, dleak, ipakcb, istcb2, \ + isfropt, nstrail, isuzn, nsfrsets, irtflg, numtim, weight, flwtol, \ + option + + + def _parse_6a(line, option): + """ + Parse Data Set 6a for SFR2 package. + See http://water.usgs.gov/nrp/gwsoftware/modflow2000/MFDOC/index.html?sfr.htm for more info + + Parameters + ---------- + line : str + line read from SFR package input file + + Returns + ------- + a tuple containing all variables for Data Set 6a + """ + # line = line.strip().split() + line = line_parse(line) + + xyz = [] + # handle any aux variables at end of line + for s in line: + if s.lower() in option: + xyz.append(s.lower()) + + na = 0 + nseg = int(_pop_item(line)) + icalc = int(_pop_item(line)) + outseg = int(_pop_item(line)) + iupseg = int(_pop_item(line)) + iprior = na + nstrpts = na + + if iupseg > 0: + iprior = int(_pop_item(line)) + if icalc == 4: + nstrpts = int(_pop_item(line)) + + flow = _pop_item(line) + runoff = _pop_item(line) + etsw = _pop_item(line) + pptsw = _pop_item(line) + roughch = na + roughbk = na + + if icalc in [1, 2]: + roughch = _pop_item(line) + if icalc == 2: + roughbk = _pop_item(line) + + cdpth, fdpth, awdth, bwdth = na, na, na, na + if icalc == 3: + cdpth, fdpth, awdth, bwdth = map(float, line) + return nseg, icalc, outseg, iupseg, iprior, nstrpts, flow, runoff, etsw, \ + pptsw, roughch, roughbk, cdpth, fdpth, awdth, bwdth, xyz + + + def _parse_6bc(line, icalc, nstrm, isfropt, reachinput, per=0): + """ + Parse Data Sets 6b and 6c for the SFR2 package.
+    See http://water.usgs.gov/nrp/gwsoftware/modflow2000/MFDOC/index.html?sfr.htm for more info
+
+    Parameters
+    ----------
+    line : str
+        line read from SFR package input file
+
+    Returns
+    -------
+    a tuple of length 9 containing all variables for Data Set 6b
+
+    """
+    nvalues = sum([_isnumeric(s) for s in line_parse(line)])
+    line = _get_dataset(line, [0] * nvalues)
+
+    hcond, thickm, elevupdn, width, depth, thts, thti, eps, uhc = [0.0] * 9
+
+    if isfropt in [0, 4, 5] and icalc <= 0:
+        hcond = line.pop(0)
+        thickm = line.pop(0)
+        elevupdn = line.pop(0)
+        width = line.pop(0)
+        depth = line.pop(0)
+    elif isfropt in [0, 4, 5] and icalc == 1:
+        hcond = line.pop(0)
+        if isfropt in [4, 5] and per > 0:
+            pass
+        else:
+            thickm = line.pop(0)
+            elevupdn = line.pop(0)
+            # depth is not read if icalc == 1; see table in online guide
+            width = line.pop(0)
+            thts = _pop_item(line)
+            thti = _pop_item(line)
+            eps = _pop_item(line)
+        if isfropt == 5 and per == 0:
+            uhc = line.pop(0)
+    elif isfropt in [0, 4, 5] and icalc >= 2:
+        hcond = line.pop(0)
+        if isfropt in [4, 5] and per > 0 and icalc == 2:
+            pass
+        else:
+            thickm = line.pop(0)
+            elevupdn = line.pop(0)
+            if isfropt in [4, 5] and per == 0:
+                # table in online guide suggests that the following items
+                # should be present in this case, but they are not always
+                # included in example input; _pop_item returns 0 when the
+                # line is exhausted
+                thts = _pop_item(line)
+                thti = _pop_item(line)
+                eps = _pop_item(line)
+                if isfropt == 5:
+                    uhc = _pop_item(line)
+            else:
+                pass
+    elif isfropt == 1 and icalc <= 1:
+        width = line.pop(0)
+        if icalc <= 0:
+            depth = line.pop(0)
+    elif isfropt in [2, 3]:
+        if icalc <= 0:
+            width = line.pop(0)
+            depth = line.pop(0)
+
+        elif icalc == 1:
+            if per > 0:
+                pass
+            else:
+                width = line.pop(0)
+
+        else:
+            pass
+    else:
+        pass
+    return hcond, thickm, elevupdn, width, depth, thts, thti, eps, uhc
+
+
+def find_path(graph, start, end=0, path=()):
+    """Return a path (a list of segment numbers) from start to end in a
+    routing graph of {segment: outseg} entries, or None if no path exists."""
+    graph = graph.copy()
+    path = list(path) + [start]
+    if start == end:
+        return path
+    if start not in graph:
+        return None
+    if not isinstance(graph[start], list):
+        graph[start] = [graph[start]]
+    for node in graph[start]:
+        # depth-first search; skip segments already on the path
+        if node not in path:
+            newpath = find_path(graph, node, end, path)
+            if newpath:
+                return newpath
+    return None
diff --git a/flopy/modflow/mfsip.py b/flopy/modflow/mfsip.py
index ceeda009c4..8129ed0ab6 100644
--- a/flopy/modflow/mfsip.py
+++ b/flopy/modflow/mfsip.py
@@ -1,255 +1,255 @@
-"""
-mfsip module. Contains the ModflowSip class. Note that the user can access
-the ModflowSip class as `flopy.modflow.ModflowSip`.
-
-Additional information for this MODFLOW package can be found at the `Online
-MODFLOW Guide
-`_.
-
-"""
-
-import sys
-
-from ..pakbase import Package
-
-
-class ModflowSip(Package):
-    """
-    MODFLOW Strongly Implicit Procedure Package Class.
-
-    Parameters
-    ----------
-    model : model object
-        The model object (of type :class:flopy.modflow.mf.Modflow) to which
-        this package will be added.
-    mxiter : integer
-        The maximum number of times through the iteration loop in one time
-        step in an attempt to solve the system of finite-difference equations.
-        (default is 200)
-    nparm : integer
-        The number of iteration variables to be used.
-        Five variables are generally sufficient. (default is 5)
-    accl : float
-        The acceleration variable, which must be greater than zero
-        and is generally equal to one. If a zero is entered,
-        it is changed to one. (default is 1)
-    hclose : float > 0
-        The head change criterion for convergence.
When the maximum absolute - value of head change from all nodes during an iteration is less than - or equal to hclose, iteration stops. (default is 1e-5) - ipcalc : 0 or 1 - A flag indicating where the seed for calculating iteration variables - will come from. 0 is the seed entered by the user will be used. - 1 is the seed will be calculated at the start of the simulation from - problem variables. (default is 0) - wseed : float > 0 - The seed for calculating iteration variables. wseed is always read, - but is used only if ipcalc is equal to zero. (default is 0) - iprsip : integer > 0 - the printout interval for sip. iprsip, if equal to zero, is changed - to 999. The maximum head change (positive or negative) is printed for - each iteration of a time step whenever the time step is an even - multiple of iprsip. This printout also occurs at the end of each - stress period regardless of the value of iprsip. (default is 0) - extension : string - Filename extension (default is 'sip') - unitnumber : int - File unit number (default is None). - filenames : str or list of str - Filenames to use for the package. If filenames=None the package name - will be created using the model name and package extension. If a - single string is passed the package will be set to the string. - Default is None. - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - - Examples - -------- - - >>> import flopy - >>> ml = flopy.modflow.Modflow() - >>> sip = flopy.modflow.ModflowSip(ml, mxiter=100, hclose=0.0001) - - """ - - def __init__(self, model, mxiter=200, nparm=5, \ - accl=1, hclose=1e-5, ipcalc=1, wseed=0, iprsip=0, - extension='sip', unitnumber=None, filenames=None): - """ - Package constructor. - - """ - # set default unit number of one is not specified - if unitnumber is None: - unitnumber = ModflowSip.defaultunit() - - # set filenames - if filenames is None: - filenames = [None] - elif isinstance(filenames, str): - filenames = [filenames] - - # Fill namefile items - name = [ModflowSip.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and - # unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - # check if a valid model version has been specified - if model.version == 'mfusg': - err = 'Error: cannot use {} package '.format(self.name) + \ - 'with model version {}'.format(model.version) - raise Exception(err) - - self.heading = '# {} package for '.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' - self.url = 'sip.htm' - - self.mxiter = mxiter - self.nparm = nparm - self.accl = accl - self.hclose = hclose - self.ipcalc = ipcalc - self.wseed = wseed - self.iprsip = iprsip - self.parent.add_package(self) - - def write_file(self): - """ - Write the package file. 
- - Returns - ------- - None - - """ - # Open file for writing - f = open(self.fn_path, 'w') - f.write('{}\n'.format(self.heading)) - ifrfm = self.parent.get_ifrefm() - if ifrfm: - f.write('{} {}\n'.format(self.mxiter, self.nparm)) - f.write( - '{} {} {} {} {}\n'.format(self.accl, self.hclose, self.ipcalc, - self.wseed, self.iprsip)) - else: - f.write('{:10d}{:10d}\n'.format(self.mxiter, self.nparm)) - f.write('{:10.3f}{:10.3g}{:10d}{:10.3f}{:10d}\n'.format(self.accl, - self.hclose, - self.ipcalc, - self.wseed, - self.iprsip)) - f.close() - - @staticmethod - def load(f, model, ext_unit_dict=None): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - - Returns - ------- - sip : ModflowSip object - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> sip = flopy.modflow.ModflowSip.load('test.sip', m) - - """ - - if model.verbose: - sys.stdout.write('loading sip package file...\n') - - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - - # dataset 0 -- header - while True: - line = f.readline() - if line[0] != '#': - break - ifrfm = model.get_ifrefm() - # dataset 1 - if ifrfm: - t = line.strip().split() - mxiter = int(t[0]) - nparm = int(t[1]) - else: - mxiter = int(line[0:10].strip()) - nparm = int(line[10:20].strip()) - # dataset 2 - line = f.readline() - if ifrfm: - t = line.strip().split() - accl = float(t[0]) - hclose = float(t[1]) - ipcalc = int(t[2]) - wseed = float(t[3]) - iprsip = int(t[4]) - else: - accl = float(line[0:10].strip()) - hclose = float(line[10:20].strip()) - ipcalc = int(line[20:30].strip()) - wseed = float(line[30:40].strip()) - iprsip = int(line[40:50].strip()) - - if openfile: - f.close() - - # set package unit number - unitnumber = None - filenames = [None] - if ext_unit_dict is not None: - unitnumber, filenames[0] = \ - model.get_ext_dict_attr(ext_unit_dict, - filetype=ModflowSip.ftype()) - - sip = ModflowSip(model, mxiter=mxiter, nparm=nparm, - accl=accl, hclose=hclose, ipcalc=ipcalc, - wseed=wseed, iprsip=iprsip, unitnumber=unitnumber, - filenames=filenames) - return sip - - @staticmethod - def ftype(): - return 'SIP' - - @staticmethod - def defaultunit(): - return 25 +""" +mfsip module. Contains the ModflowSip class. Note that the user can access +the ModflowSip class as `flopy.modflow.ModflowSip`. + +Additional information for this MODFLOW package can be found at the `Online +MODFLOW Guide +`_. + +""" + +import sys + +from ..pakbase import Package + + +class ModflowSip(Package): + """ + MODFLOW Strongly Implicit Procedure Package Class. + + Parameters + ---------- + model : model object + The model object (of type :class:flopy.modflow.mf.Modflow) to which + this package will be added. + mxiter : integer + The maximum number of times through the iteration loop in one time + step in an attempt to solve the system of finite-difference equations. + (default is 200) + nparm : integer + The number of iteration variables to be used. + Five variables are generally sufficient. 
(default is 5)
+    accl : float
+        The acceleration variable, which must be greater than zero
+        and is generally equal to one. If a zero is entered,
+        it is changed to one. (default is 1)
+    hclose : float > 0
+        The head change criterion for convergence. When the maximum absolute
+        value of head change from all nodes during an iteration is less than
+        or equal to hclose, iteration stops. (default is 1e-5)
+    ipcalc : 0 or 1
+        A flag indicating where the seed for calculating iteration variables
+        will come from. If 0, the seed entered by the user will be used.
+        If 1, the seed will be calculated at the start of the simulation
+        from problem variables. (default is 1)
+    wseed : float > 0
+        The seed for calculating iteration variables. wseed is always read,
+        but is used only if ipcalc is equal to zero. (default is 0)
+    iprsip : integer > 0
+        The printout interval for SIP. iprsip, if equal to zero, is changed
+        to 999. The maximum head change (positive or negative) is printed for
+        each iteration of a time step whenever the time step is an even
+        multiple of iprsip. This printout also occurs at the end of each
+        stress period regardless of the value of iprsip. (default is 0)
+    extension : string
+        Filename extension (default is 'sip')
+    unitnumber : int
+        File unit number (default is None).
+    filenames : str or list of str
+        Filenames to use for the package. If filenames=None the package name
+        will be created using the model name and package extension. If a
+        single string is passed the package will be set to the string.
+        Default is None.
+
+    Attributes
+    ----------
+
+    Methods
+    -------
+
+    See Also
+    --------
+
+    Notes
+    -----
+
+    Examples
+    --------
+
+    >>> import flopy
+    >>> ml = flopy.modflow.Modflow()
+    >>> sip = flopy.modflow.ModflowSip(ml, mxiter=100, hclose=0.0001)
+
+    """
+
+    def __init__(self, model, mxiter=200, nparm=5,
+                 accl=1, hclose=1e-5, ipcalc=1, wseed=0, iprsip=0,
+                 extension='sip', unitnumber=None, filenames=None):
+        """
+        Package constructor.
+
+        """
+        # set default unit number if one is not specified
+        if unitnumber is None:
+            unitnumber = ModflowSip.defaultunit()
+
+        # set filenames
+        if filenames is None:
+            filenames = [None]
+        elif isinstance(filenames, str):
+            filenames = [filenames]
+
+        # Fill namefile items
+        name = [ModflowSip.ftype()]
+        units = [unitnumber]
+        extra = ['']
+
+        # set package name
+        fname = [filenames[0]]
+
+        # Call ancestor's init to set self.parent, extension, name and
+        # unit number
+        Package.__init__(self, model, extension=extension, name=name,
+                         unit_number=units, extra=extra, filenames=fname)
+
+        # check if a valid model version has been specified
+        if model.version == 'mfusg':
+            err = 'Error: cannot use {} package '.format(self.name) + \
+                  'with model version {}'.format(model.version)
+            raise Exception(err)
+
+        self.heading = '# {} package for '.format(self.name[0]) + \
+                       ' {}, '.format(model.version_types[model.version]) + \
+                       'generated by Flopy.'
+        self.url = 'sip.htm'
+
+        self.mxiter = mxiter
+        self.nparm = nparm
+        self.accl = accl
+        self.hclose = hclose
+        self.ipcalc = ipcalc
+        self.wseed = wseed
+        self.iprsip = iprsip
+        self.parent.add_package(self)
+
+    def write_file(self):
+        """
+        Write the package file.
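+
+        Records are written free-format when the parent model's IFREFM flag
+        is set and in fixed ten-character fields otherwise; for example,
+        mxiter=100 and nparm=5 would be written as ``100 5`` in free format.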
+ + Returns + ------- + None + + """ + # Open file for writing + f = open(self.fn_path, 'w') + f.write('{}\n'.format(self.heading)) + ifrfm = self.parent.get_ifrefm() + if ifrfm: + f.write('{} {}\n'.format(self.mxiter, self.nparm)) + f.write( + '{} {} {} {} {}\n'.format(self.accl, self.hclose, self.ipcalc, + self.wseed, self.iprsip)) + else: + f.write('{:10d}{:10d}\n'.format(self.mxiter, self.nparm)) + f.write('{:10.3f}{:10.3g}{:10d}{:10.3f}{:10d}\n'.format(self.accl, + self.hclose, + self.ipcalc, + self.wseed, + self.iprsip)) + f.close() + + @staticmethod + def load(f, model, ext_unit_dict=None): + """ + Load an existing package. + + Parameters + ---------- + f : filename or file handle + File to load. + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to + which this package will be added. + ext_unit_dict : dictionary, optional + If the arrays in the file are specified using EXTERNAL, + or older style array control records, then `f` should be a file + handle. In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. + + Returns + ------- + sip : ModflowSip object + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> sip = flopy.modflow.ModflowSip.load('test.sip', m) + + """ + + if model.verbose: + sys.stdout.write('loading sip package file...\n') + + openfile = not hasattr(f, 'read') + if openfile: + filename = f + f = open(filename, 'r') + + # dataset 0 -- header + while True: + line = f.readline() + if line[0] != '#': + break + ifrfm = model.get_ifrefm() + # dataset 1 + if ifrfm: + t = line.strip().split() + mxiter = int(t[0]) + nparm = int(t[1]) + else: + mxiter = int(line[0:10].strip()) + nparm = int(line[10:20].strip()) + # dataset 2 + line = f.readline() + if ifrfm: + t = line.strip().split() + accl = float(t[0]) + hclose = float(t[1]) + ipcalc = int(t[2]) + wseed = float(t[3]) + iprsip = int(t[4]) + else: + accl = float(line[0:10].strip()) + hclose = float(line[10:20].strip()) + ipcalc = int(line[20:30].strip()) + wseed = float(line[30:40].strip()) + iprsip = int(line[40:50].strip()) + + if openfile: + f.close() + + # set package unit number + unitnumber = None + filenames = [None] + if ext_unit_dict is not None: + unitnumber, filenames[0] = \ + model.get_ext_dict_attr(ext_unit_dict, + filetype=ModflowSip.ftype()) + + sip = ModflowSip(model, mxiter=mxiter, nparm=nparm, + accl=accl, hclose=hclose, ipcalc=ipcalc, + wseed=wseed, iprsip=iprsip, unitnumber=unitnumber, + filenames=filenames) + return sip + + @staticmethod + def ftype(): + return 'SIP' + + @staticmethod + def defaultunit(): + return 25 diff --git a/flopy/modflow/mfsms.py b/flopy/modflow/mfsms.py index 8bd4cf9675..8ff6ee2e50 100644 --- a/flopy/modflow/mfsms.py +++ b/flopy/modflow/mfsms.py @@ -1,566 +1,566 @@ -""" -mfsms module. This is the solver for MODFLOW-USG. -Contains the ModflowSms class. Note that the user can access -the ModflowSms class as `flopy.modflow.ModflowSms`. - - -""" - -import sys - -from ..pakbase import Package -from ..utils.flopy_io import line_parse - - -class ModflowSms(Package): - """ - MODFLOW Sms Package Class. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to which - this package will be added. - hclose : float - is the head change criterion for convergence of the outer (nonlinear) - iterations, in units of length. 
When the maximum absolute value of the - head change at all nodes during an iteration is less than or equal to - HCLOSE, iteration stops. Commonly, HCLOSE equals 0.01. - hiclose : float - is the head change criterion for convergence of the inner (linear) - iterations, in units of length. When the maximum absolute value of the - head change at all nodes during an iteration is less than or equal to - HICLOSE, the matrix solver assumes convergence. Commonly, HICLOSE is - set an order of magnitude less than HCLOSE. - mxiter : int - is the maximum number of outer (nonlinear) iterations -- that is, - calls to the solution routine. For a linear problem MXITER should be 1. - iter1 : int - is the maximum number of inner (linear) iterations. The number - typically depends on the characteristics of the matrix solution - scheme being used. For nonlinear problems, ITER1 usually ranges - from 60 to 600; a value of 100 will be sufficient for most linear - problems. - iprsms : int - is a flag that controls printing of convergence information from the - solver: 0 is print nothing; 1 is print only the total number of - iterations and nonlinear residual reduction summaries; 2 is print - matrix solver information in addition to above. - nonlinmeth : int - is a flag that controls the nonlinear solution method and under- - relaxation schemes. 0 is Picard iteration scheme is used without any - under-relaxation schemes involved. > 0 is Newton-Raphson iteration - scheme is used with under-relaxation. Note that the Newton-Raphson - linearization scheme is available only for the upstream weighted - solution scheme of the BCF and LPF packages. < 0 is Picard iteration - scheme is used with under-relaxation. The absolute value of NONLINMETH - determines the underrelaxation scheme used. 1 or -1, then - Delta-Bar-Delta under-relaxation is used. 2 or -2 then Cooley - under-relaxation scheme is used. - Note that the under-relaxation schemes are used in conjunction with - gradient based methods, however, experience has indicated that the - Cooley under-relaxation and damping work well also for the Picard - scheme with the wet/dry options of MODFLOW. - linmeth : int - is a flag that controls the matrix solution method. 1 is the XMD - solver of Ibaraki (2005). 2 is the unstructured pre-conditioned - conjugate gradient solver of White and Hughes (2011). - theta : float - is the reduction factor for the learning rate (under-relaxation term) - of the delta-bar-delta algorithm. The value of THETA is between zero - and one. If the change in the variable (head) is of opposite sign to - that of the previous iteration, the under-relaxation term is reduced - by a factor of THETA. The value usually ranges from 0.3 to 0.9; a - value of 0.7 works well for most problems. - akappa : float - is the increment for the learning rate (under-relaxation term) of the - delta-bar-delta algorithm. The value of AKAPPA is between zero and - one. If the change in the variable (head) is of the same sign to that - of the previous iteration, the under-relaxation term is increased by - an increment of AKAPPA. The value usually ranges from 0.03 to 0.3; a - value of 0.1 works well for most problems. - gamma : float - is the history or memory term factor of the delta-bar-delta algorithm. - Gamma is between zero and 1 but cannot be equal to one. When GAMMA is - zero, only the most recent history (previous iteration value) is - maintained. As GAMMA is increased, past history of iteration changes - has greater influence on the memory term. 
The memory term is - maintained as an exponential average of past changes. Retaining some - past history can overcome granular behavior in the calculated function - surface and therefore helps to overcome cyclic patterns of - non-convergence. The value usually ranges from 0.1 to 0.3; a value of - 0.2 works well for most problems. - amomentum : float - is the fraction of past history changes that is added as a momentum - term to the step change for a nonlinear iteration. The value of - AMOMENTUM is between zero and one. A large momentum term should only - be used when small learning rates are expected. Small amounts of the - momentum term help convergence. The value usually ranges from 0.0001 - to 0.1; a value of 0.001 works well for most problems. - numtrack : int - is the maximum number of backtracking iterations allowed for residual - reduction computations. If NUMTRACK = 0 then the backtracking - iterations are omitted. The value usually ranges from 2 to 20; a - value of 10 works well for most problems. - numtrack : int - is the maximum number of backtracking iterations allowed for residual - reduction computations. If NUMTRACK = 0 then the backtracking - iterations are omitted. The value usually ranges from 2 to 20; a - value of 10 works well for most problems. - btol : float - is the tolerance for residual change that is allowed for residual - reduction computations. BTOL should not be less than one to avoid - getting stuck in local minima. A large value serves to check for - extreme residual increases, while a low value serves to control - step size more severely. The value usually ranges from 1.0 to 1e6 ; a - value of 1e4 works well for most problems but lower values like 1.1 - may be required for harder problems. - breduce : float - is the reduction in step size used for residual reduction - computations. The value of BREDUC is between zero and one. The value - usually ranges from 0.1 to 0.3; a value of 0.2 works well for most - problems. - reslim : float - is the limit to which the residual is reduced with backtracking. - If the residual is smaller than RESLIM, then further backtracking is - not performed. A value of 100 is suitable for large problems and - residual reduction to smaller values may only slow down computations. - iacl : int - is the flag for choosing the acceleration method. 0 is Conjugate - Gradient; select this option if the matrix is symmetric. 1 is - ORTHOMIN. 2 is BiCGSTAB. - norder : int - is the flag for choosing the ordering scheme. - 0 is original ordering - 1 is reverse Cuthill McKee ordering - 2 is Minimum degree ordering - level : int - is the level of fill for ILU decomposition. Higher levels of fill - provide more robustness but also require more memory. For optimal - performance, it is suggested that a large level of fill be applied - (7 or 8) with use of drop tolerance. - north : int - is the number of orthogonalizations for the ORTHOMIN acceleration - scheme. A number between 4 and 10 is appropriate. Small values require - less storage but more iteration may be required. This number should - equal 2 for the other acceleration methods. - iredsys : int - is the index for creating a reduced system of equations using the - red-black ordering scheme. - 0 is do not create reduced system - 1 is create reduced system using red-black ordering - rrctol : float - is a residual tolerance criterion for convergence. The root mean - squared residual of the matrix solution is evaluated against this - number to determine convergence. 
The solver assumes convergence if - either HICLOSE (the absolute head tolerance value for the solver) or - RRCTOL is achieved. Note that a value of zero ignores residual - tolerance in favor of the absolute tolerance (HICLOSE) for closure of - the matrix solver. - idroptol : int - is the flag to perform drop tolerance. - 0 is do not perform drop tolerance - 1 is perform drop tolerance - epsrn : float - is the drop tolerance value. A value of 1e-3 works well for most - problems. - clin : string - an option keyword that defines the linear acceleration method used by - the PCGU solver. - CLIN is "CG", then preconditioned conjugate gradient method. - CLIN is "BCGS", then preconditioned bi-conjugate gradient stabilized - method. - ipc : int - an integer value that defines the preconditioner. - IPC = 0, No preconditioning. - IPC = 1, Jacobi preconditioning. - IPC = 2, ILU(0) preconditioning. - IPC = 3, MILU(0) preconditioning (default). - iscl : int - is the flag for choosing the matrix scaling approach used. - 0 is no matrix scaling applied - 1 is symmetric matrix scaling using the scaling method by the POLCG - preconditioner in Hill (1992). - 2 is symmetric matrix scaling using the l2 norm of each row of - A (DR) and the l2 norm of each row of DRA. - iord : int - is the flag for choosing the matrix reordering approach used. - 0 = original ordering - 1 = reverse Cuthill McKee ordering - 2 = minimum degree ordering - rclosepcgu : float - a real value that defines the flow residual tolerance for convergence - of the PCGU linear solver. This value represents the maximum allowable - residual at any single node. Value is in units of length cubed per - time, and must be consistent with MODFLOW-USG length and time units. - Usually a value of 1.0x10-1 is sufficient for the flow-residual - criteria when meters and seconds are the defined MODFLOW-USG length - and time. - relaxpcgu : float - a real value that defines the relaxation factor used by the MILU(0) - preconditioner. RELAXPCGU is unitless and should be greater than or - equal to 0.0 and less than or equal to 1.0. RELAXPCGU values of about - 1.0 are commonly used, and experience suggests that convergence can - be optimized in some cases with RELAXPCGU values of 0.97. A RELAXPCGU - value of 0.0 will result in ILU(0) preconditioning. RELAXPCGU is only - specified if IPC=3. If RELAXPCGU is not specified and IPC=3, then a - default value of 0.97 will be assigned to RELAXPCGU. - extension : str, optional - File extension (default is 'sms'. - unitnumber : int, optional - FORTRAN unit number for this package (default is None). - filenames : str or list of str - Filenames to use for the package. If filenames=None the package name - will be created using the model name and package extension. If a - single string is passed the package will be set to the string. - Default is None. 
- - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> sms = flopy.modflow.ModflowSms(m) - - """ - - def __init__(self, model, hclose=1E-4, hiclose=1E-4, mxiter=100, - iter1=20, iprsms=2, nonlinmeth=0, linmeth=2, - theta=0.7, akappa=0.1, gamma=0.2, amomentum=0.001, - numtrack=20, btol=1e4, breduc=0.2, reslim=100., - iacl=2, norder=0, level=7, north=2, iredsys=0, - rrctol=0., idroptol=0, epsrn=1.e-3, - clin='bcgs', ipc=3, iscl=0, iord=0, rclosepcgu=.1, - relaxpcgu=1.0, extension='sms', options=None, - unitnumber=None, filenames=None): - # set default unit number of one is not specified - if unitnumber is None: - unitnumber = ModflowSms.defaultunit() - - # set filenames - if filenames is None: - filenames = [None] - elif isinstance(filenames, str): - filenames = [filenames] - - # Fill namefile items - name = [ModflowSms.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and - # unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - self.heading = '# {} package for '.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' - self.url = ' ' - self.hclose = hclose - self.hiclose = hiclose - self.mxiter = mxiter - self.iter1 = iter1 - self.iprsms = iprsms - self.nonlinmeth = nonlinmeth - self.linmeth = linmeth - self.theta = theta - self.akappa = akappa - self.gamma = gamma - self.amomentum = amomentum - self.numtrack = numtrack - self.btol = btol - self.breduc = breduc - self.reslim = reslim - self.iacl = iacl - self.norder = norder - self.level = level - self.north = north - self.iredsys = iredsys - self.rrctol = rrctol - self.idroptol = idroptol - self.epsrn = epsrn - self.clin = clin - self.ipc = ipc - self.iscl = iscl - self.iord = iord - self.rclosepcgu = rclosepcgu - self.relaxpcgu = relaxpcgu - if options is None: - self.options = [] - else: - if not isinstance(options, list): - options = [options] - self.options = options - self.parent.add_package(self) - return - - def write_file(self): - """ - Write the package file. - - Returns - ------- - None - - """ - f = open(self.fn_path, 'w') - f.write('{}\n'.format(self.heading)) - nopt = len(self.options) - if nopt > 0: - f.write(' '.join(self.options) + '\n') - f.write('{0} {1} {2} {3} {4} {5} {6}\n'.format( - self.hclose, self.hiclose, self.mxiter, self.iter1, - self.iprsms, self.nonlinmeth, self.linmeth)) - if self.nonlinmeth != 0 and nopt == 0: - f.write('{0} {1} {2} {3} {4} {5} {6} {7}\n'.format( - self.theta, self.akappa, self.gamma, self.amomentum, - self.numtrack, self.btol, self.breduc, self.reslim)) - if self.linmeth == 1 and nopt == 0: - f.write('{0} {1} {2} {3} {4} {5} {6} {7}\n'.format( - self.iacl, self.norder, self.level, self.north, - self.iredsys, self.rrctol, self.idroptol, self.epsrn)) - if self.linmeth == 2 and nopt == 0: - f.write('{0} {1} {2} {3} {4} {5}\n'.format( - self.clin, self.ipc, self.iscl, self.iord, - self.rclosepcgu, self.relaxpcgu)) - f.write('\n') - f.close() - - @staticmethod - def load(f, model, ext_unit_dict=None): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. 
- ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - - Returns - ------- - sms : ModflowSms object - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> sms = flopy.modflow.ModflowPcg.load('test.sms', m) - - """ - - if model.verbose: - sys.stdout.write('loading sms package file...\n') - - if model.version != 'mfusg': - msg = "Warning: model version was reset from " + \ - "'{}' to 'mfusg' in order to load a SMS file".format( - model.version) - print(msg) - model.version = 'mfusg' - - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - - # dataset 0 -- header - while True: - line = f.readline() - if line[0] != '#': - break - - # Record 1a - opts = ['simple', 'moderate', 'complex'] - options = [] - firstentry = line.strip().split()[0] - if firstentry.lower() in opts: - options.append(firstentry) - nopt = len(options) - - if nopt > 0: - line = f.readline() - - # Record 1b -- line will have already been read - if model.verbose: - msg = 3 * ' ' + ' loading HCLOSE HICLOSE MXITER ITER1 ' + \ - 'IPRSMS NONLINMETH LINMETH...' - print(msg) - ll = line_parse(line) - hclose = float(ll.pop(0)) - hiclose = float(ll.pop(0)) - mxiter = int(ll.pop(0)) - iter1 = int(ll.pop(0)) - iprsms = int(ll.pop(0)) - nonlinmeth = int(ll.pop(0)) - linmeth = int(ll.pop(0)) - if model.verbose: - print(' HCLOSE {}'.format(hclose)) - print(' HICLOSE {}'.format(hiclose)) - print(' MXITER {}'.format(mxiter)) - print(' ITER1 {}'.format(iter1)) - print(' IPRSMS {}'.format(iprsms)) - print(' NONLINMETH {}'.format(nonlinmeth)) - print(' LINMETH {}'.format(linmeth)) - - # Record 2 - theta = None - akappa = None - gamma = None - amomentum = None - numtrack = None - btol = None - breduc = None - reslim = None - if nonlinmeth != 0 and nopt == 0: - if model.verbose: - msg = 3 * ' ' + 'loading THETA AKAPPA GAMMA AMOMENTUM ' + \ - 'NUMTRACK BTOL BREDUC RESLIM...' 
- print(msg) - while True: - line = f.readline() - if line[0] != '#': - break - ll = line_parse(line) - theta = float(ll.pop(0)) - akappa = float(ll.pop(0)) - gamma = float(ll.pop(0)) - amomentum = float(ll.pop(0)) - numtrack = int(ll.pop(0)) - btol = float(ll.pop(0)) - breduc = float(ll.pop(0)) - reslim = float(ll.pop(0)) - if model.verbose: - print(' THETA {}'.format(theta)) - print(' AKAPPA {}'.format(akappa)) - print(' GAMMA {}'.format(gamma)) - print(' AMOMENTUM {}'.format(amomentum)) - print(' NUMTRACK {}'.format(numtrack)) - print(' BTOL {}'.format(btol)) - print(' BREDUC {}'.format(breduc)) - print(' RESLIM {}'.format(reslim)) - - iacl = None - norder = None - level = None - north = None - iredsys = None - rrctol = None - idroptol = None - epsrn = None - if linmeth == 1 and nopt == 0: - if model.verbose: - msg = 3 * ' ' + 'loading IACL NORDER LEVEL NORTH ' + \ - 'IREDSYS RRCTOL IDROPTOL EPSRN' - print(msg) - while True: - line = f.readline() - if line[0] != '#': - break - ll = line_parse(line) - iacl = int(ll.pop(0)) - norder = int(ll.pop(0)) - level = int(ll.pop(0)) - north = int(ll.pop(0)) - iredsys = int(ll.pop(0)) - rrctol = float(ll.pop(0)) - idroptol = int(ll.pop(0)) - epsrn = float(ll.pop(0)) - if model.verbose: - print(' IACL {}'.format(iacl)) - print(' NORDER {}'.format(norder)) - print(' LEVEL {}'.format(level)) - print(' NORTH {}'.format(north)) - print(' IREDSYS {}'.format(iredsys)) - print(' RRCTOL {}'.format(rrctol)) - print(' IDROPTOL {}'.format(idroptol)) - print(' EPSRN {}'.format(epsrn)) - - clin = None - ipc = None - iscl = None - iord = None - rclosepcgu = None - relaxpcgu = None - if linmeth == 2 and nopt == 0: - if model.verbose: - msg = 3 * ' ' + 'loading [CLIN] IPC ISCL IORD ' + \ - 'RCLOSEPCGU [RELAXPCGU]' - print(msg) - while True: - line = f.readline() - if line[0] != '#': - break - ll = line_parse(line) - if 'cg' in line.lower(): # this will get cg or bcgs - clin = ll.pop(0) - ipc = int(ll.pop(0)) - iscl = int(ll.pop(0)) - iord = int(ll.pop(0)) - rclosepcgu = float(ll.pop(0)) - if len(ll) > 0: - relaxpcgu = float(ll.pop(0)) - if model.verbose: - print(' CLIN {}'.format(clin)) - print(' IPC {}'.format(ipc)) - print(' ISCL {}'.format(iscl)) - print(' IORD {}'.format(iord)) - print(' RCLOSEPCGU {}'.format(rclosepcgu)) - print(' RELAXPCGU {}'.format(relaxpcgu)) - - if openfile: - f.close() - - # set package unit number - unitnumber = None - filenames = [None] - if ext_unit_dict is not None: - unitnumber, filenames[0] = \ - model.get_ext_dict_attr(ext_unit_dict, - filetype=ModflowSms.ftype()) - - sms = ModflowSms(model, hclose=hclose, hiclose=hiclose, mxiter=mxiter, - iter1=iter1, iprsms=iprsms, nonlinmeth=nonlinmeth, - linmeth=linmeth, theta=theta, akappa=akappa, - gamma=gamma, amomentum=amomentum, numtrack=numtrack, - btol=btol, breduc=breduc, reslim=reslim, - iacl=iacl, norder=norder, level=level, north=north, - iredsys=iredsys, rrctol=rrctol, idroptol=idroptol, - epsrn=epsrn, clin=clin, ipc=ipc, iscl=iscl, - iord=iord, rclosepcgu=rclosepcgu, options=options, - relaxpcgu=relaxpcgu, unitnumber=unitnumber, - filenames=filenames) - return sms - - @staticmethod - def ftype(): - return 'SMS' - - @staticmethod - def defaultunit(): - return 32 +""" +mfsms module. This is the solver for MODFLOW-USG. +Contains the ModflowSms class. Note that the user can access +the ModflowSms class as `flopy.modflow.ModflowSms`. 
+ + +""" + +import sys + +from ..pakbase import Package +from ..utils.flopy_io import line_parse + + +class ModflowSms(Package): + """ + MODFLOW Sms Package Class. + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to which + this package will be added. + hclose : float + is the head change criterion for convergence of the outer (nonlinear) + iterations, in units of length. When the maximum absolute value of the + head change at all nodes during an iteration is less than or equal to + HCLOSE, iteration stops. Commonly, HCLOSE equals 0.01. + hiclose : float + is the head change criterion for convergence of the inner (linear) + iterations, in units of length. When the maximum absolute value of the + head change at all nodes during an iteration is less than or equal to + HICLOSE, the matrix solver assumes convergence. Commonly, HICLOSE is + set an order of magnitude less than HCLOSE. + mxiter : int + is the maximum number of outer (nonlinear) iterations -- that is, + calls to the solution routine. For a linear problem MXITER should be 1. + iter1 : int + is the maximum number of inner (linear) iterations. The number + typically depends on the characteristics of the matrix solution + scheme being used. For nonlinear problems, ITER1 usually ranges + from 60 to 600; a value of 100 will be sufficient for most linear + problems. + iprsms : int + is a flag that controls printing of convergence information from the + solver: 0 is print nothing; 1 is print only the total number of + iterations and nonlinear residual reduction summaries; 2 is print + matrix solver information in addition to above. + nonlinmeth : int + is a flag that controls the nonlinear solution method and under- + relaxation schemes. 0 is Picard iteration scheme is used without any + under-relaxation schemes involved. > 0 is Newton-Raphson iteration + scheme is used with under-relaxation. Note that the Newton-Raphson + linearization scheme is available only for the upstream weighted + solution scheme of the BCF and LPF packages. < 0 is Picard iteration + scheme is used with under-relaxation. The absolute value of NONLINMETH + determines the underrelaxation scheme used. 1 or -1, then + Delta-Bar-Delta under-relaxation is used. 2 or -2 then Cooley + under-relaxation scheme is used. + Note that the under-relaxation schemes are used in conjunction with + gradient based methods, however, experience has indicated that the + Cooley under-relaxation and damping work well also for the Picard + scheme with the wet/dry options of MODFLOW. + linmeth : int + is a flag that controls the matrix solution method. 1 is the XMD + solver of Ibaraki (2005). 2 is the unstructured pre-conditioned + conjugate gradient solver of White and Hughes (2011). + theta : float + is the reduction factor for the learning rate (under-relaxation term) + of the delta-bar-delta algorithm. The value of THETA is between zero + and one. If the change in the variable (head) is of opposite sign to + that of the previous iteration, the under-relaxation term is reduced + by a factor of THETA. The value usually ranges from 0.3 to 0.9; a + value of 0.7 works well for most problems. + akappa : float + is the increment for the learning rate (under-relaxation term) of the + delta-bar-delta algorithm. The value of AKAPPA is between zero and + one. If the change in the variable (head) is of the same sign to that + of the previous iteration, the under-relaxation term is increased by + an increment of AKAPPA. 
+        value of 0.1 works well for most problems.
+    gamma : float
+        is the history or memory term factor of the delta-bar-delta algorithm.
+        Gamma is between zero and 1 but cannot be equal to one. When GAMMA is
+        zero, only the most recent history (previous iteration value) is
+        maintained. As GAMMA is increased, past history of iteration changes
+        has greater influence on the memory term. The memory term is
+        maintained as an exponential average of past changes. Retaining some
+        past history can overcome granular behavior in the calculated function
+        surface and therefore helps to overcome cyclic patterns of
+        non-convergence. The value usually ranges from 0.1 to 0.3; a value of
+        0.2 works well for most problems.
+    amomentum : float
+        is the fraction of past history changes that is added as a momentum
+        term to the step change for a nonlinear iteration. The value of
+        AMOMENTUM is between zero and one. A large momentum term should only
+        be used when small learning rates are expected. Small amounts of the
+        momentum term help convergence. The value usually ranges from 0.0001
+        to 0.1; a value of 0.001 works well for most problems.
+    numtrack : int
+        is the maximum number of backtracking iterations allowed for residual
+        reduction computations. If NUMTRACK = 0 then the backtracking
+        iterations are omitted. The value usually ranges from 2 to 20; a
+        value of 10 works well for most problems.
+    btol : float
+        is the tolerance for residual change that is allowed for residual
+        reduction computations. BTOL should not be less than one to avoid
+        getting stuck in local minima. A large value serves to check for
+        extreme residual increases, while a low value serves to control
+        step size more severely. The value usually ranges from 1.0 to 1e6; a
+        value of 1e4 works well for most problems but lower values like 1.1
+        may be required for harder problems.
+    breduc : float
+        is the reduction in step size used for residual reduction
+        computations. The value of BREDUC is between zero and one. The value
+        usually ranges from 0.1 to 0.3; a value of 0.2 works well for most
+        problems.
+    reslim : float
+        is the limit to which the residual is reduced with backtracking.
+        If the residual is smaller than RESLIM, then further backtracking is
+        not performed. A value of 100 is suitable for large problems and
+        residual reduction to smaller values may only slow down computations.
+    iacl : int
+        is the flag for choosing the acceleration method. 0 is Conjugate
+        Gradient; select this option if the matrix is symmetric. 1 is
+        ORTHOMIN. 2 is BiCGSTAB.
+    norder : int
+        is the flag for choosing the ordering scheme.
+        0 is original ordering
+        1 is reverse Cuthill McKee ordering
+        2 is Minimum degree ordering
+    level : int
+        is the level of fill for ILU decomposition. Higher levels of fill
+        provide more robustness but also require more memory. For optimal
+        performance, it is suggested that a large level of fill be applied
+        (7 or 8) with use of drop tolerance.
+    north : int
+        is the number of orthogonalizations for the ORTHOMIN acceleration
+        scheme. A number between 4 and 10 is appropriate. Small values require
+        less storage but more iterations may be required. This number should
+        equal 2 for the other acceleration methods.
+    iredsys : int
+        is the index for creating a reduced system of equations using the
+        red-black ordering scheme.
+        0 is do not create reduced system
+        1 is create reduced system using red-black ordering
+    rrctol : float
+        is a residual tolerance criterion for convergence. The root mean
+        squared residual of the matrix solution is evaluated against this
+        number to determine convergence. The solver assumes convergence if
+        either HICLOSE (the absolute head tolerance value for the solver) or
+        RRCTOL is achieved. Note that a value of zero ignores residual
+        tolerance in favor of the absolute tolerance (HICLOSE) for closure of
+        the matrix solver.
+    idroptol : int
+        is the flag to perform drop tolerance.
+        0 is do not perform drop tolerance
+        1 is perform drop tolerance
+    epsrn : float
+        is the drop tolerance value. A value of 1e-3 works well for most
+        problems.
+    clin : string
+        an option keyword that defines the linear acceleration method used by
+        the PCGU solver.
+        If CLIN is "CG", the preconditioned conjugate gradient method is used.
+        If CLIN is "BCGS", the preconditioned bi-conjugate gradient
+        stabilized method is used.
+    ipc : int
+        an integer value that defines the preconditioner.
+        IPC = 0, No preconditioning.
+        IPC = 1, Jacobi preconditioning.
+        IPC = 2, ILU(0) preconditioning.
+        IPC = 3, MILU(0) preconditioning (default).
+    iscl : int
+        is the flag for choosing the matrix scaling approach used.
+        0 is no matrix scaling applied
+        1 is symmetric matrix scaling using the scaling method by the POLCG
+        preconditioner in Hill (1992).
+        2 is symmetric matrix scaling using the l2 norm of each row of
+        A (DR) and the l2 norm of each row of DRA.
+    iord : int
+        is the flag for choosing the matrix reordering approach used.
+        0 = original ordering
+        1 = reverse Cuthill McKee ordering
+        2 = minimum degree ordering
+    rclosepcgu : float
+        a real value that defines the flow residual tolerance for convergence
+        of the PCGU linear solver. This value represents the maximum allowable
+        residual at any single node. Value is in units of length cubed per
+        time, and must be consistent with MODFLOW-USG length and time units.
+        Usually a value of 1.0e-1 is sufficient for the flow-residual
+        criteria when meters and seconds are the defined MODFLOW-USG length
+        and time.
+    relaxpcgu : float
+        a real value that defines the relaxation factor used by the MILU(0)
+        preconditioner. RELAXPCGU is unitless and should be greater than or
+        equal to 0.0 and less than or equal to 1.0. RELAXPCGU values of about
+        1.0 are commonly used, and experience suggests that convergence can
+        be optimized in some cases with RELAXPCGU values of 0.97. A RELAXPCGU
+        value of 0.0 will result in ILU(0) preconditioning. RELAXPCGU is only
+        specified if IPC=3. If RELAXPCGU is not specified and IPC=3, then a
+        default value of 0.97 will be assigned to RELAXPCGU.
+    extension : str, optional
+        File extension (default is 'sms').
+    unitnumber : int, optional
+        FORTRAN unit number for this package (default is None).
+    filenames : str or list of str
+        Filenames to use for the package. If filenames=None the package name
+        will be created using the model name and package extension. If a
+        single string is passed the package will be set to the string.
+        Default is None.
+
+    Attributes
+    ----------
+
+    Methods
+    -------
+
+    See Also
+    --------
+
+    Notes
+    -----
+
+    Examples
+    --------
+
+    >>> import flopy
+    >>> m = flopy.modflow.Modflow()
+    >>> sms = flopy.modflow.ModflowSms(m)
+
+    """
+
+    def __init__(self, model, hclose=1E-4, hiclose=1E-4, mxiter=100,
+                 iter1=20, iprsms=2, nonlinmeth=0, linmeth=2,
+                 theta=0.7, akappa=0.1, gamma=0.2, amomentum=0.001,
+                 numtrack=20, btol=1e4, breduc=0.2, reslim=100.,
+                 iacl=2, norder=0, level=7, north=2, iredsys=0,
+                 rrctol=0., idroptol=0, epsrn=1.e-3,
+                 clin='bcgs', ipc=3, iscl=0, iord=0, rclosepcgu=.1,
+                 relaxpcgu=1.0, extension='sms', options=None,
+                 unitnumber=None, filenames=None):
+        # set default unit number if one is not specified
+        if unitnumber is None:
+            unitnumber = ModflowSms.defaultunit()
+
+        # set filenames
+        if filenames is None:
+            filenames = [None]
+        elif isinstance(filenames, str):
+            filenames = [filenames]
+
+        # Fill namefile items
+        name = [ModflowSms.ftype()]
+        units = [unitnumber]
+        extra = ['']
+
+        # set package name
+        fname = [filenames[0]]
+
+        # Call ancestor's init to set self.parent, extension, name and
+        # unit number
+        Package.__init__(self, model, extension=extension, name=name,
+                         unit_number=units, extra=extra, filenames=fname)
+
+        self.heading = '# {} package for '.format(self.name[0]) + \
+                       ' {}, '.format(model.version_types[model.version]) + \
+                       'generated by Flopy.'
+        self.url = ' '
+        self.hclose = hclose
+        self.hiclose = hiclose
+        self.mxiter = mxiter
+        self.iter1 = iter1
+        self.iprsms = iprsms
+        self.nonlinmeth = nonlinmeth
+        self.linmeth = linmeth
+        self.theta = theta
+        self.akappa = akappa
+        self.gamma = gamma
+        self.amomentum = amomentum
+        self.numtrack = numtrack
+        self.btol = btol
+        self.breduc = breduc
+        self.reslim = reslim
+        self.iacl = iacl
+        self.norder = norder
+        self.level = level
+        self.north = north
+        self.iredsys = iredsys
+        self.rrctol = rrctol
+        self.idroptol = idroptol
+        self.epsrn = epsrn
+        self.clin = clin
+        self.ipc = ipc
+        self.iscl = iscl
+        self.iord = iord
+        self.rclosepcgu = rclosepcgu
+        self.relaxpcgu = relaxpcgu
+        if options is None:
+            self.options = []
+        else:
+            if not isinstance(options, list):
+                options = [options]
+            self.options = options
+        self.parent.add_package(self)
+        return
+
+    def write_file(self):
+        """
+        Write the package file.
+
+        Returns
+        -------
+        None
+
+        """
+        f = open(self.fn_path, 'w')
+        f.write('{}\n'.format(self.heading))
+        nopt = len(self.options)
+        if nopt > 0:
+            f.write(' '.join(self.options) + '\n')
+        f.write('{0} {1} {2} {3} {4} {5} {6}\n'.format(
+            self.hclose, self.hiclose, self.mxiter, self.iter1,
+            self.iprsms, self.nonlinmeth, self.linmeth))
+        if self.nonlinmeth != 0 and nopt == 0:
+            f.write('{0} {1} {2} {3} {4} {5} {6} {7}\n'.format(
+                self.theta, self.akappa, self.gamma, self.amomentum,
+                self.numtrack, self.btol, self.breduc, self.reslim))
+        if self.linmeth == 1 and nopt == 0:
+            f.write('{0} {1} {2} {3} {4} {5} {6} {7}\n'.format(
+                self.iacl, self.norder, self.level, self.north,
+                self.iredsys, self.rrctol, self.idroptol, self.epsrn))
+        if self.linmeth == 2 and nopt == 0:
+            f.write('{0} {1} {2} {3} {4} {5}\n'.format(
+                self.clin, self.ipc, self.iscl, self.iord,
+                self.rclosepcgu, self.relaxpcgu))
+        f.write('\n')
+        f.close()
+
+    @staticmethod
+    def load(f, model, ext_unit_dict=None):
+        """
+        Load an existing package.
+
+        Parameters
+        ----------
+        f : filename or file handle
+            File to load.
+        model : model object
+            The model object (of type :class:`flopy.modflow.mf.Modflow`) to
+            which this package will be added.
+        ext_unit_dict : dictionary, optional
+            If the arrays in the file are specified using EXTERNAL,
+            or older style array control records, then `f` should be a file
+            handle. In this case ext_unit_dict is required, which can be
+            constructed using the function
+            :class:`flopy.utils.mfreadnam.parsenamefile`.
+
+        Returns
+        -------
+        sms : ModflowSms object
+
+        Examples
+        --------
+
+        >>> import flopy
+        >>> m = flopy.modflow.Modflow()
+        >>> sms = flopy.modflow.ModflowSms.load('test.sms', m)
+
+        """
+
+        if model.verbose:
+            sys.stdout.write('loading sms package file...\n')
+
+        if model.version != 'mfusg':
+            msg = "Warning: model version was reset from " + \
+                  "'{}' to 'mfusg' in order to load a SMS file".format(
+                      model.version)
+            print(msg)
+            model.version = 'mfusg'
+
+        openfile = not hasattr(f, 'read')
+        if openfile:
+            filename = f
+            f = open(filename, 'r')
+
+        # dataset 0 -- header
+        while True:
+            line = f.readline()
+            if line[0] != '#':
+                break
+
+        # Record 1a
+        opts = ['simple', 'moderate', 'complex']
+        options = []
+        firstentry = line.strip().split()[0]
+        if firstentry.lower() in opts:
+            options.append(firstentry)
+        nopt = len(options)
+
+        if nopt > 0:
+            line = f.readline()
+
+        # Record 1b -- line will have already been read
+        if model.verbose:
+            msg = 3 * ' ' + ' loading HCLOSE HICLOSE MXITER ITER1 ' + \
+                  'IPRSMS NONLINMETH LINMETH...'
+            print(msg)
+        ll = line_parse(line)
+        hclose = float(ll.pop(0))
+        hiclose = float(ll.pop(0))
+        mxiter = int(ll.pop(0))
+        iter1 = int(ll.pop(0))
+        iprsms = int(ll.pop(0))
+        nonlinmeth = int(ll.pop(0))
+        linmeth = int(ll.pop(0))
+        if model.verbose:
+            print('    HCLOSE {}'.format(hclose))
+            print('    HICLOSE {}'.format(hiclose))
+            print('    MXITER {}'.format(mxiter))
+            print('    ITER1 {}'.format(iter1))
+            print('    IPRSMS {}'.format(iprsms))
+            print('    NONLINMETH {}'.format(nonlinmeth))
+            print('    LINMETH {}'.format(linmeth))
+
+        # Record 2
+        theta = None
+        akappa = None
+        gamma = None
+        amomentum = None
+        numtrack = None
+        btol = None
+        breduc = None
+        reslim = None
+        if nonlinmeth != 0 and nopt == 0:
+            if model.verbose:
+                msg = 3 * ' ' + 'loading THETA AKAPPA GAMMA AMOMENTUM ' + \
+                      'NUMTRACK BTOL BREDUC RESLIM...'
+ print(msg) + while True: + line = f.readline() + if line[0] != '#': + break + ll = line_parse(line) + theta = float(ll.pop(0)) + akappa = float(ll.pop(0)) + gamma = float(ll.pop(0)) + amomentum = float(ll.pop(0)) + numtrack = int(ll.pop(0)) + btol = float(ll.pop(0)) + breduc = float(ll.pop(0)) + reslim = float(ll.pop(0)) + if model.verbose: + print(' THETA {}'.format(theta)) + print(' AKAPPA {}'.format(akappa)) + print(' GAMMA {}'.format(gamma)) + print(' AMOMENTUM {}'.format(amomentum)) + print(' NUMTRACK {}'.format(numtrack)) + print(' BTOL {}'.format(btol)) + print(' BREDUC {}'.format(breduc)) + print(' RESLIM {}'.format(reslim)) + + iacl = None + norder = None + level = None + north = None + iredsys = None + rrctol = None + idroptol = None + epsrn = None + if linmeth == 1 and nopt == 0: + if model.verbose: + msg = 3 * ' ' + 'loading IACL NORDER LEVEL NORTH ' + \ + 'IREDSYS RRCTOL IDROPTOL EPSRN' + print(msg) + while True: + line = f.readline() + if line[0] != '#': + break + ll = line_parse(line) + iacl = int(ll.pop(0)) + norder = int(ll.pop(0)) + level = int(ll.pop(0)) + north = int(ll.pop(0)) + iredsys = int(ll.pop(0)) + rrctol = float(ll.pop(0)) + idroptol = int(ll.pop(0)) + epsrn = float(ll.pop(0)) + if model.verbose: + print(' IACL {}'.format(iacl)) + print(' NORDER {}'.format(norder)) + print(' LEVEL {}'.format(level)) + print(' NORTH {}'.format(north)) + print(' IREDSYS {}'.format(iredsys)) + print(' RRCTOL {}'.format(rrctol)) + print(' IDROPTOL {}'.format(idroptol)) + print(' EPSRN {}'.format(epsrn)) + + clin = None + ipc = None + iscl = None + iord = None + rclosepcgu = None + relaxpcgu = None + if linmeth == 2 and nopt == 0: + if model.verbose: + msg = 3 * ' ' + 'loading [CLIN] IPC ISCL IORD ' + \ + 'RCLOSEPCGU [RELAXPCGU]' + print(msg) + while True: + line = f.readline() + if line[0] != '#': + break + ll = line_parse(line) + if 'cg' in line.lower(): # this will get cg or bcgs + clin = ll.pop(0) + ipc = int(ll.pop(0)) + iscl = int(ll.pop(0)) + iord = int(ll.pop(0)) + rclosepcgu = float(ll.pop(0)) + if len(ll) > 0: + relaxpcgu = float(ll.pop(0)) + if model.verbose: + print(' CLIN {}'.format(clin)) + print(' IPC {}'.format(ipc)) + print(' ISCL {}'.format(iscl)) + print(' IORD {}'.format(iord)) + print(' RCLOSEPCGU {}'.format(rclosepcgu)) + print(' RELAXPCGU {}'.format(relaxpcgu)) + + if openfile: + f.close() + + # set package unit number + unitnumber = None + filenames = [None] + if ext_unit_dict is not None: + unitnumber, filenames[0] = \ + model.get_ext_dict_attr(ext_unit_dict, + filetype=ModflowSms.ftype()) + + sms = ModflowSms(model, hclose=hclose, hiclose=hiclose, mxiter=mxiter, + iter1=iter1, iprsms=iprsms, nonlinmeth=nonlinmeth, + linmeth=linmeth, theta=theta, akappa=akappa, + gamma=gamma, amomentum=amomentum, numtrack=numtrack, + btol=btol, breduc=breduc, reslim=reslim, + iacl=iacl, norder=norder, level=level, north=north, + iredsys=iredsys, rrctol=rrctol, idroptol=idroptol, + epsrn=epsrn, clin=clin, ipc=ipc, iscl=iscl, + iord=iord, rclosepcgu=rclosepcgu, options=options, + relaxpcgu=relaxpcgu, unitnumber=unitnumber, + filenames=filenames) + return sms + + @staticmethod + def ftype(): + return 'SMS' + + @staticmethod + def defaultunit(): + return 32 diff --git a/flopy/modflow/mfsor.py b/flopy/modflow/mfsor.py index e7f559add6..805f4a6b8e 100644 --- a/flopy/modflow/mfsor.py +++ b/flopy/modflow/mfsor.py @@ -1,204 +1,204 @@ -""" -mfsor module. Contains the ModflowSor class. Note that the user can access -the ModflowSor class as `flopy.modflow.ModflowSor`. 
- -Additional information for this MODFLOW package can be found at the `Online -MODFLOW Guide -`_. - -""" - -import sys - -from ..pakbase import Package - - -class ModflowSor(Package): - """ - MODFLOW Slice-successive overrelaxation Package Class. - - Parameters - ---------- - model : model object - The model object (of type :class:flopy.modflow.mf.Modflow) to which - this package will be added. - mxiter : integer - The maximum number of iterations allowed in a time step. - (default is 200) - accl : float - The acceleration variable, which must be greater than zero - and is generally between 1. and 2. (default is 1) - hclose : float > 0 - The head change criterion for convergence. When the maximum absolute - value of head change from all nodes during an iteration is less than - or equal to hclose, iteration stops. (default is 1e-5) - iprsor : integer > 0 - the printout interval for sor. iprsor, if equal to zero, is changed to - 999. The maximum head change (positive or negative) is printed for each - iteration of a time step whenever the time step is an even multiple of - iprsor. This printout also occurs at the end of each stress period - regardless of the value of iprsor. (default is 0) - extension : string - Filename extension (default is 'sor') - unitnumber : int - File unit number (default is None). - filenames : str or list of str - Filenames to use for the package. If filenames=None the package name - will be created using the model name and package extension. If a - single string is passed the package will be set to the string. - Default is None. - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - - Examples - -------- - - >>> import flopy - >>> ml = flopy.modflow.Modflow() - >>> sor = flopy.modflow.ModflowSor(ml) - - """ - - def __init__(self, model, mxiter=200, accl=1, hclose=1e-5, iprsor=0, - extension='sor', unitnumber=None, filenames=None): - """ - Package constructor. - - """ - # set default unit number of one is not specified - if unitnumber is None: - unitnumber = ModflowSor.defaultunit() - - # set filenames - if filenames is None: - filenames = [None] - elif isinstance(filenames, str): - filenames = [filenames] - - # Fill namefile items - name = [ModflowSor.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and - # unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - # check if a valid model version has been specified - if model.version != 'mf2k': - err = 'Error: cannot use {} '.format(self.name) + \ - 'package with model version {}'.format(model.version) - raise Exception(err) - - self.heading = '# {} package for '.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' - self.url = 'sor.htm' - self.mxiter = mxiter - self.accl = accl - self.hclose = hclose - self.iprsor = iprsor - self.parent.add_package(self) - - def write_file(self): - """ - Write the package file. - - Returns - ------- - None - - """ - # Open file for writing - f = open(self.fn_path, 'w') - f.write('{}\n'.format(self.heading)) - f.write('{:10d}\n'.format(self.mxiter)) - line = '{:10.4g}{:10.4g}{:10d}\n'.format(self.accl, self.hclose, - self.iprsor) - f.write(line) - f.close() - - @staticmethod - def load(f, model, ext_unit_dict=None): - """ - Load an existing package. 
- - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - - Returns - ------- - sor : ModflowSor object - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> sor = flopy.modflow.ModflowSor.load('test.sor', m) - - """ - - if model.verbose: - sys.stdout.write('loading sor package file...\n') - - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - - # dataset 0 -- header - - msg = 3 * ' ' + 'Warning: load method not completed. ' + \ - 'Default sor object created.' - print(msg) - - if openfile: - f.close() - - # set package unit number - unitnumber = None - filenames = [None] - if ext_unit_dict is not None: - unitnumber, filenames[0] = \ - model.get_ext_dict_attr(ext_unit_dict, - filetype=ModflowSor.ftype()) - - # create sor object - sor = ModflowSor(model, unitnumber=unitnumber, filenames=filenames) - - # return sor object - return sor - - @staticmethod - def ftype(): - return 'SOR' - - @staticmethod - def defaultunit(): - return 26 +""" +mfsor module. Contains the ModflowSor class. Note that the user can access +the ModflowSor class as `flopy.modflow.ModflowSor`. + +Additional information for this MODFLOW package can be found at the `Online +MODFLOW Guide +`_. + +""" + +import sys + +from ..pakbase import Package + + +class ModflowSor(Package): + """ + MODFLOW Slice-successive overrelaxation Package Class. + + Parameters + ---------- + model : model object + The model object (of type :class:flopy.modflow.mf.Modflow) to which + this package will be added. + mxiter : integer + The maximum number of iterations allowed in a time step. + (default is 200) + accl : float + The acceleration variable, which must be greater than zero + and is generally between 1. and 2. (default is 1) + hclose : float > 0 + The head change criterion for convergence. When the maximum absolute + value of head change from all nodes during an iteration is less than + or equal to hclose, iteration stops. (default is 1e-5) + iprsor : integer > 0 + the printout interval for sor. iprsor, if equal to zero, is changed to + 999. The maximum head change (positive or negative) is printed for each + iteration of a time step whenever the time step is an even multiple of + iprsor. This printout also occurs at the end of each stress period + regardless of the value of iprsor. (default is 0) + extension : string + Filename extension (default is 'sor') + unitnumber : int + File unit number (default is None). + filenames : str or list of str + Filenames to use for the package. If filenames=None the package name + will be created using the model name and package extension. If a + single string is passed the package will be set to the string. + Default is None. 
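Given the defaults just listed, a short constructor sketch (parameter values are illustrative only; note that the constructor shown below raises an exception unless the model was created with version='mf2k'):

    >>> import flopy
    >>> ml = flopy.modflow.Modflow(version='mf2k')
    >>> sor = flopy.modflow.ModflowSor(ml, mxiter=100, accl=1.2, hclose=1e-4)
    >>> sor.write_file()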
+ + Attributes + ---------- + + Methods + ------- + + See Also + -------- + + Notes + ----- + + Examples + -------- + + >>> import flopy + >>> ml = flopy.modflow.Modflow() + >>> sor = flopy.modflow.ModflowSor(ml) + + """ + + def __init__(self, model, mxiter=200, accl=1, hclose=1e-5, iprsor=0, + extension='sor', unitnumber=None, filenames=None): + """ + Package constructor. + + """ + # set default unit number of one is not specified + if unitnumber is None: + unitnumber = ModflowSor.defaultunit() + + # set filenames + if filenames is None: + filenames = [None] + elif isinstance(filenames, str): + filenames = [filenames] + + # Fill namefile items + name = [ModflowSor.ftype()] + units = [unitnumber] + extra = [''] + + # set package name + fname = [filenames[0]] + + # Call ancestor's init to set self.parent, extension, name and + # unit number + Package.__init__(self, model, extension=extension, name=name, + unit_number=units, extra=extra, filenames=fname) + + # check if a valid model version has been specified + if model.version != 'mf2k': + err = 'Error: cannot use {} '.format(self.name) + \ + 'package with model version {}'.format(model.version) + raise Exception(err) + + self.heading = '# {} package for '.format(self.name[0]) + \ + ' {}, '.format(model.version_types[model.version]) + \ + 'generated by Flopy.' + self.url = 'sor.htm' + self.mxiter = mxiter + self.accl = accl + self.hclose = hclose + self.iprsor = iprsor + self.parent.add_package(self) + + def write_file(self): + """ + Write the package file. + + Returns + ------- + None + + """ + # Open file for writing + f = open(self.fn_path, 'w') + f.write('{}\n'.format(self.heading)) + f.write('{:10d}\n'.format(self.mxiter)) + line = '{:10.4g}{:10.4g}{:10d}\n'.format(self.accl, self.hclose, + self.iprsor) + f.write(line) + f.close() + + @staticmethod + def load(f, model, ext_unit_dict=None): + """ + Load an existing package. + + Parameters + ---------- + f : filename or file handle + File to load. + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to + which this package will be added. + ext_unit_dict : dictionary, optional + If the arrays in the file are specified using EXTERNAL, + or older style array control records, then `f` should be a file + handle. In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. + + Returns + ------- + sor : ModflowSor object + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> sor = flopy.modflow.ModflowSor.load('test.sor', m) + + """ + + if model.verbose: + sys.stdout.write('loading sor package file...\n') + + openfile = not hasattr(f, 'read') + if openfile: + filename = f + f = open(filename, 'r') + + # dataset 0 -- header + + msg = 3 * ' ' + 'Warning: load method not completed. ' + \ + 'Default sor object created.' 
+ print(msg) + + if openfile: + f.close() + + # set package unit number + unitnumber = None + filenames = [None] + if ext_unit_dict is not None: + unitnumber, filenames[0] = \ + model.get_ext_dict_attr(ext_unit_dict, + filetype=ModflowSor.ftype()) + + # create sor object + sor = ModflowSor(model, unitnumber=unitnumber, filenames=filenames) + + # return sor object + return sor + + @staticmethod + def ftype(): + return 'SOR' + + @staticmethod + def defaultunit(): + return 26 diff --git a/flopy/modflow/mfstr.py b/flopy/modflow/mfstr.py index a753e2e783..60f6f5196f 100644 --- a/flopy/modflow/mfstr.py +++ b/flopy/modflow/mfstr.py @@ -1,889 +1,889 @@ -""" -mfstr module. Contains the ModflowStr class. Note that the user can access -the ModflowStr class as `flopy.modflow.ModflowStr`. - -Additional information for this MODFLOW package can be found at the `Online -MODFLOW Guide -`_. - -""" -import sys - -import numpy as np -from ..utils import MfList -from ..pakbase import Package -from .mfparbc import ModflowParBc as mfparbc -from ..utils.recarray_utils import create_empty_recarray -from ..utils import read_fixed_var, write_fixed_var - - -class ModflowStr(Package): - """ - MODFLOW Stream Package Class. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to which - this package will be added. - mxacts : int - Maximum number of stream reaches that will be in use during any stress - period. (default is 0) - nss : int - Number of stream segments. (default is 0) - ntrib : int - The number of stream tributaries that can connect to one segment. The - program is currently dimensioned so that NTRIB cannot exceed 10. - (default is 0) - ndiv : int - A flag, which when positive, specifies that diversions from segments - are to be simulated. (default is 0) - icalc : int - A flag, which when positive, specifies that stream stages in reaches - are to be calculated. (default is 0) - const : float - Constant value used in calculating stream stage in reaches whenever - ICALC is greater than 0. This constant is 1.486 for flow units of - cubic feet per second and 1.0 for units of cubic meters per second. - The constant must be multiplied by 86,400 when using time units of - days in the simulation. If ICALC is 0, const can be any real value. - (default is 86400.) - ipakcb : int - A flag that is used to determine if cell-by-cell budget data should be - saved. If ipakcb is non-zero cell-by-cell budget data will be saved. - (default is 0). - istcb2 : int - A flag that is used flag and a unit number for the option to store - streamflow out of each reach in an unformatted (binary) file. - If istcb2 is greater than zero streamflow data will be saved. - (default is None). - dtype : tuple, list, or numpy array of numpy dtypes - is a tuple, list, or numpy array containing the dtype for - datasets 6 and 8 and the dtype for datasets 9 and 10 data in - stress_period_data and segment_data dictionaries. - (default is None) - irdflg : integer or dictionary - is a integer or dictionary containing a integer flag, when positive - suppresses printing of the stream input data for a stress period. If - an integer is passed, all stress periods will use the same value. - If a dictionary is passed, stress periods not in the dictionary will - assigned a value of 1. Default is None which will assign a value of 1 - to all stress periods. 
- iptflg : integer or dictionary - is a integer or dictionary containing a integer flag, when positive - suppresses printing of stream results for a stress period. If an - integer is passed, all stress periods will use the same value. - If a dictionary is passed, stress periods not in the dictionary will - assigned a value of 1. Default is None which will assign a value of 1 - to all stress periods. - stress_period_data : dictionary of reach data - Each dictionary contains a list of str reach data for a stress period. - - Each stress period in the dictionary data contains data for - datasets 6 and 8. - - The value for stress period data for a stress period can be an integer - (-1 or 0), a list of lists, a numpy array, or a numpy recarray. If - stress period data for a stress period contains an integer, a -1 - denotes data from the previous stress period will be reused and a 0 - indicates there are no str reaches for this stress period. - - Otherwise stress period data for a stress period should contain mxacts - or fewer rows of data containing data for each reach. Reach data are - specified through definition of layer (int), row (int), column (int), - segment number (int), sequential reach number (int), flow entering a - segment (float), stream stage (float), streambed hydraulic conductance - (float), streambed bottom elevation (float), streambed top elevation - (float), stream width (float), stream slope (float), roughness - coefficient (float), and auxiliary variable data for auxiliary variables - defined in options (float). - - If icalc=0 is specified, stream width, stream slope, and roughness - coefficients, are not used and can be any value for each stress period. - If data are specified for dataset 6 for a given stress period and - icalc>0, then stream width, stream slope, and roughness coefficients - should be appropriately set. - - The simplest form is a dictionary with a lists of boundaries for each - stress period, where each list of boundaries itself is a list of - boundaries. Indices of the dictionary are the numbers of the stress - period. For example, if mxacts=3 this gives the form of:: - - stress_period_data = - {0: [ - [lay, row, col, seg, reach, flow, stage, cond, sbot, stop, width, slope, rough], - [lay, row, col, seg, reach, flow, stage, cond, sbot, stop, width, slope, rough], - [lay, row, col, seg, reach, flow, stage, cond, sbot, stop, width, slope, rough]] - ], - 1: [ - [lay, row, col, seg, reach, flow, stage, cond, sbot, stop, width, slope, rough], - [lay, row, col, seg, reach, flow, stage, cond, sbot, stop, width, slope, rough], - [lay, row, col, seg, reach, flow, stage, cond, sbot, stop, width, slope, rough]] - ], ... - kper: - [ - [lay, row, col, seg, reach, flow, stage, cond, sbot, stop, width, slope, rough], - [lay, row, col, seg, reach, flow, stage, cond, sbot, stop, width, slope, rough], - [lay, row, col, seg, reach, flow, stage, cond, sbot, stop, width, slope, rough]] - ] - } - - segment_data : dictionary of str segment data - Each dictionary contains a list of segment str data for a stress period. - - Each stress period in the dictionary data contains data for - datasets 9, and 10. Segment data for a stress period are ignored if - a integer value is specified for stress period data. - - The value for segment data for a stress period can be an integer - (-1 or 0), a list of lists, a numpy array, or a numpy recarray. 
If - segment data for a stress period contains an integer, a -1 denotes - data from the previous stress period will be reused and a 0 indicates - there are no str segments for this stress period. - - Otherwise stress period data for a stress period should contain nss - rows of data containing data for each segment. Segment data are - specified through definition of itrib (int) data for up to 10 - tributaries and iupseg (int) data. - - If ntrib=0 is specified, itrib values are not used and can be any value - for each stress period. If data are specified for dataset 6 for a given - stress period and ntrib>0, then itrib data should be specified for - columns 0:ntrib. - - If ndiv=0 is specified, iupseg values are not used and can be any value - for each stress period. If data are specified for dataset 6 for a given - stress period and ndiv>0, then iupseg data should be specified for the - column in the dataset [10]. - - The simplest form is a dictionary with a lists of boundaries for each - stress period, where each list of boundaries itself is a list of - boundaries. Indices of the dictionary are the numbers of the stress - period. For example, if nss=2 and ntrib>0 and/or ndiv>0 this gives the - form of:: - - segment_data = - {0: [ - [itrib1, itrib2, itrib3, itrib4, itrib5, itrib6, itrib7, itrib8, itrib9, itrib10, iupseg], - [itrib1, itrib2, itrib3, itrib4, itrib5, itrib6, itrib7, itrib8, itrib9, itrib10, iupseg], - ], - 1: [ - [itrib1, itrib2, itrib3, itrib4, itrib5, itrib6, itrib7, itrib8, itrib9, itrib10, iupseg], - [itrib1, itrib2, itrib3, itrib4, itrib5, itrib6, itrib7, itrib8, itrib9, itrib10, iupseg], - ], ... - kper: - [ - [itrib1, itrib2, itrib3, itrib4, itrib5, itrib6, itrib7, itrib8, itrib9, itrib10, iupseg], - [itrib1, itrib2, itrib3, itrib4, itrib5, itrib6, itrib7, itrib8, itrib9, itrib10, iupseg], - ] - } - - options : list of strings - Package options. Auxiliary variables included as options should be - constructed as options=['AUXILIARY IFACE', 'AUX xyx']. Either - 'AUXILIARY' or 'AUX' can be specified (case insensitive). - (default is None). - extension : string - Filename extension (default is 'str') - unitnumber : int - File unit number (default is None). - filenames : str or list of str - Filenames to use for the package and the output files. If - filenames=None the package name will be created using the model name - and package extension and the cbc output and str output name will be - created using the model name and .cbc the .sfr.bin/.sfr.out extensions - (for example, modflowtest.cbc, and modflowtest.str.bin), if ipakcbc and - istcb2 are numbers greater than zero. If a single string is passed - the package will be set to the string and cbc and sf routput names - will be created using the model name and .cbc and .str.bin/.str.out - extensions, if ipakcbc and istcb2 are numbers greater than zero. To - define the names for all package files (input and output) the length - of the list of strings should be 3. Default is None. - - Methods - ------- - - See Also - -------- - - Notes - ----- - Parameters are not supported in FloPy. 
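To make the reach and segment layouts described above concrete, a sketch of a two-period STR setup (all numeric values are illustrative, not from this patch; per the convention above, -1 reuses the previous period's data):

    >>> import flopy
    >>> m = flopy.modflow.Modflow()
    >>> # one reach: lay, row, col, seg, reach, flow, stage, cond, sbot, stop,
    >>> #            width, slope, rough
    >>> spd = {0: [[0, 2, 3, 1, 1, 15.6, 10., 1050., 8., 9., 5., .001, .03]],
    ...        1: -1}
    >>> seg = {0: [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 1: -1}
    >>> strm = flopy.modflow.ModflowStr(m, mxacts=1, nss=1,
    ...                                 stress_period_data=spd,
    ...                                 segment_data=seg)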
- - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> strd = {} - >>> strd[0] = [[2, 3, 4, 15.6, 1050., -4]] #this str boundary will be - >>> #applied to all stress periods - >>> str = flopy.modflow.ModflowStr(m, stress_period_data=strd) - - """ - - def __init__(self, model, mxacts=0, nss=0, ntrib=0, ndiv=0, icalc=0, - const=86400., ipakcb=None, istcb2=None, - dtype=None, stress_period_data=None, segment_data=None, - irdflg=None, iptflg=None, extension='str', - unitnumber=None, filenames=None, options=None, **kwargs): - """ - Package constructor. - - """ - # set default unit number of one is not specified - if unitnumber is None: - unitnumber = ModflowStr.defaultunit() - - # set filenames - if filenames is None: - filenames = [None, None, None] - elif isinstance(filenames, str): - filenames = [filenames, None, None] - elif isinstance(filenames, list): - if len(filenames) < 3: - for idx in range(len(filenames), 3): - filenames.append(None) - - # update external file information with cbc output, if necessary - if ipakcb is not None: - fname = filenames[1] - model.add_output_file(ipakcb, fname=fname, - package=ModflowStr.ftype()) - else: - ipakcb = 0 - - if istcb2 is not None: - fname = filenames[2] - model.add_output_file(istcb2, fname=fname, - package=ModflowStr.ftype()) - else: - ipakcb = 0 - - # set filenames - if filenames is None: - filenames = [None] - elif isinstance(filenames, str): - filenames = [filenames] - - # Fill namefile items - name = [ModflowStr.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and - # unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - self.heading = '# {} package for '.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' 
- self.url = 'str.htm' - self.mxacts = mxacts - self.nss = nss - self.icalc = icalc - self.ntrib = ntrib - self.ndiv = ndiv - self.const = const - self.ipakcb = ipakcb - self.istcb2 = istcb2 - - # issue exception if ntrib is greater than 10 - if ntrib > 10: - raise Exception('ModflowStr error: ntrib must be less that 10: ' + - 'specified value = {}'.format(ntrib)) - - if options is None: - options = [] - self.options = options - - # parameters are not supported - self.npstr = 0 - - # dataset 5 - # check type of irdflg and iptflg - msg = '' - if irdflg is not None and not isinstance(irdflg, (int, dict)): - msg = 'irdflg' - if iptflg is not None and not isinstance(iptflg, (int, dict)): - if len(msg) > 0: - msg += ' and ' - msg += 'iptflg' - if len(msg) > 0: - msg += ' must be an integer or a dictionary' - raise TypeError(msg) - - # process irdflg - self.irdflg = {} - for n in range(self.parent.nper): - if irdflg is None: - self.irdflg[n] = 1 - elif isinstance(irdflg, int): - self.irdflg[n] = irdflg - elif isinstance(irdflg, dict): - if n in irdflg: - self.irdflg[n] = irdflg[n] - else: - self.irdflg[n] = 1 - - # process iptflg - self.iptflg = {} - for n in range(self.parent.nper): - if iptflg is None: - self.iptflg[n] = 1 - elif isinstance(iptflg, int): - self.iptflg[n] = iptflg - elif isinstance(iptflg, dict): - if n in iptflg: - self.iptflg[n] = iptflg[n] - else: - self.iptflg[n] = 1 - - # determine dtype for dataset 6 - if dtype is not None: - self.dtype = dtype[0] - self.dtype2 = dtype[1] - else: - aux_names = [] - if len(options) > 0: - aux_names = [] - it = 0 - while True: - if 'aux' in options[it].lower(): - t = options[it].split() - aux_names.append(t[-1].lower()) - it += 1 - if it >= len(options): - break - if len(aux_names) < 1: - aux_names = None - d, d2 = self.get_empty(1, 1, aux_names=aux_names, - structured=self.parent.structured) - self.dtype = d.dtype - self.dtype2 = d2.dtype - - # convert stress_period_data for datasets 6 and 8 to a recarray if - # necessary - if stress_period_data is not None: - for key, d in stress_period_data.items(): - if isinstance(d, list): - d = np.array(d) - if isinstance(d, np.recarray): - e = 'ModflowStr error: recarray dtype: ' + \ - str(d.dtype) + ' does not match ' + \ - 'self dtype: ' + str(self.dtype) - assert d.dtype == self.dtype, e - elif isinstance(d, np.ndarray): - d = np.core.records.fromarrays(d.transpose(), - dtype=self.dtype) - elif isinstance(d, int): - if model.verbose: - if d < 0: - msg = 3 * ' ' + \ - 'reusing str data from previous stress period' - print(msg) - elif d == 0: - msg = 3 * ' ' + 'no str data for stress ' + \ - 'period {}'.format(key) - print(msg) - else: - e = 'ModflowStr error: unsupported data type: ' + \ - str(type(d)) + ' at kper ' + '{0:d}'.format(key) - raise Exception(e) - - # add stress_period_data to package - self.stress_period_data = MfList(self, stress_period_data) - - # convert segment_data for datasets 9 and 10 to a recarray if necessary - if segment_data is not None: - for key, d in segment_data.items(): - if isinstance(d, list): - d = np.array(d) - if isinstance(d, np.recarray): - e = 'ModflowStr error: recarray dtype: ' + \ - str(d.dtype) + ' does not match ' + \ - 'self dtype: ' + str(self.dtype2) - assert d.dtype == self.dtype2, e - elif isinstance(d, np.ndarray): - d = np.core.records.fromarrays(d.transpose(), - dtype=self.dtype2) - elif isinstance(d, int): - if model.verbose: - if d < 0: - msg = 3 * ' ' + 'reusing str segment data ' + \ - 'from previous stress period' - print(msg) - elif d == 0: - 
msg = 3 * ' ' + 'no str segment data for ' + \ - 'stress period {}'.format(key) - print(msg) - else: - e = 'ModflowStr error: unsupported data type: ' + \ - str(type(d)) + ' at kper ' + '{0:d}'.format(key) - raise Exception(e) - - # add segment_data to package - self.segment_data = segment_data - - self.parent.add_package(self) - return - - @staticmethod - def get_empty(ncells=0, nss=0, aux_names=None, structured=True): - # get an empty recarray that corresponds to dtype - dtype, dtype2 = ModflowStr.get_default_dtype(structured=structured) - if aux_names is not None: - dtype = Package.add_to_dtype(dtype, aux_names, np.float32) - return ( - create_empty_recarray(ncells, dtype=dtype, default_value=-1.0E+10), - create_empty_recarray(nss, dtype=dtype2, default_value=0)) - - @staticmethod - def get_default_dtype(structured=True): - if structured: - dtype = np.dtype([("k", np.int), ("i", np.int), ("j", np.int), - ("segment", np.int), ("reach", np.int), - ("flow", np.float32), ("stage", np.float32), - ("cond", np.float32), ("sbot", np.float32), - ("stop", np.float32), - ("width", np.float32), ("slope", np.float32), - ("rough", np.float32)]) - else: - dtype = np.dtype([("node", np.int), - ("segment", np.int), ("reach", np.int), - ("flow", np.float32), ("stage", np.float32), - ("cond", np.float32), ("sbot", np.float32), - ("stop", np.float32), - ("width", np.float32), ("slope", np.float32), - ("rough", np.float32)]) - - dtype2 = np.dtype([("itrib01", np.int), ("itrib02", np.int), - ("itrib03", np.int), ("itrib04", np.int), - ("itrib05", np.int), ("itrib06", np.int), - ("itrib07", np.int), ("itrib08", np.int), - ("itrib09", np.int), ("itrib10", np.int), - ("iupseg", np.int)]) - return dtype, dtype2 - - def ncells(self): - # Return the maximum number of cells that have a stream - # (developed for MT3DMS SSM package) - return self.mxacts - - def write_file(self): - """ - Write the package file. 
- - Returns - ------- - None - - """ - # set free variable - free = self.parent.free_format_input - - # open the str file - f_str = open(self.fn_path, 'w') - - # dataset 0 - f_str.write('{0}\n'.format(self.heading)) - - # dataset 1 - parameters not supported on write - - # dataset 2 - line = write_fixed_var([self.mxacts, self.nss, - self.ntrib, self.ndiv, - self.icalc, self.const, - self.ipakcb, self.istcb2], - free=free) - for opt in self.options: - line = line.rstrip() - line += ' ' + str(opt) + '\n' - f_str.write(line) - - # dataset 3 - parameters not supported on write - # dataset 4a - parameters not supported on write - # dataset 4b - parameters not supported on write - - nrow, ncol, nlay, nper = self.parent.get_nrow_ncol_nlay_nper() - - kpers = list(self.stress_period_data.data.keys()) - kpers.sort() - - # set column lengths for fixed format input files for - # datasets 6, 8, and 9 - fmt6 = [5, 5, 5, 5, 5, 15, 10, 10, 10, 10] - fmt8 = [10, 10, 10] - fmt9 = 5 - - for iper in range(nper): - if iper not in kpers: - if iper == 0: - itmp = 0 - else: - itmp = -1 - else: - tdata = self.stress_period_data[iper] - sdata = self.segment_data[iper] - if isinstance(tdata, int): - itmp = tdata - elif tdata is None: - itmp = -1 - else: - itmp = tdata.shape[0] - line = '{:10d}'.format(itmp) + \ - '{:10d}'.format(self.irdflg[iper]) + \ - '{:10d}'.format(self.iptflg[iper]) + \ - ' # stress period {}\n'.format(iper + 1) - f_str.write(line) - if itmp > 0: - tdata = np.recarray.copy(tdata) - # dataset 6 - for line in tdata: - line['k'] += 1 - line['i'] += 1 - line['j'] += 1 - ds6 = [] - for idx, v in enumerate(line): - if idx < 10 or idx > 12: - ds6.append(v) - if idx > 12: - fmt6 += [10] - f_str.write(write_fixed_var(ds6, ipos=fmt6, free=free)) - - # dataset 8 - if self.icalc > 0: - for line in tdata: - ds8 = [] - for idx in range(10, 13): - ds8.append(line[idx]) - f_str.write(write_fixed_var(ds8, ipos=fmt8, free=free)) - - # dataset 9 - if self.ntrib > 0: - for line in sdata: - ds9 = [] - for idx in range(self.ntrib): - ds9.append(line[idx]) - f_str.write(write_fixed_var(ds9, length=fmt9, - free=free)) - - # dataset 10 - if self.ndiv > 0: - for line in sdata: - f_str.write(write_fixed_var([line[-1]], - length=10, free=free)) - - # close the str file - f_str.close() - - @staticmethod - def load(f, model, nper=None, ext_unit_dict=None): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. - nper : int - The number of stress periods. If nper is None, then nper will be - obtained from the model object. (default is None). - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - - Returns - ------- - str : ModflowStr object - ModflowStr object. 
- - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> strm = flopy.modflow.ModflowStr.load('test.str', m) - - """ - # set local variables - free = model.free_format_input - fmt2 = [10, 10, 10, 10, 10, 10, 10, 10] - fmt6 = [5, 5, 5, 5, 5, 15, 10, 10, 10, 10] - type6 = [np.int32, np.int32, np.int32, np.int32, np.int32, - np.float32, np.float32, np.float32, np.float32, np.float32] - fmt8 = [10, 10, 10] - fmt9 = [5] - - if model.verbose: - sys.stdout.write('loading str package file...\n') - - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - - # dataset 0 -- header - while True: - line = f.readline() - if line[0] != '#': - break - - # read dataset 1 - optional parameters - npstr, mxl = 0, 0 - t = line.strip().split() - if t[0].lower() == 'parameter': - if model.verbose: - sys.stdout.write(' loading str dataset 1\n') - npstr = np.int32(t[1]) - mxl = np.int32(t[2]) - - # read next line - line = f.readline() - - # data set 2 - if model.verbose: - sys.stdout.write(' loading str dataset 2\n') - t = read_fixed_var(line, ipos=fmt2, free=free) - mxacts = np.int32(t[0]) - nss = np.int32(t[1]) - ntrib = np.int32(t[2]) - ndiv = np.int32(t[3]) - icalc = np.int32(t[4]) - const = np.float32(t[5]) - istcb1 = np.int32(t[6]) - istcb2 = np.int32(t[7]) - ipakcb = 0 - try: - if istcb1 != 0: - ipakcb = istcb1 - model.add_pop_key_list(istcb1) - except: - if model.verbose: - print(' could not remove unit number {}'.format(istcb1)) - try: - if istcb2 != 0: - ipakcb = 53 - model.add_pop_key_list(istcb2) - except: - if model.verbose: - print(' could not remove unit number {}'.format(istcb2)) - - options = [] - aux_names = [] - naux = 0 - if 'AUX' in line.upper(): - t = line.strip().split() - it = 8 - while it < len(t): - toption = t[it] - if 'aux' in toption.lower(): - naux += 1 - options.append(' '.join(t[it:it + 2])) - aux_names.append(t[it + 1].lower()) - it += 1 - it += 1 - - # read parameter data - if npstr > 0: - dt = ModflowStr.get_empty(1, aux_names=aux_names).dtype - pak_parms = mfparbc.load(f, npstr, dt, model, ext_unit_dict, - model.verbose) - - if nper is None: - nper = model.nper - - irdflg = {} - iptflg = {} - stress_period_data = {} - segment_data = {} - for iper in range(nper): - if model.verbose: - print(" loading " + str( - ModflowStr) + " for kper {0:5d}".format(iper + 1)) - line = f.readline() - if line == '': - break - t = line.strip().split() - - # set itmp - itmp = int(t[0]) - - # set irdflg and iptflg - initialize to 0 since this is how - # MODFLOW would interpret a missing value - iflg0, iflg1 = 0, 0 - if len(t) > 1: - iflg0 = int(t[1]) - if len(t) > 2: - iflg1 = int(t[2]) - irdflg[iper] = iflg0 - iptflg[iper] = iflg1 - - if itmp == 0: - bnd_output = None - seg_output = None - current, current_seg = ModflowStr.get_empty(itmp, nss, - aux_names=aux_names) - elif itmp > 0: - if npstr > 0: - partype = ['cond'] - if model.verbose: - print(" reading str dataset 7") - for iparm in range(itmp): - line = f.readline() - t = line.strip().split() - pname = t[0].lower() - iname = 'static' - try: - tn = t[1] - c = tn.lower() - instance_dict = pak_parms.bc_parms[pname][1] - if c in instance_dict: - iname = c - else: - iname = 'static' - except: - if model.verbose: - print(' implicit static instance for ' + - 'parameter {}'.format(pname)) - - par_dict, current_dict = pak_parms.get(pname) - data_dict = current_dict[iname] - - current = ModflowStr.get_empty(par_dict['nlst'], - aux_names=aux_names) - - # get appropriate parval - if 
model.mfpar.pval is None: - parval = np.float(par_dict['parval']) - else: - try: - parval = np.float( - model.mfpar.pval.pval_dict[pname]) - except: - parval = np.float(par_dict['parval']) - - # fill current parameter data (par_current) - for ibnd, t in enumerate(data_dict): - current[ibnd] = tuple(t[:len(current.dtype.names)]) - - else: - if model.verbose: - print(" reading str dataset 6") - current, current_seg = ModflowStr.get_empty(itmp, nss, - aux_names=aux_names) - for ibnd in range(itmp): - line = f.readline() - t = read_fixed_var(line, ipos=fmt6, free=free) - v = [tt(vv) for tt, vv in zip(type6, t)] - ii = len(fmt6) - for idx, name in enumerate(current.dtype.names[:ii]): - current[ibnd][name] = v[idx] - if len(aux_names) > 0: - if free: - tt = line.strip().split()[len(fmt6):] - else: - istart = 0 - for i in fmt6: - istart += i - tt = line[istart:].strip().split() - for iaux, name in enumerate(aux_names): - current[ibnd][name] = np.float32(tt[iaux]) - - # convert indices to zero-based - current['k'] -= 1 - current['i'] -= 1 - current['j'] -= 1 - - # read dataset 8 - if icalc > 0: - if model.verbose: - print(" reading str dataset 8") - for ibnd in range(itmp): - line = f.readline() - t = read_fixed_var(line, ipos=fmt8, free=free) - ipos = 0 - for idx in range(10, 13): - current[ibnd][idx] = np.float32(t[ipos]) - ipos += 1 - - bnd_output = np.recarray.copy(current) - - # read data set 9 - if ntrib > 0: - if model.verbose: - print(" reading str dataset 9") - for iseg in range(nss): - line = f.readline() - t = read_fixed_var(line, ipos=fmt9 * ntrib, free=free) - v = [np.float32(vt) for vt in t] - names = current_seg.dtype.names[:ntrib] - for idx, name in enumerate(names): - current_seg[iseg][idx] = v[idx] - - # read data set 10 - if ndiv > 0: - if model.verbose: - print(" reading str dataset 10") - for iseg in range(nss): - line = f.readline() - t = read_fixed_var(line, length=10, free=free) - current_seg[iseg]['iupseg'] = np.int32(t[0]) - - seg_output = np.recarray.copy(current_seg) - - else: - bnd_output = -1 - seg_output = -1 - - if bnd_output is None: - stress_period_data[iper] = itmp - segment_data[iper] = itmp - else: - stress_period_data[iper] = bnd_output - segment_data[iper] = seg_output - - if openfile: - f.close() - - # determine specified unit number - unitnumber = None - filenames = [None, None, None] - if ext_unit_dict is not None: - unitnumber, filenames[0] = \ - model.get_ext_dict_attr(ext_unit_dict, - filetype=ModflowStr.ftype()) - if ipakcb > 0: - iu, filenames[1] = \ - model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb) - if abs(istcb2) > 0: - iu, filenames[2] = \ - model.get_ext_dict_attr(ext_unit_dict, unit=abs(istcb2)) - - strpak = ModflowStr(model, mxacts=mxacts, nss=nss, - ntrib=ntrib, ndiv=ndiv, icalc=icalc, - const=const, ipakcb=ipakcb, istcb2=istcb2, - iptflg=iptflg, irdflg=irdflg, - stress_period_data=stress_period_data, - segment_data=segment_data, - options=options, unitnumber=unitnumber, - filenames=filenames) - return strpak - - @staticmethod - def ftype(): - return 'STR' - - @staticmethod - def defaultunit(): - return 118 +""" +mfstr module. Contains the ModflowStr class. Note that the user can access +the ModflowStr class as `flopy.modflow.ModflowStr`. + +Additional information for this MODFLOW package can be found at the `Online +MODFLOW Guide +`_. 
+ +""" +import sys + +import numpy as np +from ..utils import MfList +from ..pakbase import Package +from .mfparbc import ModflowParBc as mfparbc +from ..utils.recarray_utils import create_empty_recarray +from ..utils import read_fixed_var, write_fixed_var + + +class ModflowStr(Package): + """ + MODFLOW Stream Package Class. + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to which + this package will be added. + mxacts : int + Maximum number of stream reaches that will be in use during any stress + period. (default is 0) + nss : int + Number of stream segments. (default is 0) + ntrib : int + The number of stream tributaries that can connect to one segment. The + program is currently dimensioned so that NTRIB cannot exceed 10. + (default is 0) + ndiv : int + A flag, which when positive, specifies that diversions from segments + are to be simulated. (default is 0) + icalc : int + A flag, which when positive, specifies that stream stages in reaches + are to be calculated. (default is 0) + const : float + Constant value used in calculating stream stage in reaches whenever + ICALC is greater than 0. This constant is 1.486 for flow units of + cubic feet per second and 1.0 for units of cubic meters per second. + The constant must be multiplied by 86,400 when using time units of + days in the simulation. If ICALC is 0, const can be any real value. + (default is 86400.) + ipakcb : int + A flag that is used to determine if cell-by-cell budget data should be + saved. If ipakcb is non-zero cell-by-cell budget data will be saved. + (default is 0). + istcb2 : int + A flag that is used flag and a unit number for the option to store + streamflow out of each reach in an unformatted (binary) file. + If istcb2 is greater than zero streamflow data will be saved. + (default is None). + dtype : tuple, list, or numpy array of numpy dtypes + is a tuple, list, or numpy array containing the dtype for + datasets 6 and 8 and the dtype for datasets 9 and 10 data in + stress_period_data and segment_data dictionaries. + (default is None) + irdflg : integer or dictionary + is a integer or dictionary containing a integer flag, when positive + suppresses printing of the stream input data for a stress period. If + an integer is passed, all stress periods will use the same value. + If a dictionary is passed, stress periods not in the dictionary will + assigned a value of 1. Default is None which will assign a value of 1 + to all stress periods. + iptflg : integer or dictionary + is a integer or dictionary containing a integer flag, when positive + suppresses printing of stream results for a stress period. If an + integer is passed, all stress periods will use the same value. + If a dictionary is passed, stress periods not in the dictionary will + assigned a value of 1. Default is None which will assign a value of 1 + to all stress periods. + stress_period_data : dictionary of reach data + Each dictionary contains a list of str reach data for a stress period. + + Each stress period in the dictionary data contains data for + datasets 6 and 8. + + The value for stress period data for a stress period can be an integer + (-1 or 0), a list of lists, a numpy array, or a numpy recarray. If + stress period data for a stress period contains an integer, a -1 + denotes data from the previous stress period will be reused and a 0 + indicates there are no str reaches for this stress period. 
+ + Otherwise stress period data for a stress period should contain mxacts + or fewer rows of data containing data for each reach. Reach data are + specified through definition of layer (int), row (int), column (int), + segment number (int), sequential reach number (int), flow entering a + segment (float), stream stage (float), streambed hydraulic conductance + (float), streambed bottom elevation (float), streambed top elevation + (float), stream width (float), stream slope (float), roughness + coefficient (float), and auxiliary variable data for auxiliary variables + defined in options (float). + + If icalc=0 is specified, stream width, stream slope, and roughness + coefficients, are not used and can be any value for each stress period. + If data are specified for dataset 6 for a given stress period and + icalc>0, then stream width, stream slope, and roughness coefficients + should be appropriately set. + + The simplest form is a dictionary with a lists of boundaries for each + stress period, where each list of boundaries itself is a list of + boundaries. Indices of the dictionary are the numbers of the stress + period. For example, if mxacts=3 this gives the form of:: + + stress_period_data = + {0: [ + [lay, row, col, seg, reach, flow, stage, cond, sbot, stop, width, slope, rough], + [lay, row, col, seg, reach, flow, stage, cond, sbot, stop, width, slope, rough], + [lay, row, col, seg, reach, flow, stage, cond, sbot, stop, width, slope, rough]] + ], + 1: [ + [lay, row, col, seg, reach, flow, stage, cond, sbot, stop, width, slope, rough], + [lay, row, col, seg, reach, flow, stage, cond, sbot, stop, width, slope, rough], + [lay, row, col, seg, reach, flow, stage, cond, sbot, stop, width, slope, rough]] + ], ... + kper: + [ + [lay, row, col, seg, reach, flow, stage, cond, sbot, stop, width, slope, rough], + [lay, row, col, seg, reach, flow, stage, cond, sbot, stop, width, slope, rough], + [lay, row, col, seg, reach, flow, stage, cond, sbot, stop, width, slope, rough]] + ] + } + + segment_data : dictionary of str segment data + Each dictionary contains a list of segment str data for a stress period. + + Each stress period in the dictionary data contains data for + datasets 9, and 10. Segment data for a stress period are ignored if + a integer value is specified for stress period data. + + The value for segment data for a stress period can be an integer + (-1 or 0), a list of lists, a numpy array, or a numpy recarray. If + segment data for a stress period contains an integer, a -1 denotes + data from the previous stress period will be reused and a 0 indicates + there are no str segments for this stress period. + + Otherwise stress period data for a stress period should contain nss + rows of data containing data for each segment. Segment data are + specified through definition of itrib (int) data for up to 10 + tributaries and iupseg (int) data. + + If ntrib=0 is specified, itrib values are not used and can be any value + for each stress period. If data are specified for dataset 6 for a given + stress period and ntrib>0, then itrib data should be specified for + columns 0:ntrib. + + If ndiv=0 is specified, iupseg values are not used and can be any value + for each stress period. If data are specified for dataset 6 for a given + stress period and ndiv>0, then iupseg data should be specified for the + column in the dataset [10]. + + The simplest form is a dictionary with a lists of boundaries for each + stress period, where each list of boundaries itself is a list of + boundaries. 
Indices of the dictionary are the numbers of the stress + period. For example, if nss=2 and ntrib>0 and/or ndiv>0 this gives the + form of:: + + segment_data = + {0: [ + [itrib1, itrib2, itrib3, itrib4, itrib5, itrib6, itrib7, itrib8, itrib9, itrib10, iupseg], + [itrib1, itrib2, itrib3, itrib4, itrib5, itrib6, itrib7, itrib8, itrib9, itrib10, iupseg], + ], + 1: [ + [itrib1, itrib2, itrib3, itrib4, itrib5, itrib6, itrib7, itrib8, itrib9, itrib10, iupseg], + [itrib1, itrib2, itrib3, itrib4, itrib5, itrib6, itrib7, itrib8, itrib9, itrib10, iupseg], + ], ... + kper: + [ + [itrib1, itrib2, itrib3, itrib4, itrib5, itrib6, itrib7, itrib8, itrib9, itrib10, iupseg], + [itrib1, itrib2, itrib3, itrib4, itrib5, itrib6, itrib7, itrib8, itrib9, itrib10, iupseg], + ] + } + + options : list of strings + Package options. Auxiliary variables included as options should be + constructed as options=['AUXILIARY IFACE', 'AUX xyx']. Either + 'AUXILIARY' or 'AUX' can be specified (case insensitive). + (default is None). + extension : string + Filename extension (default is 'str') + unitnumber : int + File unit number (default is None). + filenames : str or list of str + Filenames to use for the package and the output files. If + filenames=None the package name will be created using the model name + and package extension and the cbc output and str output name will be + created using the model name and .cbc the .sfr.bin/.sfr.out extensions + (for example, modflowtest.cbc, and modflowtest.str.bin), if ipakcbc and + istcb2 are numbers greater than zero. If a single string is passed + the package will be set to the string and cbc and sf routput names + will be created using the model name and .cbc and .str.bin/.str.out + extensions, if ipakcbc and istcb2 are numbers greater than zero. To + define the names for all package files (input and output) the length + of the list of strings should be 3. Default is None. + + Methods + ------- + + See Also + -------- + + Notes + ----- + Parameters are not supported in FloPy. + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> strd = {} + >>> strd[0] = [[2, 3, 4, 15.6, 1050., -4]] #this str boundary will be + >>> #applied to all stress periods + >>> str = flopy.modflow.ModflowStr(m, stress_period_data=strd) + + """ + + def __init__(self, model, mxacts=0, nss=0, ntrib=0, ndiv=0, icalc=0, + const=86400., ipakcb=None, istcb2=None, + dtype=None, stress_period_data=None, segment_data=None, + irdflg=None, iptflg=None, extension='str', + unitnumber=None, filenames=None, options=None, **kwargs): + """ + Package constructor. 
+ + """ + # set default unit number of one is not specified + if unitnumber is None: + unitnumber = ModflowStr.defaultunit() + + # set filenames + if filenames is None: + filenames = [None, None, None] + elif isinstance(filenames, str): + filenames = [filenames, None, None] + elif isinstance(filenames, list): + if len(filenames) < 3: + for idx in range(len(filenames), 3): + filenames.append(None) + + # update external file information with cbc output, if necessary + if ipakcb is not None: + fname = filenames[1] + model.add_output_file(ipakcb, fname=fname, + package=ModflowStr.ftype()) + else: + ipakcb = 0 + + if istcb2 is not None: + fname = filenames[2] + model.add_output_file(istcb2, fname=fname, + package=ModflowStr.ftype()) + else: + ipakcb = 0 + + # set filenames + if filenames is None: + filenames = [None] + elif isinstance(filenames, str): + filenames = [filenames] + + # Fill namefile items + name = [ModflowStr.ftype()] + units = [unitnumber] + extra = [''] + + # set package name + fname = [filenames[0]] + + # Call ancestor's init to set self.parent, extension, name and + # unit number + Package.__init__(self, model, extension=extension, name=name, + unit_number=units, extra=extra, filenames=fname) + + self.heading = '# {} package for '.format(self.name[0]) + \ + ' {}, '.format(model.version_types[model.version]) + \ + 'generated by Flopy.' + self.url = 'str.htm' + self.mxacts = mxacts + self.nss = nss + self.icalc = icalc + self.ntrib = ntrib + self.ndiv = ndiv + self.const = const + self.ipakcb = ipakcb + self.istcb2 = istcb2 + + # issue exception if ntrib is greater than 10 + if ntrib > 10: + raise Exception('ModflowStr error: ntrib must be less that 10: ' + + 'specified value = {}'.format(ntrib)) + + if options is None: + options = [] + self.options = options + + # parameters are not supported + self.npstr = 0 + + # dataset 5 + # check type of irdflg and iptflg + msg = '' + if irdflg is not None and not isinstance(irdflg, (int, dict)): + msg = 'irdflg' + if iptflg is not None and not isinstance(iptflg, (int, dict)): + if len(msg) > 0: + msg += ' and ' + msg += 'iptflg' + if len(msg) > 0: + msg += ' must be an integer or a dictionary' + raise TypeError(msg) + + # process irdflg + self.irdflg = {} + for n in range(self.parent.nper): + if irdflg is None: + self.irdflg[n] = 1 + elif isinstance(irdflg, int): + self.irdflg[n] = irdflg + elif isinstance(irdflg, dict): + if n in irdflg: + self.irdflg[n] = irdflg[n] + else: + self.irdflg[n] = 1 + + # process iptflg + self.iptflg = {} + for n in range(self.parent.nper): + if iptflg is None: + self.iptflg[n] = 1 + elif isinstance(iptflg, int): + self.iptflg[n] = iptflg + elif isinstance(iptflg, dict): + if n in iptflg: + self.iptflg[n] = iptflg[n] + else: + self.iptflg[n] = 1 + + # determine dtype for dataset 6 + if dtype is not None: + self.dtype = dtype[0] + self.dtype2 = dtype[1] + else: + aux_names = [] + if len(options) > 0: + aux_names = [] + it = 0 + while True: + if 'aux' in options[it].lower(): + t = options[it].split() + aux_names.append(t[-1].lower()) + it += 1 + if it >= len(options): + break + if len(aux_names) < 1: + aux_names = None + d, d2 = self.get_empty(1, 1, aux_names=aux_names, + structured=self.parent.structured) + self.dtype = d.dtype + self.dtype2 = d2.dtype + + # convert stress_period_data for datasets 6 and 8 to a recarray if + # necessary + if stress_period_data is not None: + for key, d in stress_period_data.items(): + if isinstance(d, list): + d = np.array(d) + if isinstance(d, np.recarray): + e = 'ModflowStr 
error: recarray dtype: ' + \ + str(d.dtype) + ' does not match ' + \ + 'self dtype: ' + str(self.dtype) + assert d.dtype == self.dtype, e + elif isinstance(d, np.ndarray): + d = np.core.records.fromarrays(d.transpose(), + dtype=self.dtype) + elif isinstance(d, int): + if model.verbose: + if d < 0: + msg = 3 * ' ' + \ + 'reusing str data from previous stress period' + print(msg) + elif d == 0: + msg = 3 * ' ' + 'no str data for stress ' + \ + 'period {}'.format(key) + print(msg) + else: + e = 'ModflowStr error: unsupported data type: ' + \ + str(type(d)) + ' at kper ' + '{0:d}'.format(key) + raise Exception(e) + + # add stress_period_data to package + self.stress_period_data = MfList(self, stress_period_data) + + # convert segment_data for datasets 9 and 10 to a recarray if necessary + if segment_data is not None: + for key, d in segment_data.items(): + if isinstance(d, list): + d = np.array(d) + if isinstance(d, np.recarray): + e = 'ModflowStr error: recarray dtype: ' + \ + str(d.dtype) + ' does not match ' + \ + 'self dtype: ' + str(self.dtype2) + assert d.dtype == self.dtype2, e + elif isinstance(d, np.ndarray): + d = np.core.records.fromarrays(d.transpose(), + dtype=self.dtype2) + elif isinstance(d, int): + if model.verbose: + if d < 0: + msg = 3 * ' ' + 'reusing str segment data ' + \ + 'from previous stress period' + print(msg) + elif d == 0: + msg = 3 * ' ' + 'no str segment data for ' + \ + 'stress period {}'.format(key) + print(msg) + else: + e = 'ModflowStr error: unsupported data type: ' + \ + str(type(d)) + ' at kper ' + '{0:d}'.format(key) + raise Exception(e) + + # add segment_data to package + self.segment_data = segment_data + + self.parent.add_package(self) + return + + @staticmethod + def get_empty(ncells=0, nss=0, aux_names=None, structured=True): + # get an empty recarray that corresponds to dtype + dtype, dtype2 = ModflowStr.get_default_dtype(structured=structured) + if aux_names is not None: + dtype = Package.add_to_dtype(dtype, aux_names, np.float32) + return ( + create_empty_recarray(ncells, dtype=dtype, default_value=-1.0E+10), + create_empty_recarray(nss, dtype=dtype2, default_value=0)) + + @staticmethod + def get_default_dtype(structured=True): + if structured: + dtype = np.dtype([("k", np.int), ("i", np.int), ("j", np.int), + ("segment", np.int), ("reach", np.int), + ("flow", np.float32), ("stage", np.float32), + ("cond", np.float32), ("sbot", np.float32), + ("stop", np.float32), + ("width", np.float32), ("slope", np.float32), + ("rough", np.float32)]) + else: + dtype = np.dtype([("node", np.int), + ("segment", np.int), ("reach", np.int), + ("flow", np.float32), ("stage", np.float32), + ("cond", np.float32), ("sbot", np.float32), + ("stop", np.float32), + ("width", np.float32), ("slope", np.float32), + ("rough", np.float32)]) + + dtype2 = np.dtype([("itrib01", np.int), ("itrib02", np.int), + ("itrib03", np.int), ("itrib04", np.int), + ("itrib05", np.int), ("itrib06", np.int), + ("itrib07", np.int), ("itrib08", np.int), + ("itrib09", np.int), ("itrib10", np.int), + ("iupseg", np.int)]) + return dtype, dtype2 + + def ncells(self): + # Return the maximum number of cells that have a stream + # (developed for MT3DMS SSM package) + return self.mxacts + + def write_file(self): + """ + Write the package file. 
+ + Returns + ------- + None + + """ + # set free variable + free = self.parent.free_format_input + + # open the str file + f_str = open(self.fn_path, 'w') + + # dataset 0 + f_str.write('{0}\n'.format(self.heading)) + + # dataset 1 - parameters not supported on write + + # dataset 2 + line = write_fixed_var([self.mxacts, self.nss, + self.ntrib, self.ndiv, + self.icalc, self.const, + self.ipakcb, self.istcb2], + free=free) + for opt in self.options: + line = line.rstrip() + line += ' ' + str(opt) + '\n' + f_str.write(line) + + # dataset 3 - parameters not supported on write + # dataset 4a - parameters not supported on write + # dataset 4b - parameters not supported on write + + nrow, ncol, nlay, nper = self.parent.get_nrow_ncol_nlay_nper() + + kpers = list(self.stress_period_data.data.keys()) + kpers.sort() + + # set column lengths for fixed format input files for + # datasets 6, 8, and 9 + fmt6 = [5, 5, 5, 5, 5, 15, 10, 10, 10, 10] + fmt8 = [10, 10, 10] + fmt9 = 5 + + for iper in range(nper): + if iper not in kpers: + if iper == 0: + itmp = 0 + else: + itmp = -1 + else: + tdata = self.stress_period_data[iper] + sdata = self.segment_data[iper] + if isinstance(tdata, int): + itmp = tdata + elif tdata is None: + itmp = -1 + else: + itmp = tdata.shape[0] + line = '{:10d}'.format(itmp) + \ + '{:10d}'.format(self.irdflg[iper]) + \ + '{:10d}'.format(self.iptflg[iper]) + \ + ' # stress period {}\n'.format(iper + 1) + f_str.write(line) + if itmp > 0: + tdata = np.recarray.copy(tdata) + # dataset 6 + for line in tdata: + line['k'] += 1 + line['i'] += 1 + line['j'] += 1 + ds6 = [] + for idx, v in enumerate(line): + if idx < 10 or idx > 12: + ds6.append(v) + if idx > 12: + fmt6 += [10] + f_str.write(write_fixed_var(ds6, ipos=fmt6, free=free)) + + # dataset 8 + if self.icalc > 0: + for line in tdata: + ds8 = [] + for idx in range(10, 13): + ds8.append(line[idx]) + f_str.write(write_fixed_var(ds8, ipos=fmt8, free=free)) + + # dataset 9 + if self.ntrib > 0: + for line in sdata: + ds9 = [] + for idx in range(self.ntrib): + ds9.append(line[idx]) + f_str.write(write_fixed_var(ds9, length=fmt9, + free=free)) + + # dataset 10 + if self.ndiv > 0: + for line in sdata: + f_str.write(write_fixed_var([line[-1]], + length=10, free=free)) + + # close the str file + f_str.close() + + @staticmethod + def load(f, model, nper=None, ext_unit_dict=None): + """ + Load an existing package. + + Parameters + ---------- + f : filename or file handle + File to load. + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to + which this package will be added. + nper : int + The number of stress periods. If nper is None, then nper will be + obtained from the model object. (default is None). + ext_unit_dict : dictionary, optional + If the arrays in the file are specified using EXTERNAL, + or older style array control records, then `f` should be a file + handle. In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. + + Returns + ------- + str : ModflowStr object + ModflowStr object. 
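The loader below reads dataset 2 and dataset 6 with the fixed column widths held in fmt2 and fmt6 via the read_fixed_var helper imported at the top of this module. A small sketch of that helper with the dataset 2 widths, as it is called in this file (the input line is invented for illustration):

    >>> from flopy.utils import read_fixed_var
    >>> line = '        10         2         0         0         0   86400.        53         0\n'
    >>> t = read_fixed_var(line, ipos=[10, 10, 10, 10, 10, 10, 10, 10], free=False)
    >>> # t is a list of string tokens split on the 10-character fields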
+ + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> strm = flopy.modflow.ModflowStr.load('test.str', m) + + """ + # set local variables + free = model.free_format_input + fmt2 = [10, 10, 10, 10, 10, 10, 10, 10] + fmt6 = [5, 5, 5, 5, 5, 15, 10, 10, 10, 10] + type6 = [np.int32, np.int32, np.int32, np.int32, np.int32, + np.float32, np.float32, np.float32, np.float32, np.float32] + fmt8 = [10, 10, 10] + fmt9 = [5] + + if model.verbose: + sys.stdout.write('loading str package file...\n') + + openfile = not hasattr(f, 'read') + if openfile: + filename = f + f = open(filename, 'r') + + # dataset 0 -- header + while True: + line = f.readline() + if line[0] != '#': + break + + # read dataset 1 - optional parameters + npstr, mxl = 0, 0 + t = line.strip().split() + if t[0].lower() == 'parameter': + if model.verbose: + sys.stdout.write(' loading str dataset 1\n') + npstr = np.int32(t[1]) + mxl = np.int32(t[2]) + + # read next line + line = f.readline() + + # data set 2 + if model.verbose: + sys.stdout.write(' loading str dataset 2\n') + t = read_fixed_var(line, ipos=fmt2, free=free) + mxacts = np.int32(t[0]) + nss = np.int32(t[1]) + ntrib = np.int32(t[2]) + ndiv = np.int32(t[3]) + icalc = np.int32(t[4]) + const = np.float32(t[5]) + istcb1 = np.int32(t[6]) + istcb2 = np.int32(t[7]) + ipakcb = 0 + try: + if istcb1 != 0: + ipakcb = istcb1 + model.add_pop_key_list(istcb1) + except: + if model.verbose: + print(' could not remove unit number {}'.format(istcb1)) + try: + if istcb2 != 0: + ipakcb = 53 + model.add_pop_key_list(istcb2) + except: + if model.verbose: + print(' could not remove unit number {}'.format(istcb2)) + + options = [] + aux_names = [] + naux = 0 + if 'AUX' in line.upper(): + t = line.strip().split() + it = 8 + while it < len(t): + toption = t[it] + if 'aux' in toption.lower(): + naux += 1 + options.append(' '.join(t[it:it + 2])) + aux_names.append(t[it + 1].lower()) + it += 1 + it += 1 + + # read parameter data + if npstr > 0: + dt = ModflowStr.get_empty(1, aux_names=aux_names).dtype + pak_parms = mfparbc.load(f, npstr, dt, model, ext_unit_dict, + model.verbose) + + if nper is None: + nper = model.nper + + irdflg = {} + iptflg = {} + stress_period_data = {} + segment_data = {} + for iper in range(nper): + if model.verbose: + print(" loading " + str( + ModflowStr) + " for kper {0:5d}".format(iper + 1)) + line = f.readline() + if line == '': + break + t = line.strip().split() + + # set itmp + itmp = int(t[0]) + + # set irdflg and iptflg - initialize to 0 since this is how + # MODFLOW would interpret a missing value + iflg0, iflg1 = 0, 0 + if len(t) > 1: + iflg0 = int(t[1]) + if len(t) > 2: + iflg1 = int(t[2]) + irdflg[iper] = iflg0 + iptflg[iper] = iflg1 + + if itmp == 0: + bnd_output = None + seg_output = None + current, current_seg = ModflowStr.get_empty(itmp, nss, + aux_names=aux_names) + elif itmp > 0: + if npstr > 0: + partype = ['cond'] + if model.verbose: + print(" reading str dataset 7") + for iparm in range(itmp): + line = f.readline() + t = line.strip().split() + pname = t[0].lower() + iname = 'static' + try: + tn = t[1] + c = tn.lower() + instance_dict = pak_parms.bc_parms[pname][1] + if c in instance_dict: + iname = c + else: + iname = 'static' + except: + if model.verbose: + print(' implicit static instance for ' + + 'parameter {}'.format(pname)) + + par_dict, current_dict = pak_parms.get(pname) + data_dict = current_dict[iname] + + current = ModflowStr.get_empty(par_dict['nlst'], + aux_names=aux_names) + + # get appropriate parval + if 
model.mfpar.pval is None: + parval = np.float(par_dict['parval']) + else: + try: + parval = np.float( + model.mfpar.pval.pval_dict[pname]) + except: + parval = np.float(par_dict['parval']) + + # fill current parameter data (par_current) + for ibnd, t in enumerate(data_dict): + current[ibnd] = tuple(t[:len(current.dtype.names)]) + + else: + if model.verbose: + print(" reading str dataset 6") + current, current_seg = ModflowStr.get_empty(itmp, nss, + aux_names=aux_names) + for ibnd in range(itmp): + line = f.readline() + t = read_fixed_var(line, ipos=fmt6, free=free) + v = [tt(vv) for tt, vv in zip(type6, t)] + ii = len(fmt6) + for idx, name in enumerate(current.dtype.names[:ii]): + current[ibnd][name] = v[idx] + if len(aux_names) > 0: + if free: + tt = line.strip().split()[len(fmt6):] + else: + istart = 0 + for i in fmt6: + istart += i + tt = line[istart:].strip().split() + for iaux, name in enumerate(aux_names): + current[ibnd][name] = np.float32(tt[iaux]) + + # convert indices to zero-based + current['k'] -= 1 + current['i'] -= 1 + current['j'] -= 1 + + # read dataset 8 + if icalc > 0: + if model.verbose: + print(" reading str dataset 8") + for ibnd in range(itmp): + line = f.readline() + t = read_fixed_var(line, ipos=fmt8, free=free) + ipos = 0 + for idx in range(10, 13): + current[ibnd][idx] = np.float32(t[ipos]) + ipos += 1 + + bnd_output = np.recarray.copy(current) + + # read data set 9 + if ntrib > 0: + if model.verbose: + print(" reading str dataset 9") + for iseg in range(nss): + line = f.readline() + t = read_fixed_var(line, ipos=fmt9 * ntrib, free=free) + v = [np.float32(vt) for vt in t] + names = current_seg.dtype.names[:ntrib] + for idx, name in enumerate(names): + current_seg[iseg][idx] = v[idx] + + # read data set 10 + if ndiv > 0: + if model.verbose: + print(" reading str dataset 10") + for iseg in range(nss): + line = f.readline() + t = read_fixed_var(line, length=10, free=free) + current_seg[iseg]['iupseg'] = np.int32(t[0]) + + seg_output = np.recarray.copy(current_seg) + + else: + bnd_output = -1 + seg_output = -1 + + if bnd_output is None: + stress_period_data[iper] = itmp + segment_data[iper] = itmp + else: + stress_period_data[iper] = bnd_output + segment_data[iper] = seg_output + + if openfile: + f.close() + + # determine specified unit number + unitnumber = None + filenames = [None, None, None] + if ext_unit_dict is not None: + unitnumber, filenames[0] = \ + model.get_ext_dict_attr(ext_unit_dict, + filetype=ModflowStr.ftype()) + if ipakcb > 0: + iu, filenames[1] = \ + model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb) + if abs(istcb2) > 0: + iu, filenames[2] = \ + model.get_ext_dict_attr(ext_unit_dict, unit=abs(istcb2)) + + strpak = ModflowStr(model, mxacts=mxacts, nss=nss, + ntrib=ntrib, ndiv=ndiv, icalc=icalc, + const=const, ipakcb=ipakcb, istcb2=istcb2, + iptflg=iptflg, irdflg=irdflg, + stress_period_data=stress_period_data, + segment_data=segment_data, + options=options, unitnumber=unitnumber, + filenames=filenames) + return strpak + + @staticmethod + def ftype(): + return 'STR' + + @staticmethod + def defaultunit(): + return 118 diff --git a/flopy/modflow/mfsub.py b/flopy/modflow/mfsub.py index 244e5f5eae..ec0371a4c2 100644 --- a/flopy/modflow/mfsub.py +++ b/flopy/modflow/mfsub.py @@ -1,756 +1,756 @@ -""" -mfsub module. Contains the ModflowSub class. Note that the user can access -the ModflowSub class as `flopy.modflow.ModflowSub`. - -Additional information for this MODFLOW package can be found at the `Online -MODFLOW Guide -`_. 
- -""" -import sys - -import numpy as np - -from ..pakbase import Package -from ..utils import Util2d, Util3d, read1d - - -class ModflowSub(Package): - """ - MODFLOW SUB Package Class. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to which - this package will be added. - ipakcb : int - A flag that is used to determine if cell-by-cell budget data should be - saved. If ipakcb is non-zero cell-by-cell budget data will be saved. - (default is 0). - isuboc : int - isuboc is a flag used to control output of information generated by the - SUB Package. (default is 0). - idsave : int - idsave is a flag and a unit number on which restart records for delay - interbeds will be saved at the end of the simulation. (default is 0). - idrest : int - idrest is a flag and a unit number on which restart records for delay - interbeds will be read in at the start of the simulation (default is 0). - idbit : int - idrest is an optional flag that defines if iteration will be used for - the delay bed solution of heads. If idbit is 0 or less than 0, - iteration will not be used (this is is identical to the approach used - in MODFLOW-2005 versions less than 1.13. if idbit is greater than - 0, iteration of the delay bed solution will continue until convergence - is achieved. (default is 0). - nndb : int - nndb is the number of systems of no-delay interbeds. (default is 1). - ndb : int - ndb is the number of systems of delay interbeds. (default is 1). - nmz : int - nmz is the number of material zones that are needed to define the - hydraulic properties of systems of delay interbeds. Each material zone - is defined by a combination of vertical hydraulic conductivity, elastic - specific storage, and inelastic specific storage. (default is 1). - nn : int - nn is the number of nodes used to discretize the half space to - approximate the head distributions in systems of delay interbeds. - (default is 20). - ac1 : float - ac1 is an acceleration parameter. This parameter is used to predict - the aquifer head at the interbed boundaries on the basis of the head - change computed for the previous iteration. A value of 0.0 results in - the use of the aquifer head at the previous iteration. Limited - experience indicates that optimum values may range from 0.0 to 0.6. - (default is 0). - ac2 : float - ac2 is an acceleration parameter. This acceleration parameter is a - multiplier for the head changes to compute the head at the new - iteration. Values are normally between 1.0 and 2.0, but the optimum is - probably closer to 1.0 than to 2.0. However this parameter also can be - used to help convergence of the iterative solution by using values - between 0 and 1. (default is 1.0). - itmin : int - ITMIN is the minimum number of iterations for which one-dimensional - equations will be solved for flow in interbeds when the Strongly - Implicit Procedure (SIP) is used to solve the ground-water flow - equations. If the current iteration level is greater than ITMIN and - the SIP convergence criterion for head closure (HCLOSE) is met at a - particular cell, the one-dimensional equations for that cell will not - be solved. The previous solution will be used. The value of ITMIN is - not used if a solver other than SIP is used to solve the ground-water - flow equations. (default is 5). - ln : int or array of ints (nndb) - ln is a one-dimensional array specifying the model layer assignments - for each system of no-delay interbeds. (default is 0). 
- ldn : int or array of ints (ndb) - ldn is a one-dimensional array specifying the model layer assignments - for each system of delay interbeds.(default is 0). - rnb : float or array of floats (ndb, nrow, ncol) - rnb is an array specifying the factor nequiv at each cell for each - system of delay interbeds. The array also is used to define the areal - extent of each system of interbeds. For cells beyond the areal extent - of the system of interbeds, enter a number less than 1.0 in the - corresponding element of this array. (default is 1). - hc : float or array of floats (nndb, nrow, ncol) - hc is an array specifying the preconsolidation head or preconsolidation - stress in terms of head in the aquifer for systems of no-delay - interbeds. For any model cells in which specified HC is greater than - the corresponding value of starting head, the value of HC will be set - to that of starting head. (default is 100000). - sfe : float or array of floats (nndb, nrow, ncol) - sfe is an array specifying the dimensionless elastic skeletal storage - coefficient for systems of no-delay interbeds. (default is 1.e-4). - sfv : float or array of floats (nndb, nrow, ncol) - sfv is an array specifying the dimensionless inelastic skeletal storage - coefficient for systems of no-delay interbeds. (default is 1.e-3). - com : float or array of floats (nndb, nrow, ncol) - com is an array specifying the starting compaction in each system of - no-delay interbeds. Compaction values computed by the package are added - to values in this array so that printed or stored values of compaction - and land subsidence may include previous components. Values in this - array do not affect calculations of storage changes or resulting - compaction. For simulations in which output values are to reflect - compaction and subsidence since the start of the simulation, enter zero - values for all elements of this array. (default is 0). - dp : list or array of floats (nmz, 3) - Data item includes nmz records, each with a value of vertical hydraulic - conductivity, elastic specific storage, and inelastic specific storage. - (default is [1.e-6, 6.e-6, 6.e-4]). - dstart : float or array of floats (ndb, nrow, ncol) - dstart is an array specifying starting head in interbeds for systems of - delay interbeds. For a particular location in a system of interbeds, - the starting head is applied to every node in the string of nodes that - approximates flow in half of a doubly draining interbed. - (default is 1). - dhc : float or array of floats (ndb, nrow, ncol) - dhc is an array specifying the starting preconsolidation head in - interbeds for systems of delay interbeds. For a particular location in - a system of interbeds, the starting preconsolidation head is applied to - every node in the string of nodes that approximates flow in half of a - doubly draining interbed. For any location at which specified starting - preconsolidation head is greater than the corresponding value of the - starting head, Dstart, the value of the starting preconsolidation head - will be set to that of the starting head. (default is 100000). - dcom : float or array of floats (ndb, nrow, ncol) - dcom is an array specifying the starting compaction in each system of - delay interbeds. Compaction values computed by the package are added to - values in this array so that printed or stored values of compaction and - land subsidence may include previous components. Values in this array - do not affect calculations of storage changes or resulting compaction. 
- For simulations in which output values are to reflect compaction - and subsidence since the start of the simulation, enter zero values - for all elements of this array. (default is 0). - dz : float or array of floats (ndb, nrow, ncol) - dz is an array specifying the equivalent thickness for a system of - delay interbeds. (default is 1). - nz : int or array of ints (ndb, nrow, ncol) - nz is an array specifying the material zone numbers for systems of - delay interbeds. The zone number for each location in the model grid - selects the hydraulic conductivity, elastic specific storage, and - inelastic specific storage of the interbeds. (default is 1). - ids15 : list or array of ints (12) - Format codes and unit numbers for subsidence, compaction by model - layer, compaction by interbed system, vertical displacement, no-delay - preconsolidation, and delay preconsolidation will be printed. If ids15 - is None and isuboc>0 then print code 0 will be used for all data which - is output to the binary subsidence output file (unit=1051). The 12 - entries in ids15 correspond to ifm1, iun1, ifm2, iun2, ifm3, iun3, - ifm4, iun4, ifm5, iun5, ifm6, and iun6 variables. (default is None). - ids16 : list or array of ints (isuboc, 17) - Stress period and time step range and print and save flags used to - control printing and saving of information generated by the SUB Package - during program execution. Each row of ids16 corresponds to isp1, isp2, - its1, its2, ifl1, ifl2, ifl3, ifl4, ifl5, ifl6, ifl7, ifl8, ifl9, - ifl10, ifl11, ifl12, and ifl13 variables for isuboc entries. isp1, - isp2, its1, and its2 are stress period and time step ranges. ifl1 and - ifl2 control subsidence printing and saving. ifl3 and ifl4 control - compaction by model layer printing and saving. ifl5 and ifl6 control - compaction by interbed system printing and saving. ifl7 and ifl8 - control vertical displacement printing and saving. ifl9 and ifl10 - control critical head for no-delay interbeds printing and saving. - ifl11 and ifl12 control critical head for delay interbeds printing - and saving. ifl13 controls volumetric budget for delay interbeds - printing. If ids16 is None and isuboc>0 then all available subsidence - output will be printed and saved to the binary subsidence output file - (unit=1051). (default is None). - unitnumber : int - File unit number (default is None). - filenames : str or list of str - Filenames to use for the package and the output files. If - filenames=None the package name will be created using the model name - and package extension and the cbc output name and other sub output - files will be created using the model name and .cbc and swt output - extensions (for example, modflowtest.cbc), if ipakcbc and other - sub output files (dataset 15) are numbers greater than zero. - If a single string is passed the package name will be set to the - string and other sub output files will be set to the model name with - the appropriate output file extensions. To define the names for all - package files (input and output) the length of the list of strings - should be 9. Default is None. - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - Parameters are supported in Flopy only when reading in existing models. - Parameter values are converted to native values in Flopy and the - connection to "parameters" is thus nonexistent. Parameters are not - supported in the SUB Package. 
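# --- Illustrative sketch (editor's addition, not part of the patch) ---
# The ids15/ids16 output-control arrays described in the docstring above can
# be built like this; all unit numbers and period ranges here are invented.
import numpy as np

# ids15: six (format code, unit number) pairs -> ifm1, iun1, ..., ifm6, iun6
ids15 = np.zeros(12, dtype=np.int32)
ids15[1::2] = [2052, 2053, 2054, 2055, 2056, 2057]  # units; format codes stay 0

# ids16: one record per isuboc entry -> [isp1, isp2, its1, its2, ifl1..ifl13];
# here: stress periods 1-10 (zero-based), all time steps, everything switched on
ids16 = np.ones((1, 17), dtype=np.int32)
ids16[0, :4] = [0, 9, 0, 9999]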
- - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> sub = flopy.modflow.ModflowSub(m) - - """ - - def __init__(self, model, ipakcb=None, isuboc=0, idsave=None, idrest=None, - idbit=None, - nndb=1, ndb=1, nmz=1, nn=20, ac1=0., ac2=1.0, itmin=5, - ln=0, ldn=0, rnb=1, - hc=100000., sfe=1.e-4, sfv=1.e-3, com=0., - dp=[[1.e-6, 6.e-6, 6.e-4]], - dstart=1., dhc=100000., dcom=0., dz=1., nz=1, - ids15=None, ids16=None, - extension='sub', unitnumber=None, - filenames=None): - """ - Package constructor. - - """ - # set default unit number of one is not specified - if unitnumber is None: - unitnumber = ModflowSub.defaultunit() - - # set filenames - if filenames is None: - filenames = [None for x in range(9)] - elif isinstance(filenames, str): - filenames = [filenames] + [None for x in range(8)] - elif isinstance(filenames, list): - if len(filenames) < 9: - n = 9 - len(filenames) + 1 - filenames = filenames + [None for x in range(n)] - - # update external file information with cbc output, if necessary - if ipakcb is not None: - fname = filenames[1] - model.add_output_file(ipakcb, fname=fname, - package=ModflowSub.ftype()) - else: - ipakcb = 0 - - if idsave is not None: - fname = filenames[2] - model.add_output_file(idsave, fname=fname, extension='rst', - package=ModflowSub.ftype()) - else: - idsave = 0 - - if idrest is None: - idrest = 0 - - item15_extensions = ["subsidence.hds", "total_comp.hds", - "inter_comp.hds", "vert_disp.hds", - "nodelay_precon.hds", "delay_precon.hds"] - item15_units = [2052 + i for i in range(len(item15_extensions))] - - if isuboc > 0: - idx = 0 - for k in range(1, 12, 2): - ext = item15_extensions[idx] - if ids15 is None: - iu = item15_units[idx] - else: - iu = ids15[k] - fname = filenames[idx+3] - model.add_output_file(iu, fname=fname, extension=ext, - package=ModflowSub.ftype()) - idx += 1 - - extensions = [extension] - name = [ModflowSub.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and - # unit number - Package.__init__(self, model, extension=extensions, name=name, - unit_number=units, extra=extra, filenames=fname) - - nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper - - self.heading = '# {} package for '.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' 
- self.url = 'sub.htm' - - self.ipakcb = ipakcb - self.isuboc = isuboc - self.idsave = idsave - self.idrest = idrest - self.idbit = idbit - self.nndb = nndb - self.ndb = ndb - self.nmz = nmz - self.nn = nn - self.ac1 = ac1 - self.ac2 = ac2 - self.itmin = itmin - # no-delay bed data - self.ln = None - self.hc = None - self.sfe = None - self.sfv = None - if nndb > 0: - self.ln = Util2d(model, (nndb,), np.int32, ln, name='ln') - self.hc = Util3d(model, (nndb, nrow, ncol), np.float32, hc, - name='hc', - locat=self.unit_number[0]) - self.sfe = Util3d(model, (nndb, nrow, ncol), np.float32, sfe, - name='sfe', - locat=self.unit_number[0]) - self.sfv = Util3d(model, (nndb, nrow, ncol), np.float32, sfv, - name='sfv', - locat=self.unit_number[0]) - self.com = Util3d(model, (nndb, nrow, ncol), np.float32, com, - name='com', - locat=self.unit_number[0]) - # delay bed data - self.ldn = None - self.rnb = None - self.dstart = None - self.dhc = None - self.dz = None - self.nz = None - if ndb > 0: - self.ldn = Util2d(model, (ndb,), np.int32, ldn, name='ldn') - self.rnb = Util3d(model, (ndb, nrow, ncol), np.float32, rnb, - name='rnb', - locat=self.unit_number[0]) - self.dstart = Util3d(model, (ndb, nrow, ncol), np.float32, dstart, - name='dstart', - locat=self.unit_number[0]) - self.dhc = Util3d(model, (ndb, nrow, ncol), np.float32, dhc, - name='dhc', - locat=self.unit_number[0]) - self.dcom = Util3d(model, (ndb, nrow, ncol), np.float32, dcom, - name='dcom', - locat=self.unit_number[0]) - self.dz = Util3d(model, (ndb, nrow, ncol), np.float32, dz, - name='dz', - locat=self.unit_number[0]) - self.nz = Util3d(model, (ndb, nrow, ncol), np.int32, nz, name='nz', - locat=self.unit_number[0]) - # material zone data - if isinstance(dp, list): - dp = np.array(dp) - self.dp = dp - - # output data - if isuboc > 0: - if ids15 is None: - ids15 = np.zeros(12, dtype=np.int32) - iu = 0 - for i in range(1, 12, 2): - ids15[i] = item15_units[iu] - iu += 1 - self.ids15 = ids15 - else: - if isinstance(ids15, list): - ids15 = np.array(ids15) - self.ids15 = ids15 - - if ids16 is None: - self.isuboc = 1 - # save and print everything - ids16 = np.ones((1, 17), dtype=np.int32) - ids16[0, 0] = 0 - ids16[0, 1] = nper - 1 - ids16[0, 2] = 0 - ids16[0, 3] = 9999 - else: - if isinstance(ids16, list): - ids16 = np.array(ids16) - if len(ids16.shape) == 1: - ids16 = np.reshape(ids16, (1, ids16.shape[0])) - self.ids16 = ids16 - - # add package to model - self.parent.add_package(self) - - def write_file(self, check=False, f=None): - """ - Write the package file. 
- - Returns - ------- - None - - """ - if check: - print("warning: check not implemented for sub") - nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper - # Open file for writing - if f is None: - f = open(self.fn_path, 'w') - # First line: heading - f.write('{}\n'.format(self.heading)) - # write dataset 1 - f.write( - '{} {} {} {} {} {} '.format(self.ipakcb, self.isuboc, self.nndb, - self.ndb, self.nmz, self.nn)) - - f.write('{} {} {} {} {}'.format(self.ac1, self.ac2, - self.itmin, self.idsave, - self.idrest)) - line = '' - if self.idbit is not None: - line += ' {}'.format(self.idbit) - line += '\n' - f.write(line) - - if self.nndb > 0: - t = self.ln.array - for tt in t: - f.write('{} '.format(tt + 1)) - f.write('\n') - if self.ndb > 0: - t = self.ldn.array - for tt in t: - f.write('{} '.format(tt + 1)) - f.write('\n') - - # write dataset 4 - if self.ndb > 0: - for k in range(self.ndb): - f.write(self.rnb[k].get_file_entry()) - - # write dataset 5 to 8 - if self.nndb > 0: - for k in range(self.nndb): - f.write(self.hc[k].get_file_entry()) - f.write(self.sfe[k].get_file_entry()) - f.write(self.sfv[k].get_file_entry()) - f.write(self.com[k].get_file_entry()) - - # write dataset 9 - if self.ndb > 0: - for k in range(self.nmz): - line = '{:15.6g} {:15.6g} {:15.6g}'.format(self.dp[k, 0], - self.dp[k, 1], - self.dp[k, 2]) + \ - ' #material zone {} data\n'.format(k + 1) - f.write(line) - # write dataset 10 to 14 - if self.ndb > 0: - for k in range(self.ndb): - f.write(self.dstart[k].get_file_entry()) - f.write(self.dhc[k].get_file_entry()) - f.write(self.dcom[k].get_file_entry()) - f.write(self.dz[k].get_file_entry()) - f.write(self.nz[k].get_file_entry()) - - # write dataset 15 and 16 - if self.isuboc > 0: - # dataset 15 - for i in self.ids15: - f.write('{} '.format(i)) - f.write(' #dataset 15\n') - - # dataset 16 - for k in range(self.isuboc): - t = self.ids16[k, :] - t[0:4] += 1 - for i in t: - f.write('{} '.format(i)) - f.write(' #dataset 16 isuboc {}\n'.format(k + 1)) - - # close sub file - f.close() - - @staticmethod - def load(f, model, ext_unit_dict=None): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. 
- - Returns - ------- - sub : ModflowSub object - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> sub = flopy.modflow.ModflowSub.load('test.sub', m) - - """ - - if model.verbose: - sys.stdout.write('loading sub package file...\n') - - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - - # dataset 0 -- header - while True: - line = f.readline() - if line[0] != '#': - break - # determine problem dimensions - nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper() - - # read dataset 1 - if model.verbose: - sys.stdout.write(' loading sub dataset 1\n') - t = line.strip().split() - ipakcb, isuboc, nndb, ndb, nmz, nn = int(t[0]), int(t[1]), int(t[2]), \ - int(t[3]), int(t[4]), int(t[5]) - ac1, ac2 = float(t[6]), float(t[7]) - itmin, idsave, idrest = int(t[8]), int(t[9]), int(t[10]) - - idbit = None - if len(t) > 11: - if isinstance(t[11], (int, float)): - idbit = int(t[11]) - if idbit is None: - if model.verbose: - print(' explicit idbit in file') - - ln = None - if nndb > 0: - if model.verbose: - sys.stdout.write(' loading sub dataset 2\n') - ln = np.empty((nndb), dtype=np.int32) - ln = read1d(f, ln) - 1 - ldn = None - if ndb > 0: - if model.verbose: - sys.stdout.write(' loading sub dataset 3\n') - ldn = np.empty((ndb), dtype=np.int32) - ldn = read1d(f, ldn) - 1 - rnb = None - if ndb > 0: - if model.verbose: - sys.stdout.write(' loading sub dataset 4\n') - rnb = [0] * ndb - for k in range(ndb): - t = Util2d.load(f, model, (nrow, ncol), np.float32, - 'rnb delay bed {}'.format(k + 1), - ext_unit_dict) - rnb[k] = t - hc = None - sfe = None - sfv = None - com = None - if nndb > 0: - hc = [0] * nndb - sfe = [0] * nndb - sfv = [0] * nndb - com = [0] * nndb - for k in range(nndb): - kk = ln[k] + 1 - # hc - if model.verbose: - sys.stdout.write( - ' loading sub dataset 5 for layer {}\n'.format(kk)) - t = Util2d.load(f, model, (nrow, ncol), np.float32, - 'hc layer {}'.format(kk), ext_unit_dict) - hc[k] = t - # sfe - if model.verbose: - sys.stdout.write( - ' loading sub dataset 6 for layer {}\n'.format(kk)) - t = Util2d.load(f, model, (nrow, ncol), np.float32, - 'sfe layer {}'.format(kk), ext_unit_dict) - sfe[k] = t - # sfv - if model.verbose: - sys.stdout.write( - ' loading sub dataset 7 for layer {}\n'.format(kk)) - t = Util2d.load(f, model, (nrow, ncol), np.float32, - 'sfv layer {}'.format(kk), ext_unit_dict) - sfv[k] = t - # com - if model.verbose: - sys.stdout.write( - ' loading sub dataset 8 for layer {}\n'.format(kk)) - t = Util2d.load(f, model, (nrow, ncol), np.float32, - 'com layer {}'.format(kk), ext_unit_dict) - com[k] = t - - # dp - dp = None - if ndb > 0: - dp = np.zeros((nmz, 3), dtype=np.float32) - for k in range(nmz): - if model.verbose: - msg = 2 * ' ' + 'loading sub dataset 9 for material ' + \ - 'zone {}\n'.format(k + 1) - sys.stdout.write(msg) - line = f.readline() - t = line.strip().split() - dp[k, :] = float(t[0]), float(t[1]), float(t[2]) - - dstart = None - dhc = None - dcom = None - dz = None - nz = None - if ndb > 0: - dstart = [0] * ndb - dhc = [0] * ndb - dcom = [0] * ndb - dz = [0] * ndb - nz = [0] * ndb - for k in range(ndb): - kk = ldn[k] + 1 - # dstart - if model.verbose: - msg = 2 * ' ' + 'loading sub dataset 10 for ' + \ - 'layer {}\n'.format(kk) - sys.stdout.write(msg) - t = Util2d.load(f, model, (nrow, ncol), np.float32, - 'dstart layer {}'.format(kk), - ext_unit_dict) - dstart[k] = t - # dhc - if model.verbose: - msg = 2 * ' ' + 'loading sub dataset 11 for ' + \ - 'layer {}\n'.format(kk) - 
sys.stdout.write(msg) - t = Util2d.load(f, model, (nrow, ncol), np.float32, - 'dhc layer {}'.format(kk), - ext_unit_dict) - dhc[k] = t - # dcom - if model.verbose: - msg = 2 * ' ' + 'loading sub dataset 12 for ' + \ - 'layer {}\n'.format(kk) - sys.stdout.write(msg) - t = Util2d.load(f, model, (nrow, ncol), np.float32, - 'dcom layer {}'.format(kk), - ext_unit_dict) - dcom[k] = t - # dz - if model.verbose: - msg = 2 * ' ' + 'loading sub dataset 13 for ' + \ - 'layer {}\n'.format(kk) - sys.stdout.write(msg) - t = Util2d.load(f, model, (nrow, ncol), np.float32, - 'dz layer {}'.format(kk), - ext_unit_dict) - dz[k] = t - # nz - if model.verbose: - msg = 2 * ' ' + 'loading sub dataset 14 for ' + \ - 'layer {}\n'.format(kk) - sys.stdout.write(msg) - t = Util2d.load(f, model, (nrow, ncol), np.int32, - 'nz layer {}'.format(kk), - ext_unit_dict) - nz[k] = t - - ids15 = None - ids16 = None - if isuboc > 0: - # dataset 15 - if model.verbose: - msg = 2 * ' ' + 'loading sub dataset 15 for ' + \ - 'layer {}\n'.format(kk) - sys.stdout.write(msg) - ids15 = np.empty(12, dtype=np.int32) - ids15 = read1d(f, ids15) - # dataset 16 - ids16 = [0] * isuboc - for k in range(isuboc): - if model.verbose: - msg = 2 * ' ' + 'loading sub dataset 16 for ' + \ - 'isuboc {}\n'.format(k + 1) - sys.stdout.write(msg) - t = np.empty(17, dtype=np.int32) - t = read1d(f, t) - t[0:4] -= 1 - ids16[k] = t - - if openfile: - f.close() - - model.add_pop_key_list(2051) - - # determine specified unit number - unitnumber = None - filenames = [None for x in range(9)] - if ext_unit_dict is not None: - unitnumber, filenames[0] = \ - model.get_ext_dict_attr(ext_unit_dict, - filetype=ModflowSub.ftype()) - if ipakcb > 0: - iu, filenames[1] = \ - model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb) - - if idsave > 0: - iu, filenames[2] = \ - model.get_ext_dict_attr(ext_unit_dict, unit=idsave) - - if isuboc > 0: - ipos = 3 - for k in range(1, 12, 2): - unit = ids15[k] - if unit > 0: - iu, filenames[ipos] = \ - model.get_ext_dict_attr(ext_unit_dict, unit=unit) - model.add_pop_key_list(unit) - ipos += 1 - - # create sub instance - sub = ModflowSub(model, ipakcb=ipakcb, isuboc=isuboc, idsave=idsave, - idrest=idrest, idbit=idbit, - nndb=nndb, ndb=ndb, nmz=nmz, nn=nn, ac1=ac1, ac2=ac2, - itmin=itmin, - ln=ln, ldn=ldn, rnb=rnb, - hc=hc, sfe=sfe, sfv=sfv, com=com, dp=dp, - dstart=dstart, dhc=dhc, dcom=dcom, dz=dz, nz=nz, - ids15=ids15, ids16=ids16, unitnumber=unitnumber, - filenames=filenames) - # return sub instance - return sub - - @staticmethod - def ftype(): - return 'SUB' - - @staticmethod - def defaultunit(): - return 32 +""" +mfsub module. Contains the ModflowSub class. Note that the user can access +the ModflowSub class as `flopy.modflow.ModflowSub`. + +Additional information for this MODFLOW package can be found at the `Online +MODFLOW Guide +`_. + +""" +import sys + +import numpy as np + +from ..pakbase import Package +from ..utils import Util2d, Util3d, read1d + + +class ModflowSub(Package): + """ + MODFLOW SUB Package Class. + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to which + this package will be added. + ipakcb : int + A flag that is used to determine if cell-by-cell budget data should be + saved. If ipakcb is non-zero cell-by-cell budget data will be saved. + (default is 0). + isuboc : int + isuboc is a flag used to control output of information generated by the + SUB Package. (default is 0). 
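# --- Illustrative sketch (editor's addition, not part of the patch) ---
# A minimal SUB setup with one no-delay and one delay interbed system,
# consistent with the parameters documented below; all values are invented.
import flopy

m = flopy.modflow.Modflow()
dis = flopy.modflow.ModflowDis(m, nlay=2, nrow=10, ncol=10, nper=3)
sub = flopy.modflow.ModflowSub(m, nndb=1, ndb=1, ln=[0], ldn=[1],
                               hc=100.0, sfe=1.e-4, sfv=1.e-3,
                               rnb=7.5, dz=5.0, nz=1)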
+    idsave : int
+        idsave is a flag and a unit number on which restart records for delay
+        interbeds will be saved at the end of the simulation. (default is 0).
+    idrest : int
+        idrest is a flag and a unit number on which restart records for delay
+        interbeds will be read in at the start of the simulation. (default
+        is 0).
+    idbit : int
+        idbit is an optional flag that defines if iteration will be used for
+        the delay bed solution of heads. If idbit is less than or equal to 0,
+        iteration will not be used (this is identical to the approach used
+        in MODFLOW-2005 versions less than 1.13). If idbit is greater than
+        0, iteration of the delay bed solution will continue until
+        convergence is achieved. (default is 0).
+    nndb : int
+        nndb is the number of systems of no-delay interbeds. (default is 1).
+    ndb : int
+        ndb is the number of systems of delay interbeds. (default is 1).
+    nmz : int
+        nmz is the number of material zones that are needed to define the
+        hydraulic properties of systems of delay interbeds. Each material
+        zone is defined by a combination of vertical hydraulic conductivity,
+        elastic specific storage, and inelastic specific storage. (default
+        is 1).
+    nn : int
+        nn is the number of nodes used to discretize the half space to
+        approximate the head distributions in systems of delay interbeds.
+        (default is 20).
+    ac1 : float
+        ac1 is an acceleration parameter. This parameter is used to predict
+        the aquifer head at the interbed boundaries on the basis of the head
+        change computed for the previous iteration. A value of 0.0 results
+        in the use of the aquifer head at the previous iteration. Limited
+        experience indicates that optimum values may range from 0.0 to 0.6.
+        (default is 0).
+    ac2 : float
+        ac2 is an acceleration parameter. This acceleration parameter is a
+        multiplier for the head changes to compute the head at the new
+        iteration. Values are normally between 1.0 and 2.0, but the optimum
+        is probably closer to 1.0 than to 2.0. However this parameter also
+        can be used to help convergence of the iterative solution by using
+        values between 0 and 1. (default is 1.0).
+    itmin : int
+        ITMIN is the minimum number of iterations for which one-dimensional
+        equations will be solved for flow in interbeds when the Strongly
+        Implicit Procedure (SIP) is used to solve the ground-water flow
+        equations. If the current iteration level is greater than ITMIN and
+        the SIP convergence criterion for head closure (HCLOSE) is met at a
+        particular cell, the one-dimensional equations for that cell will
+        not be solved. The previous solution will be used. The value of
+        ITMIN is not used if a solver other than SIP is used to solve the
+        ground-water flow equations. (default is 5).
+    ln : int or array of ints (nndb)
+        ln is a one-dimensional array specifying the model layer assignments
+        for each system of no-delay interbeds. (default is 0).
+    ldn : int or array of ints (ndb)
+        ldn is a one-dimensional array specifying the model layer
+        assignments for each system of delay interbeds. (default is 0).
+    rnb : float or array of floats (ndb, nrow, ncol)
+        rnb is an array specifying the factor nequiv at each cell for each
+        system of delay interbeds. The array also is used to define the
+        areal extent of each system of interbeds. For cells beyond the areal
+        extent of the system of interbeds, enter a number less than 1.0 in
+        the corresponding element of this array. (default is 1).
+ hc : float or array of floats (nndb, nrow, ncol) + hc is an array specifying the preconsolidation head or preconsolidation + stress in terms of head in the aquifer for systems of no-delay + interbeds. For any model cells in which specified HC is greater than + the corresponding value of starting head, the value of HC will be set + to that of starting head. (default is 100000). + sfe : float or array of floats (nndb, nrow, ncol) + sfe is an array specifying the dimensionless elastic skeletal storage + coefficient for systems of no-delay interbeds. (default is 1.e-4). + sfv : float or array of floats (nndb, nrow, ncol) + sfv is an array specifying the dimensionless inelastic skeletal storage + coefficient for systems of no-delay interbeds. (default is 1.e-3). + com : float or array of floats (nndb, nrow, ncol) + com is an array specifying the starting compaction in each system of + no-delay interbeds. Compaction values computed by the package are added + to values in this array so that printed or stored values of compaction + and land subsidence may include previous components. Values in this + array do not affect calculations of storage changes or resulting + compaction. For simulations in which output values are to reflect + compaction and subsidence since the start of the simulation, enter zero + values for all elements of this array. (default is 0). + dp : list or array of floats (nmz, 3) + Data item includes nmz records, each with a value of vertical hydraulic + conductivity, elastic specific storage, and inelastic specific storage. + (default is [1.e-6, 6.e-6, 6.e-4]). + dstart : float or array of floats (ndb, nrow, ncol) + dstart is an array specifying starting head in interbeds for systems of + delay interbeds. For a particular location in a system of interbeds, + the starting head is applied to every node in the string of nodes that + approximates flow in half of a doubly draining interbed. + (default is 1). + dhc : float or array of floats (ndb, nrow, ncol) + dhc is an array specifying the starting preconsolidation head in + interbeds for systems of delay interbeds. For a particular location in + a system of interbeds, the starting preconsolidation head is applied to + every node in the string of nodes that approximates flow in half of a + doubly draining interbed. For any location at which specified starting + preconsolidation head is greater than the corresponding value of the + starting head, Dstart, the value of the starting preconsolidation head + will be set to that of the starting head. (default is 100000). + dcom : float or array of floats (ndb, nrow, ncol) + dcom is an array specifying the starting compaction in each system of + delay interbeds. Compaction values computed by the package are added to + values in this array so that printed or stored values of compaction and + land subsidence may include previous components. Values in this array + do not affect calculations of storage changes or resulting compaction. + For simulations in which output values are to reflect compaction + and subsidence since the start of the simulation, enter zero values + for all elements of this array. (default is 0). + dz : float or array of floats (ndb, nrow, ncol) + dz is an array specifying the equivalent thickness for a system of + delay interbeds. (default is 1). + nz : int or array of ints (ndb, nrow, ncol) + nz is an array specifying the material zone numbers for systems of + delay interbeds. 
The zone number for each location in the model grid + selects the hydraulic conductivity, elastic specific storage, and + inelastic specific storage of the interbeds. (default is 1). + ids15 : list or array of ints (12) + Format codes and unit numbers for subsidence, compaction by model + layer, compaction by interbed system, vertical displacement, no-delay + preconsolidation, and delay preconsolidation will be printed. If ids15 + is None and isuboc>0 then print code 0 will be used for all data which + is output to the binary subsidence output file (unit=1051). The 12 + entries in ids15 correspond to ifm1, iun1, ifm2, iun2, ifm3, iun3, + ifm4, iun4, ifm5, iun5, ifm6, and iun6 variables. (default is None). + ids16 : list or array of ints (isuboc, 17) + Stress period and time step range and print and save flags used to + control printing and saving of information generated by the SUB Package + during program execution. Each row of ids16 corresponds to isp1, isp2, + its1, its2, ifl1, ifl2, ifl3, ifl4, ifl5, ifl6, ifl7, ifl8, ifl9, + ifl10, ifl11, ifl12, and ifl13 variables for isuboc entries. isp1, + isp2, its1, and its2 are stress period and time step ranges. ifl1 and + ifl2 control subsidence printing and saving. ifl3 and ifl4 control + compaction by model layer printing and saving. ifl5 and ifl6 control + compaction by interbed system printing and saving. ifl7 and ifl8 + control vertical displacement printing and saving. ifl9 and ifl10 + control critical head for no-delay interbeds printing and saving. + ifl11 and ifl12 control critical head for delay interbeds printing + and saving. ifl13 controls volumetric budget for delay interbeds + printing. If ids16 is None and isuboc>0 then all available subsidence + output will be printed and saved to the binary subsidence output file + (unit=1051). (default is None). + unitnumber : int + File unit number (default is None). + filenames : str or list of str + Filenames to use for the package and the output files. If + filenames=None the package name will be created using the model name + and package extension and the cbc output name and other sub output + files will be created using the model name and .cbc and swt output + extensions (for example, modflowtest.cbc), if ipakcbc and other + sub output files (dataset 15) are numbers greater than zero. + If a single string is passed the package name will be set to the + string and other sub output files will be set to the model name with + the appropriate output file extensions. To define the names for all + package files (input and output) the length of the list of strings + should be 9. Default is None. + + Attributes + ---------- + + Methods + ------- + + See Also + -------- + + Notes + ----- + Parameters are supported in Flopy only when reading in existing models. + Parameter values are converted to native values in Flopy and the + connection to "parameters" is thus nonexistent. Parameters are not + supported in the SUB Package. + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> sub = flopy.modflow.ModflowSub(m) + + """ + + def __init__(self, model, ipakcb=None, isuboc=0, idsave=None, idrest=None, + idbit=None, + nndb=1, ndb=1, nmz=1, nn=20, ac1=0., ac2=1.0, itmin=5, + ln=0, ldn=0, rnb=1, + hc=100000., sfe=1.e-4, sfv=1.e-3, com=0., + dp=[[1.e-6, 6.e-6, 6.e-4]], + dstart=1., dhc=100000., dcom=0., dz=1., nz=1, + ids15=None, ids16=None, + extension='sub', unitnumber=None, + filenames=None): + """ + Package constructor. 
+ + """ + # set default unit number of one is not specified + if unitnumber is None: + unitnumber = ModflowSub.defaultunit() + + # set filenames + if filenames is None: + filenames = [None for x in range(9)] + elif isinstance(filenames, str): + filenames = [filenames] + [None for x in range(8)] + elif isinstance(filenames, list): + if len(filenames) < 9: + n = 9 - len(filenames) + 1 + filenames = filenames + [None for x in range(n)] + + # update external file information with cbc output, if necessary + if ipakcb is not None: + fname = filenames[1] + model.add_output_file(ipakcb, fname=fname, + package=ModflowSub.ftype()) + else: + ipakcb = 0 + + if idsave is not None: + fname = filenames[2] + model.add_output_file(idsave, fname=fname, extension='rst', + package=ModflowSub.ftype()) + else: + idsave = 0 + + if idrest is None: + idrest = 0 + + item15_extensions = ["subsidence.hds", "total_comp.hds", + "inter_comp.hds", "vert_disp.hds", + "nodelay_precon.hds", "delay_precon.hds"] + item15_units = [2052 + i for i in range(len(item15_extensions))] + + if isuboc > 0: + idx = 0 + for k in range(1, 12, 2): + ext = item15_extensions[idx] + if ids15 is None: + iu = item15_units[idx] + else: + iu = ids15[k] + fname = filenames[idx+3] + model.add_output_file(iu, fname=fname, extension=ext, + package=ModflowSub.ftype()) + idx += 1 + + extensions = [extension] + name = [ModflowSub.ftype()] + units = [unitnumber] + extra = [''] + + # set package name + fname = [filenames[0]] + + # Call ancestor's init to set self.parent, extension, name and + # unit number + Package.__init__(self, model, extension=extensions, name=name, + unit_number=units, extra=extra, filenames=fname) + + nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper + + self.heading = '# {} package for '.format(self.name[0]) + \ + ' {}, '.format(model.version_types[model.version]) + \ + 'generated by Flopy.' 
+ self.url = 'sub.htm' + + self.ipakcb = ipakcb + self.isuboc = isuboc + self.idsave = idsave + self.idrest = idrest + self.idbit = idbit + self.nndb = nndb + self.ndb = ndb + self.nmz = nmz + self.nn = nn + self.ac1 = ac1 + self.ac2 = ac2 + self.itmin = itmin + # no-delay bed data + self.ln = None + self.hc = None + self.sfe = None + self.sfv = None + if nndb > 0: + self.ln = Util2d(model, (nndb,), np.int32, ln, name='ln') + self.hc = Util3d(model, (nndb, nrow, ncol), np.float32, hc, + name='hc', + locat=self.unit_number[0]) + self.sfe = Util3d(model, (nndb, nrow, ncol), np.float32, sfe, + name='sfe', + locat=self.unit_number[0]) + self.sfv = Util3d(model, (nndb, nrow, ncol), np.float32, sfv, + name='sfv', + locat=self.unit_number[0]) + self.com = Util3d(model, (nndb, nrow, ncol), np.float32, com, + name='com', + locat=self.unit_number[0]) + # delay bed data + self.ldn = None + self.rnb = None + self.dstart = None + self.dhc = None + self.dz = None + self.nz = None + if ndb > 0: + self.ldn = Util2d(model, (ndb,), np.int32, ldn, name='ldn') + self.rnb = Util3d(model, (ndb, nrow, ncol), np.float32, rnb, + name='rnb', + locat=self.unit_number[0]) + self.dstart = Util3d(model, (ndb, nrow, ncol), np.float32, dstart, + name='dstart', + locat=self.unit_number[0]) + self.dhc = Util3d(model, (ndb, nrow, ncol), np.float32, dhc, + name='dhc', + locat=self.unit_number[0]) + self.dcom = Util3d(model, (ndb, nrow, ncol), np.float32, dcom, + name='dcom', + locat=self.unit_number[0]) + self.dz = Util3d(model, (ndb, nrow, ncol), np.float32, dz, + name='dz', + locat=self.unit_number[0]) + self.nz = Util3d(model, (ndb, nrow, ncol), np.int32, nz, name='nz', + locat=self.unit_number[0]) + # material zone data + if isinstance(dp, list): + dp = np.array(dp) + self.dp = dp + + # output data + if isuboc > 0: + if ids15 is None: + ids15 = np.zeros(12, dtype=np.int32) + iu = 0 + for i in range(1, 12, 2): + ids15[i] = item15_units[iu] + iu += 1 + self.ids15 = ids15 + else: + if isinstance(ids15, list): + ids15 = np.array(ids15) + self.ids15 = ids15 + + if ids16 is None: + self.isuboc = 1 + # save and print everything + ids16 = np.ones((1, 17), dtype=np.int32) + ids16[0, 0] = 0 + ids16[0, 1] = nper - 1 + ids16[0, 2] = 0 + ids16[0, 3] = 9999 + else: + if isinstance(ids16, list): + ids16 = np.array(ids16) + if len(ids16.shape) == 1: + ids16 = np.reshape(ids16, (1, ids16.shape[0])) + self.ids16 = ids16 + + # add package to model + self.parent.add_package(self) + + def write_file(self, check=False, f=None): + """ + Write the package file. 
+ + Returns + ------- + None + + """ + if check: + print("warning: check not implemented for sub") + nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper + # Open file for writing + if f is None: + f = open(self.fn_path, 'w') + # First line: heading + f.write('{}\n'.format(self.heading)) + # write dataset 1 + f.write( + '{} {} {} {} {} {} '.format(self.ipakcb, self.isuboc, self.nndb, + self.ndb, self.nmz, self.nn)) + + f.write('{} {} {} {} {}'.format(self.ac1, self.ac2, + self.itmin, self.idsave, + self.idrest)) + line = '' + if self.idbit is not None: + line += ' {}'.format(self.idbit) + line += '\n' + f.write(line) + + if self.nndb > 0: + t = self.ln.array + for tt in t: + f.write('{} '.format(tt + 1)) + f.write('\n') + if self.ndb > 0: + t = self.ldn.array + for tt in t: + f.write('{} '.format(tt + 1)) + f.write('\n') + + # write dataset 4 + if self.ndb > 0: + for k in range(self.ndb): + f.write(self.rnb[k].get_file_entry()) + + # write dataset 5 to 8 + if self.nndb > 0: + for k in range(self.nndb): + f.write(self.hc[k].get_file_entry()) + f.write(self.sfe[k].get_file_entry()) + f.write(self.sfv[k].get_file_entry()) + f.write(self.com[k].get_file_entry()) + + # write dataset 9 + if self.ndb > 0: + for k in range(self.nmz): + line = '{:15.6g} {:15.6g} {:15.6g}'.format(self.dp[k, 0], + self.dp[k, 1], + self.dp[k, 2]) + \ + ' #material zone {} data\n'.format(k + 1) + f.write(line) + # write dataset 10 to 14 + if self.ndb > 0: + for k in range(self.ndb): + f.write(self.dstart[k].get_file_entry()) + f.write(self.dhc[k].get_file_entry()) + f.write(self.dcom[k].get_file_entry()) + f.write(self.dz[k].get_file_entry()) + f.write(self.nz[k].get_file_entry()) + + # write dataset 15 and 16 + if self.isuboc > 0: + # dataset 15 + for i in self.ids15: + f.write('{} '.format(i)) + f.write(' #dataset 15\n') + + # dataset 16 + for k in range(self.isuboc): + t = self.ids16[k, :] + t[0:4] += 1 + for i in t: + f.write('{} '.format(i)) + f.write(' #dataset 16 isuboc {}\n'.format(k + 1)) + + # close sub file + f.close() + + @staticmethod + def load(f, model, ext_unit_dict=None): + """ + Load an existing package. + + Parameters + ---------- + f : filename or file handle + File to load. + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to + which this package will be added. + ext_unit_dict : dictionary, optional + If the arrays in the file are specified using EXTERNAL, + or older style array control records, then `f` should be a file + handle. In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. 
+
+        Returns
+        -------
+        sub : ModflowSub object
+
+        Examples
+        --------
+
+        >>> import flopy
+        >>> m = flopy.modflow.Modflow()
+        >>> sub = flopy.modflow.ModflowSub.load('test.sub', m)
+
+        """
+
+        if model.verbose:
+            sys.stdout.write('loading sub package file...\n')
+
+        openfile = not hasattr(f, 'read')
+        if openfile:
+            filename = f
+            f = open(filename, 'r')
+
+        # dataset 0 -- header
+        while True:
+            line = f.readline()
+            if line[0] != '#':
+                break
+        # determine problem dimensions
+        nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper()
+
+        # read dataset 1
+        if model.verbose:
+            sys.stdout.write('  loading sub dataset 1\n')
+        t = line.strip().split()
+        ipakcb, isuboc, nndb, ndb, nmz, nn = int(t[0]), int(t[1]), int(t[2]), \
+                                             int(t[3]), int(t[4]), int(t[5])
+        ac1, ac2 = float(t[6]), float(t[7])
+        itmin, idsave, idrest = int(t[8]), int(t[9]), int(t[10])
+
+        idbit = None
+        if len(t) > 11:
+            # t[11] is a string token; interpret it as the optional idbit
+            # flag if it parses as an integer
+            try:
+                idbit = int(t[11])
+            except ValueError:
+                pass
+        if idbit is None:
+            if model.verbose:
+                print('   no explicit idbit in file')
+
+        ln = None
+        if nndb > 0:
+            if model.verbose:
+                sys.stdout.write('  loading sub dataset 2\n')
+            ln = np.empty((nndb), dtype=np.int32)
+            ln = read1d(f, ln) - 1
+        ldn = None
+        if ndb > 0:
+            if model.verbose:
+                sys.stdout.write('  loading sub dataset 3\n')
+            ldn = np.empty((ndb), dtype=np.int32)
+            ldn = read1d(f, ldn) - 1
+        rnb = None
+        if ndb > 0:
+            if model.verbose:
+                sys.stdout.write('  loading sub dataset 4\n')
+            rnb = [0] * ndb
+            for k in range(ndb):
+                t = Util2d.load(f, model, (nrow, ncol), np.float32,
+                                'rnb delay bed {}'.format(k + 1),
+                                ext_unit_dict)
+                rnb[k] = t
+        hc = None
+        sfe = None
+        sfv = None
+        com = None
+        if nndb > 0:
+            hc = [0] * nndb
+            sfe = [0] * nndb
+            sfv = [0] * nndb
+            com = [0] * nndb
+            for k in range(nndb):
+                kk = ln[k] + 1
+                # hc
+                if model.verbose:
+                    sys.stdout.write(
+                        '  loading sub dataset 5 for layer {}\n'.format(kk))
+                t = Util2d.load(f, model, (nrow, ncol), np.float32,
+                                'hc layer {}'.format(kk), ext_unit_dict)
+                hc[k] = t
+                # sfe
+                if model.verbose:
+                    sys.stdout.write(
+                        '  loading sub dataset 6 for layer {}\n'.format(kk))
+                t = Util2d.load(f, model, (nrow, ncol), np.float32,
+                                'sfe layer {}'.format(kk), ext_unit_dict)
+                sfe[k] = t
+                # sfv
+                if model.verbose:
+                    sys.stdout.write(
+                        '  loading sub dataset 7 for layer {}\n'.format(kk))
+                t = Util2d.load(f, model, (nrow, ncol), np.float32,
+                                'sfv layer {}'.format(kk), ext_unit_dict)
+                sfv[k] = t
+                # com
+                if model.verbose:
+                    sys.stdout.write(
+                        '  loading sub dataset 8 for layer {}\n'.format(kk))
+                t = Util2d.load(f, model, (nrow, ncol), np.float32,
+                                'com layer {}'.format(kk), ext_unit_dict)
+                com[k] = t
+
+        # dp
+        dp = None
+        if ndb > 0:
+            dp = np.zeros((nmz, 3), dtype=np.float32)
+            for k in range(nmz):
+                if model.verbose:
+                    msg = 2 * ' ' + 'loading sub dataset 9 for material ' + \
+                          'zone {}\n'.format(k + 1)
+                    sys.stdout.write(msg)
+                line = f.readline()
+                t = line.strip().split()
+                dp[k, :] = float(t[0]), float(t[1]), float(t[2])
+
+        dstart = None
+        dhc = None
+        dcom = None
+        dz = None
+        nz = None
+        if ndb > 0:
+            dstart = [0] * ndb
+            dhc = [0] * ndb
+            dcom = [0] * ndb
+            dz = [0] * ndb
+            nz = [0] * ndb
+            for k in range(ndb):
+                kk = ldn[k] + 1
+                # dstart
+                if model.verbose:
+                    msg = 2 * ' ' + 'loading sub dataset 10 for ' + \
+                          'layer {}\n'.format(kk)
+                    sys.stdout.write(msg)
+                t = Util2d.load(f, model, (nrow, ncol), np.float32,
+                                'dstart layer {}'.format(kk),
+                                ext_unit_dict)
+                dstart[k] = t
+                # dhc
+                if model.verbose:
+                    msg = 2 * ' ' + 'loading sub dataset 11 for ' + \
+                          'layer {}\n'.format(kk)
+
sys.stdout.write(msg) + t = Util2d.load(f, model, (nrow, ncol), np.float32, + 'dhc layer {}'.format(kk), + ext_unit_dict) + dhc[k] = t + # dcom + if model.verbose: + msg = 2 * ' ' + 'loading sub dataset 12 for ' + \ + 'layer {}\n'.format(kk) + sys.stdout.write(msg) + t = Util2d.load(f, model, (nrow, ncol), np.float32, + 'dcom layer {}'.format(kk), + ext_unit_dict) + dcom[k] = t + # dz + if model.verbose: + msg = 2 * ' ' + 'loading sub dataset 13 for ' + \ + 'layer {}\n'.format(kk) + sys.stdout.write(msg) + t = Util2d.load(f, model, (nrow, ncol), np.float32, + 'dz layer {}'.format(kk), + ext_unit_dict) + dz[k] = t + # nz + if model.verbose: + msg = 2 * ' ' + 'loading sub dataset 14 for ' + \ + 'layer {}\n'.format(kk) + sys.stdout.write(msg) + t = Util2d.load(f, model, (nrow, ncol), np.int32, + 'nz layer {}'.format(kk), + ext_unit_dict) + nz[k] = t + + ids15 = None + ids16 = None + if isuboc > 0: + # dataset 15 + if model.verbose: + msg = 2 * ' ' + 'loading sub dataset 15 for ' + \ + 'layer {}\n'.format(kk) + sys.stdout.write(msg) + ids15 = np.empty(12, dtype=np.int32) + ids15 = read1d(f, ids15) + # dataset 16 + ids16 = [0] * isuboc + for k in range(isuboc): + if model.verbose: + msg = 2 * ' ' + 'loading sub dataset 16 for ' + \ + 'isuboc {}\n'.format(k + 1) + sys.stdout.write(msg) + t = np.empty(17, dtype=np.int32) + t = read1d(f, t) + t[0:4] -= 1 + ids16[k] = t + + if openfile: + f.close() + + model.add_pop_key_list(2051) + + # determine specified unit number + unitnumber = None + filenames = [None for x in range(9)] + if ext_unit_dict is not None: + unitnumber, filenames[0] = \ + model.get_ext_dict_attr(ext_unit_dict, + filetype=ModflowSub.ftype()) + if ipakcb > 0: + iu, filenames[1] = \ + model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb) + + if idsave > 0: + iu, filenames[2] = \ + model.get_ext_dict_attr(ext_unit_dict, unit=idsave) + + if isuboc > 0: + ipos = 3 + for k in range(1, 12, 2): + unit = ids15[k] + if unit > 0: + iu, filenames[ipos] = \ + model.get_ext_dict_attr(ext_unit_dict, unit=unit) + model.add_pop_key_list(unit) + ipos += 1 + + # create sub instance + sub = ModflowSub(model, ipakcb=ipakcb, isuboc=isuboc, idsave=idsave, + idrest=idrest, idbit=idbit, + nndb=nndb, ndb=ndb, nmz=nmz, nn=nn, ac1=ac1, ac2=ac2, + itmin=itmin, + ln=ln, ldn=ldn, rnb=rnb, + hc=hc, sfe=sfe, sfv=sfv, com=com, dp=dp, + dstart=dstart, dhc=dhc, dcom=dcom, dz=dz, nz=nz, + ids15=ids15, ids16=ids16, unitnumber=unitnumber, + filenames=filenames) + # return sub instance + return sub + + @staticmethod + def ftype(): + return 'SUB' + + @staticmethod + def defaultunit(): + return 32 diff --git a/flopy/modflow/mfswi2.py b/flopy/modflow/mfswi2.py index c7977d8dc7..9ab4974c48 100644 --- a/flopy/modflow/mfswi2.py +++ b/flopy/modflow/mfswi2.py @@ -1,716 +1,716 @@ -"""mfswi2 module. Contains the ModflowSwi2 class. Note that the user can -access the ModflowSwi2 class as `flopy.modflow.ModflowSwi2`. - -Additional information for this MODFLOW package can be found at the `Online -MODFLOW Guide -`_. -""" -import sys - -import numpy as np - -from ..pakbase import Package -from ..utils import Util2d, Util3d - - -class ModflowSwi2(Package): - """MODFLOW SWI2 Package Class. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to which - this package will be added. - nsrf : int - number of active surfaces (interfaces). This equals the number of zones - minus one. (default is 1). - istrat : int - flag indicating the density distribution. (default is 1). 
- iswizt : int - unit number for zeta output. (default is None). - ipakcb : int - A flag that is used to determine if cell-by-cell budget data should be - saved. If ipakcb is non-zero cell-by-cell budget data will be saved. - (default is None). - iswiobs : int - flag and unit number SWI2 observation output. (default is 0). - options : list of strings - Package options. If 'adaptive' is one of the options adaptive SWI2 time - steps will be used. (default is None). - nsolver : int - DE4 solver is used if nsolver=1. PCG solver is used if nsolver=2. - (default is 1). - iprsol : int - solver print out interval. (default is 0). - mutsol : int - If MUTSOL = 0, tables of maximum head change and residual will be - printed each iteration. - If MUTSOL = 1, only the total number of iterations will be printed. - If MUTSOL = 2, no information will be printed. - If MUTSOL = 3, information will only be printed if convergence fails. - (default is 3). - solver2parameters : dict - only used if nsolver = 2 - - mxiter : int - maximum number of outer iterations. (default is 100) - - iter1 : int - maximum number of inner iterations. (default is 20) - - npcond : int - flag used to select the matrix conditioning method. (default is 1). - specify NPCOND = 1 for Modified Incomplete Cholesky. - specify NPCOND = 2 for Polynomial. - - zclose : float - is the ZETA change criterion for convergence. (default is 1e-3). - - rclose : float - is the residual criterion for convergence. (default is 1e-4) - - relax : float - is the relaxation parameter used with NPCOND = 1. (default is 1.0) - - nbpol : int - is only used when NPCOND = 2 to indicate whether the estimate of - the upper bound on the maximum eigenvalue is 2.0, or whether the - estimate will be calculated. NBPOL = 2 is used to specify the - value is 2.0; for any other value of NBPOL, the estimate is - calculated. Convergence is generally insensitive to this - parameter. (default is 2). - - damp : float - is the steady-state damping factor. (default is 1.) - - dampt : float - is the transient damping factor. (default is 1.) - - toeslope : float - Maximum slope of toe cells. (default is 0.05) - tipslope : float - Maximum slope of tip cells. (default is 0.05) - alpha : float - fraction of threshold used to move the tip and toe to adjacent empty - cells when the slope exceeds user-specified TOESLOPE and TIPSLOPE - values. (default is None) - beta : float - Fraction of threshold used to move the toe to adjacent non-empty cells - when the surface is below a minimum value defined by the user-specified - TOESLOPE value. (default is 0.1). - nadptmx : int - only used if adaptive is True. Maximum number of SWI2 time steps per - MODFLOW time step. (default is 1). - nadptmn : int - only used if adaptive is True. Minimum number of SWI2 time steps per - MODFLOW time step. (default is 1). - adptfct : float - is the factor used to evaluate tip and toe thicknesses and control the - number of SWI2 time steps per MODFLOW time step. When the maximum tip - or toe thickness exceeds the product of TOESLOPE or TIPSLOPE the cell - size and ADPTFCT, the number of SWI2 time steps are increased to a - value less than or equal to NADPT. When the maximum tip or toe - thickness is less than the product of TOESLOPE or TIPSLOPE the cell - size and ADPTFCT, the number of SWI2 time steps is decreased in the - next MODFLOW time step to a value greater than or equal to 1. ADPTFCT - must be greater than 0.0 and is reset to 1.0 if NADPTMX is equal to - NADPTMN. (default is 1.0). 
- nu : array of floats - if istart = 1, density of each zone (nsrf + 1 values). if istrat = 0, - density along top of layer, each surface, and bottom of layer - (nsrf + 2 values). (default is 0.025) - zeta : list of floats or list of array of floats [(nlay, nrow, ncol), - (nlay, nrow, ncol)] initial elevations of the active surfaces. The - list should contain an entry for each surface and be of size nsrf. - (default is [0.]) - ssz : float or array of floats (nlay, nrow, ncol) - effective porosity. (default is 0.25) - isource : integer or array of integers (nlay, nrow, ncol) - Source type of any external sources or sinks, specified with any - outside package (i.e. WEL Package, RCH Package, GHB Package). - (default is 0). - - If ISOURCE > 0 sources and sinks have the same fluid density as the - zone ISOURCE. If such a zone is not present in the cell, sources and - sinks have the same fluid density as the active zone at the top of - the aquifer. If ISOURCE = 0 sources and sinks have the same fluid - density as the active zone at the top of the aquifer. If ISOURCE < 0 - sources have the same fluid density as the zone with a number equal - to the absolute value of ISOURCE. Sinks have the same fluid density - as the active zone at the top of the aquifer. This option is useful - for the modeling of the ocean bottom where infiltrating water is - salt, yet exfiltrating water is of the same type as the water at the - top of the aquifer. - obsnam : list of strings - names for nobs observations. - obslrc : list of lists - zero-based [layer, row, column] lists for nobs observations. - extension : string - Filename extension (default is 'swi2') - npln : int - Deprecated - use nsrf instead. - unitnumber : int - File unit number (default is None). - filenames : str or list of str - Filenames to use for the package and the zeta, cbc, obs output files. - If filenames=None the package name will be created using the model name - and package extension and the output file names will be created using - the model name and output extensions. If a single string is passed the - package will be set to the string and output names will be created - using the model name and zeta, cbc, and observation extensions. To - define the names for all package files (input and output) the length - of the list of strings should be 4. Default is None. - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - Parameters are supported in Flopy only when reading in existing models. - Parameter values are converted to native values in Flopy and the - connection to "parameters" is thus nonexistent. 
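# --- Illustrative sketch (editor's addition, not part of the patch) ---
# A minimal one-interface SWI2 setup consistent with the parameters
# documented above; all values are invented. With istrat=1, nu needs
# nsrf + 1 entries and zeta one (nlay, nrow, ncol) array per surface.
import numpy as np
import flopy

m = flopy.modflow.Modflow()
dis = flopy.modflow.ModflowDis(m, nlay=1, nrow=10, ncol=10)
zeta = [np.full((1, 10, 10), -10.0)]  # one active surface (nsrf=1)
swi = flopy.modflow.ModflowSwi2(m, nsrf=1, istrat=1, nu=[0.0, 0.025],
                                zeta=zeta, ssz=0.2, isource=-2,
                                toeslope=0.05, tipslope=0.05)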
- - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> swi2 = flopy.modflow.ModflowSwi2(m) - """ - - def __init__(self, model, nsrf=1, istrat=1, nobs=0, iswizt=None, - ipakcb=None, iswiobs=0, options=None, - nsolver=1, iprsol=0, mutsol=3, - solver2params={'mxiter': 100, 'iter1': 20, 'npcond': 1, - 'zclose': 1e-3, 'rclose': 1e-4, 'relax': 1.0, - 'nbpol': 2, 'damp': 1.0, 'dampt': 1.0}, - toeslope=0.05, tipslope=0.05, alpha=None, beta=0.1, nadptmx=1, - nadptmn=1, adptfct=1.0, nu=0.025, zeta=[0.0], ssz=0.25, - isource=0, obsnam=None, obslrc=None, npln=None, - extension='swi2', unitnumber=None, filenames=None): - """Package constructor.""" - # set default unit number of one is not specified - if unitnumber is None: - unitnumber = ModflowSwi2.defaultunit() - - # set filenames - if filenames is None: - filenames = [None, None, None, None] - elif isinstance(filenames, str): - filenames = [filenames, None, None, None] - elif isinstance(filenames, list): - if len(filenames) < 4: - for idx in range(len(filenames), 4): - filenames.append(None) - - # update external file information with zeta output, if necessary - if iswizt is not None: - fname = filenames[1] - model.add_output_file(iswizt, fname=fname, extension='zta', - package=ModflowSwi2.ftype()) - else: - iswizt = 0 - - # update external file information with swi2 cell-by-cell output, - # if necessary - if ipakcb is not None: - fname = filenames[2] - model.add_output_file(ipakcb, fname=fname, - package=ModflowSwi2.ftype()) - else: - ipakcb = 0 - - # Process observations - if nobs != 0: - print('ModflowSwi2: specification of nobs is deprecated.') - nobs = 0 - if obslrc is not None: - if isinstance(obslrc, list) or isinstance(obslrc, tuple): - obslrc = np.array(obslrc, dtype=np.int32) - if isinstance(obslrc, np.ndarray): - if obslrc.ndim == 1 and obslrc.size == 3: - obslrc = obslrc.reshape((1, 3)) - else: - errmsg = 'ModflowSwi2: obslrc must be a tuple or ' + \ - 'list of tuples.' - raise Exception(errmsg) - nobs = obslrc.shape[0] - - if obsnam is None: - obsnam = [] - for n in range(nobs): - obsnam.append('Obs{:03}'.format(n + 1)) - else: - if not isinstance(obsnam, list): - obsnam = [obsnam] - if len(obsnam) != nobs: - errmsg = 'ModflowSwi2: obsnam must be a list with a ' + \ - 'length of {} not {}.'.format(nobs, len(obsnam)) - raise Exception(errmsg) - - if nobs > 0: - binflag = False - ext = 'zobs.out' - fname = filenames[3] - if iswiobs is not None: - if iswiobs < 0: - binflag = True - ext = 'zobs.bin' - else: - iswiobs = 1053 - - # update external file information with swi2 observation output, - # if necessary - model.add_output_file(iswiobs, fname=fname, binflag=binflag, - extension=ext, package=ModflowSwi2.ftype()) - else: - iswiobs = 0 - - # Fill namefile items - name = [ModflowSwi2.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and - # unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper - self.heading = '# {} package for '.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' 
- - # options - self.fsssopt, self.adaptive = False, False - if isinstance(options, list): - if len(options) < 1: - self.options = None - else: - self.options = options - for o in self.options: - if o.lower() == 'fsssopt': - self.fsssopt = True - elif o.lower() == 'adaptive': - self.adaptive = True - else: - self.options = None - - if npln is not None: - print('npln keyword is deprecated. use the nsrf keyword') - nsrf = npln - - self.nsrf, self.istrat, self.nobs, self.iswizt, self.iswiobs = nsrf, \ - istrat, \ - nobs, \ - iswizt, \ - iswiobs - # set cbc unit - self.ipakcb = ipakcb - - # set solver flags - self.nsolver, self.iprsol, self.mutsol = nsolver, iprsol, mutsol - - # set solver parameters - self.solver2params = solver2params - # - self.toeslope, self.tipslope, self.alpha, self.beta = toeslope, \ - tipslope, \ - alpha, \ - beta - self.nadptmx, self.nadptmn, self.adptfct = nadptmx, nadptmn, adptfct - - # Create arrays so that they have the correct size - if self.istrat == 1: - self.nu = Util2d(model, (self.nsrf + 1,), np.float32, nu, - name='nu') - else: - self.nu = Util2d(model, (self.nsrf + 2,), np.float32, nu, - name='nu') - self.zeta = [] - for i in range(self.nsrf): - self.zeta.append(Util3d(model, (nlay, nrow, ncol), np.float32, - zeta[i], name='zeta_' + str(i + 1))) - self.ssz = Util3d(model, (nlay, nrow, ncol), np.float32, ssz, - name='ssz') - self.isource = Util3d(model, (nlay, nrow, ncol), np.int32, isource, - name='isource') - # - self.obsnam = obsnam - self.obslrc = obslrc - if nobs != 0: - self.nobs = self.obslrc.shape[0] - # - self.parent.add_package(self) - - def write_file(self, check=True, f=None): - """Write the package file. - - Parameters - ---------- - check : boolean - Check package data for common errors. (default True) - - Returns - ------- - None - """ - nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper - - # Open file for writing - if f is None: - f = open(self.fn_path, 'w') - - # First line: heading - f.write('{}\n'.format( - self.heading)) # Writing heading not allowed in SWI??? 
- - # write dataset 1 - f.write('# Dataset 1\n') - f.write( - '{:10d}{:10d}{:10d}{:10d}{:10d}{:10d}'.format(self.nsrf, - self.istrat, - self.nobs, - self.iswizt, - self.ipakcb, - self.iswiobs)) - - # write SWI2 options - if self.options != None: - for o in self.options: - f.write(' {}'.format(o)) - f.write('\n') - - # write dataset 2a - f.write('# Dataset 2a\n') - f.write('{:10d}{:10d}{:10d}\n'.format(self.nsolver, self.iprsol, - self.mutsol)) - - # write dataset 2b - if self.nsolver == 2: - f.write('# Dataset 2b\n') - f.write('{:10d}'.format(self.solver2params['mxiter'])) - f.write('{:10d}'.format(self.solver2params['iter1'])) - f.write('{:10d}'.format(self.solver2params['npcond'])) - f.write('{:14.6g}'.format(self.solver2params['zclose'])) - f.write('{:14.6g}'.format(self.solver2params['rclose'])) - f.write('{:14.6g}'.format(self.solver2params['relax'])) - f.write('{:10d}'.format(self.solver2params['nbpol'])) - f.write('{:14.6g}'.format(self.solver2params['damp'])) - f.write('{:14.6g}\n'.format(self.solver2params['dampt'])) - - # write dataset 3a - f.write('# Dataset 3a\n') - f.write('{:14.6g}{:14.6g}'.format(self.toeslope, self.tipslope)) - if self.alpha is not None: - f.write('{:14.6g}{:14.6g}'.format(self.alpha, self.beta)) - f.write('\n') - - # write dataset 3b - if self.adaptive is True: - f.write('# Dataset 3b\n') - f.write('{:10d}{:10d}{:14.6g}\n'.format(self.nadptmx, - self.nadptmn, - self.adptfct)) - # write dataset 4 - f.write('# Dataset 4\n') - f.write(self.nu.get_file_entry()) - - # write dataset 5 - f.write('# Dataset 5\n') - for isur in range(self.nsrf): - for ilay in range(nlay): - f.write(self.zeta[isur][ilay].get_file_entry()) - - # write dataset 6 - f.write('# Dataset 6\n') - f.write(self.ssz.get_file_entry()) - - # write dataset 7 - f.write('# Dataset 7\n') - f.write(self.isource.get_file_entry()) - - # write dataset 8 - if self.nobs > 0: - f.write('# Dataset 8\n') - for i in range(self.nobs): - # f.write(self.obsnam[i] + 3 * '%10i' % self.obslrc + '\n') - f.write('{} '.format(self.obsnam[i])) - for v in self.obslrc[i, :]: - f.write('{:10d}'.format(v + 1)) - f.write('\n') - - # close swi2 file - f.close() - - @staticmethod - def load(f, model, ext_unit_dict=None): - """Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. 
- - Returns - ------- - swi2 : ModflowSwi2 object - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> swi2 = flopy.modflow.ModflowSwi2.load('test.swi2', m) - """ - - if model.verbose: - sys.stdout.write('loading swi2 package file...\n') - - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - - # dataset 0 -- header - while True: - line = f.readline() - if line[0] != '#': - break - # determine problem dimensions - nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper() - - # --read dataset 1 - if model.verbose: - sys.stdout.write(' loading swi2 dataset 1\n') - t = line.strip().split() - nsrf = int(t[0]) - istrat = int(t[1]) - nobs = int(t[2]) - if int(t[3]) > 0: - model.add_pop_key_list(int(t[3])) - iswizt = int(t[3]) - if int(t[4]) > 0: - model.add_pop_key_list(int(t[4])) - ipakcb = int(t[4]) - else: - ipakcb = 0 - iswiobs = 0 - if int(t[5]) > 0: - model.add_pop_key_list(int(t[5])) - iswiobs = int(t[5]) - options = [] - adaptive = False - for idx in range(6, len(t)): - if '#' in t[idx]: - break - options.append(t[idx]) - if 'adaptive' in t[idx].lower(): - adaptive = True - - # read dataset 2a - if model.verbose: - sys.stdout.write(' loading swi2 dataset 2a\n') - while True: - line = f.readline() - if line[0] != '#': - break - t = line.strip().split() - nsolver = int(t[0]) - iprsol = int(t[1]) - mutsol = int(t[2]) - - # read dataset 2b - solver2params = {} - if nsolver == 2: - if model.verbose: - sys.stdout.write(' loading swi2 dataset 2b\n') - while True: - line = f.readline() - if line[0] != '#': - break - t = line.strip().split() - solver2params['mxiter'] = int(t[0]) - solver2params['iter1'] = int(t[1]) - solver2params['npcond'] = int(t[2]) - solver2params['zclose'] = float(t[3]) - solver2params['rclose'] = float(t[4]) - solver2params['relax'] = float(t[5]) - solver2params['nbpol'] = int(t[6]) - solver2params['damp'] = float(t[7]) - solver2params['dampt'] = float(t[8]) - - # read dataset 3a - if model.verbose: - sys.stdout.write(' loading swi2 dataset 3a\n') - while True: - line = f.readline() - if line[0] != '#': - break - t = line.strip().split() - toeslope = float(t[0]) - tipslope = float(t[1]) - alpha = None - beta = 0.1 - if len(t) > 2: - try: - alpha = float(t[2]) - beta = float(t[3]) - except: - if model.verbose: - print(' explicit alpha and beta in file') - - # read dataset 3b - nadptmx, nadptmn, adptfct = None, None, None - if adaptive: - if model.verbose: - sys.stdout.write(' loading swi2 dataset 3b\n') - while True: - line = f.readline() - if line[0] != '#': - break - t = line.strip().split() - nadptmx = int(t[0]) - nadptmn = int(t[1]) - adptfct = float(t[2]) - - # read dataset 4 - if model.verbose: - print(' loading nu...') - if istrat == 1: - nnu = nsrf + 1 - else: - nnu = nsrf + 2 - while True: - ipos = f.tell() - line = f.readline() - if line[0] != '#': - f.seek(ipos) - break - nu = Util2d.load(f, model, (nnu,), np.float32, 'nu', - ext_unit_dict) - - # read dataset 5 - if model.verbose: - print(' loading initial zeta surfaces...') - while True: - ipos = f.tell() - line = f.readline() - if line[0] != '#': - f.seek(ipos) - break - zeta = [] - for n in range(nsrf): - ctxt = 'zeta_surf{:02d}'.format(n + 1) - zeta.append(Util3d.load(f, model, (nlay, nrow, ncol), - np.float32, ctxt, ext_unit_dict)) - - # read dataset 6 - if model.verbose: - print(' loading initial ssz...') - while True: - ipos = f.tell() - line = f.readline() - if line[0] != '#': - f.seek(ipos) - break - ssz = Util3d.load(f, model, (nlay, 
nrow, ncol), np.float32, - 'ssz', ext_unit_dict) - - # read dataset 7 - if model.verbose: - print(' loading initial isource...') - while True: - ipos = f.tell() - line = f.readline() - if line[0] != '#': - f.seek(ipos) - break - isource = Util3d.load(f, model, (nlay, nrow, ncol), np.int32, - 'isource', ext_unit_dict) - - # read dataset 8 - obsname = [] - obslrc = [] - if nobs > 0: - if model.verbose: - print(' loading observation locations...') - while True: - line = f.readline() - if line[0] != '#': - break - for i in range(nobs): - if i > 0: - try: - line = f.readline() - except: - break - t = line.strip().split() - obsname.append(t[0]) - kk = int(t[1]) - 1 - ii = int(t[2]) - 1 - jj = int(t[3]) - 1 - obslrc.append([kk, ii, jj]) - nobs = len(obsname) - - if openfile: - f.close() - - # determine specified unit number - unitnumber = None - filenames = [None, None, None, None] - if ext_unit_dict is not None: - unitnumber, filenames[0] = \ - model.get_ext_dict_attr(ext_unit_dict, - filetype=ModflowSwi2.ftype()) - if iswizt > 0: - iu, filenames[1] = \ - model.get_ext_dict_attr(ext_unit_dict, unit=iswizt) - if ipakcb > 0: - iu, filenames[2] = \ - model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb) - if abs(iswiobs) > 0: - iu, filenames[3] = \ - model.get_ext_dict_attr(ext_unit_dict, unit=abs(iswiobs)) - - # create swi2 instance - swi2 = ModflowSwi2(model, nsrf=nsrf, istrat=istrat, - iswizt=iswizt, ipakcb=ipakcb, - iswiobs=iswiobs, options=options, - nsolver=nsolver, iprsol=iprsol, mutsol=mutsol, - solver2params=solver2params, - toeslope=toeslope, tipslope=tipslope, alpha=alpha, - beta=beta, - nadptmx=nadptmx, nadptmn=nadptmn, adptfct=adptfct, - nu=nu, zeta=zeta, ssz=ssz, isource=isource, - obsnam=obsname, obslrc=obslrc, - unitnumber=unitnumber, filenames=filenames) - - # return swi2 instance - return swi2 - - @staticmethod - def ftype(): - return 'SWI2' - - @staticmethod - def defaultunit(): - return 29 +"""mfswi2 module. Contains the ModflowSwi2 class. Note that the user can +access the ModflowSwi2 class as `flopy.modflow.ModflowSwi2`. + +Additional information for this MODFLOW package can be found at the `Online +MODFLOW Guide +`_. +""" +import sys + +import numpy as np + +from ..pakbase import Package +from ..utils import Util2d, Util3d + + +class ModflowSwi2(Package): + """MODFLOW SWI2 Package Class. + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to which + this package will be added. + nsrf : int + number of active surfaces (interfaces). This equals the number of zones + minus one. (default is 1). + istrat : int + flag indicating the density distribution. (default is 1). + iswizt : int + unit number for zeta output. (default is None). + ipakcb : int + A flag that is used to determine if cell-by-cell budget data should be + saved. If ipakcb is non-zero cell-by-cell budget data will be saved. + (default is None). + iswiobs : int + flag and unit number SWI2 observation output. (default is 0). + options : list of strings + Package options. If 'adaptive' is one of the options adaptive SWI2 time + steps will be used. (default is None). + nsolver : int + DE4 solver is used if nsolver=1. PCG solver is used if nsolver=2. + (default is 1). + iprsol : int + solver print out interval. (default is 0). + mutsol : int + If MUTSOL = 0, tables of maximum head change and residual will be + printed each iteration. + If MUTSOL = 1, only the total number of iterations will be printed. + If MUTSOL = 2, no information will be printed. 
+ If MUTSOL = 3, information will only be printed if convergence fails. + (default is 3). + solver2params : dict + only used if nsolver = 2 + + mxiter : int + maximum number of outer iterations. (default is 100) + + iter1 : int + maximum number of inner iterations. (default is 20) + + npcond : int + flag used to select the matrix conditioning method. (default is 1). + specify NPCOND = 1 for Modified Incomplete Cholesky. + specify NPCOND = 2 for Polynomial. + + zclose : float + is the ZETA change criterion for convergence. (default is 1e-3). + + rclose : float + is the residual criterion for convergence. (default is 1e-4) + + relax : float + is the relaxation parameter used with NPCOND = 1. (default is 1.0) + + nbpol : int + is only used when NPCOND = 2 to indicate whether the estimate of + the upper bound on the maximum eigenvalue is 2.0, or whether the + estimate will be calculated. NBPOL = 2 is used to specify the + value is 2.0; for any other value of NBPOL, the estimate is + calculated. Convergence is generally insensitive to this + parameter. (default is 2). + + damp : float + is the steady-state damping factor. (default is 1.) + + dampt : float + is the transient damping factor. (default is 1.) + + toeslope : float + Maximum slope of toe cells. (default is 0.05) + tipslope : float + Maximum slope of tip cells. (default is 0.05) + alpha : float + fraction of threshold used to move the tip and toe to adjacent empty + cells when the slope exceeds user-specified TOESLOPE and TIPSLOPE + values. (default is None) + beta : float + Fraction of threshold used to move the toe to adjacent non-empty cells + when the surface is below a minimum value defined by the user-specified + TOESLOPE value. (default is 0.1). + nadptmx : int + only used if adaptive is True. Maximum number of SWI2 time steps per + MODFLOW time step. (default is 1). + nadptmn : int + only used if adaptive is True. Minimum number of SWI2 time steps per + MODFLOW time step. (default is 1). + adptfct : float + is the factor used to evaluate tip and toe thicknesses and control the + number of SWI2 time steps per MODFLOW time step. When the maximum tip + or toe thickness exceeds the product of TOESLOPE or TIPSLOPE, the cell + size, and ADPTFCT, the number of SWI2 time steps is increased to a + value less than or equal to NADPT. When the maximum tip or toe + thickness is less than the product of TOESLOPE or TIPSLOPE, the cell + size, and ADPTFCT, the number of SWI2 time steps is decreased in the + next MODFLOW time step to a value greater than or equal to 1. ADPTFCT + must be greater than 0.0 and is reset to 1.0 if NADPTMX is equal to + NADPTMN. (default is 1.0). + nu : array of floats + if istrat = 1, density of each zone (nsrf + 1 values). if istrat = 0, + density along top of layer, each surface, and bottom of layer + (nsrf + 2 values). (default is 0.025) + zeta : list of floats or list of array of floats [(nlay, nrow, ncol), + (nlay, nrow, ncol)] initial elevations of the active surfaces. The + list should contain an entry for each surface and be of size nsrf. + (default is [0.]) + ssz : float or array of floats (nlay, nrow, ncol) + effective porosity. (default is 0.25) + isource : integer or array of integers (nlay, nrow, ncol) + Source type of any external sources or sinks, specified with any + outside package (e.g. WEL Package, RCH Package, GHB Package). + (default is 0). + + If ISOURCE > 0 sources and sinks have the same fluid density as the + zone ISOURCE.
If such a zone is not present in the cell, sources and + sinks have the same fluid density as the active zone at the top of + the aquifer. If ISOURCE = 0 sources and sinks have the same fluid + density as the active zone at the top of the aquifer. If ISOURCE < 0 + sources have the same fluid density as the zone with a number equal + to the absolute value of ISOURCE. Sinks have the same fluid density + as the active zone at the top of the aquifer. This option is useful + for the modeling of the ocean bottom where infiltrating water is + salt, yet exfiltrating water is of the same type as the water at the + top of the aquifer. + obsnam : list of strings + names for nobs observations. + obslrc : list of lists + zero-based [layer, row, column] lists for nobs observations. + extension : string + Filename extension (default is 'swi2') + npln : int + Deprecated - use nsrf instead. + unitnumber : int + File unit number (default is None). + filenames : str or list of str + Filenames to use for the package and the zeta, cbc, obs output files. + If filenames=None the package name will be created using the model name + and package extension and the output file names will be created using + the model name and output extensions. If a single string is passed the + package will be set to the string and output names will be created + using the model name and zeta, cbc, and observation extensions. To + define the names for all package files (input and output) the length + of the list of strings should be 4. Default is None. + + Attributes + ---------- + + Methods + ------- + + See Also + -------- + + Notes + ----- + Parameters are supported in Flopy only when reading in existing models. + Parameter values are converted to native values in Flopy and the + connection to "parameters" is thus nonexistent. 
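    [Editor's addition] A hedged sketch of how `nu` sizes with `istrat`;
    the densities and grid shape below are illustrative assumptions only,
    and a DIS package is created first so the package arrays can be shaped:

    >>> import flopy
    >>> m = flopy.modflow.Modflow()
    >>> dis = flopy.modflow.ModflowDis(m, nlay=1, nrow=3, ncol=3)
    >>> swi2 = flopy.modflow.ModflowSwi2(m, nsrf=1, istrat=1,
    ...                                  nu=[0.0, 0.025])  # nsrf + 1 values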
+ + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> swi2 = flopy.modflow.ModflowSwi2(m) + """ + + def __init__(self, model, nsrf=1, istrat=1, nobs=0, iswizt=None, + ipakcb=None, iswiobs=0, options=None, + nsolver=1, iprsol=0, mutsol=3, + solver2params={'mxiter': 100, 'iter1': 20, 'npcond': 1, + 'zclose': 1e-3, 'rclose': 1e-4, 'relax': 1.0, + 'nbpol': 2, 'damp': 1.0, 'dampt': 1.0}, + toeslope=0.05, tipslope=0.05, alpha=None, beta=0.1, nadptmx=1, + nadptmn=1, adptfct=1.0, nu=0.025, zeta=[0.0], ssz=0.25, + isource=0, obsnam=None, obslrc=None, npln=None, + extension='swi2', unitnumber=None, filenames=None): + """Package constructor.""" + # set default unit number of one is not specified + if unitnumber is None: + unitnumber = ModflowSwi2.defaultunit() + + # set filenames + if filenames is None: + filenames = [None, None, None, None] + elif isinstance(filenames, str): + filenames = [filenames, None, None, None] + elif isinstance(filenames, list): + if len(filenames) < 4: + for idx in range(len(filenames), 4): + filenames.append(None) + + # update external file information with zeta output, if necessary + if iswizt is not None: + fname = filenames[1] + model.add_output_file(iswizt, fname=fname, extension='zta', + package=ModflowSwi2.ftype()) + else: + iswizt = 0 + + # update external file information with swi2 cell-by-cell output, + # if necessary + if ipakcb is not None: + fname = filenames[2] + model.add_output_file(ipakcb, fname=fname, + package=ModflowSwi2.ftype()) + else: + ipakcb = 0 + + # Process observations + if nobs != 0: + print('ModflowSwi2: specification of nobs is deprecated.') + nobs = 0 + if obslrc is not None: + if isinstance(obslrc, list) or isinstance(obslrc, tuple): + obslrc = np.array(obslrc, dtype=np.int32) + if isinstance(obslrc, np.ndarray): + if obslrc.ndim == 1 and obslrc.size == 3: + obslrc = obslrc.reshape((1, 3)) + else: + errmsg = 'ModflowSwi2: obslrc must be a tuple or ' + \ + 'list of tuples.' + raise Exception(errmsg) + nobs = obslrc.shape[0] + + if obsnam is None: + obsnam = [] + for n in range(nobs): + obsnam.append('Obs{:03}'.format(n + 1)) + else: + if not isinstance(obsnam, list): + obsnam = [obsnam] + if len(obsnam) != nobs: + errmsg = 'ModflowSwi2: obsnam must be a list with a ' + \ + 'length of {} not {}.'.format(nobs, len(obsnam)) + raise Exception(errmsg) + + if nobs > 0: + binflag = False + ext = 'zobs.out' + fname = filenames[3] + if iswiobs is not None: + if iswiobs < 0: + binflag = True + ext = 'zobs.bin' + else: + iswiobs = 1053 + + # update external file information with swi2 observation output, + # if necessary + model.add_output_file(iswiobs, fname=fname, binflag=binflag, + extension=ext, package=ModflowSwi2.ftype()) + else: + iswiobs = 0 + + # Fill namefile items + name = [ModflowSwi2.ftype()] + units = [unitnumber] + extra = [''] + + # set package name + fname = [filenames[0]] + + # Call ancestor's init to set self.parent, extension, name and + # unit number + Package.__init__(self, model, extension=extension, name=name, + unit_number=units, extra=extra, filenames=fname) + + nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper + self.heading = '# {} package for '.format(self.name[0]) + \ + ' {}, '.format(model.version_types[model.version]) + \ + 'generated by Flopy.' 
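        # [Editor's note, added comments] A sketch of the 4-entry filenames
        # convention handled earlier in this constructor (names below are
        # hypothetical, not from the patch):
        #     filenames=['run.swi2',      # package input file
        #                'run.zta',       # zeta output, paired with iswizt
        #                'run.cbc',       # cell-by-cell budget, with ipakcb
        #                'run.zobs.out']  # observation output, with iswiobs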
+ + # options + self.fsssopt, self.adaptive = False, False + if isinstance(options, list): + if len(options) < 1: + self.options = None + else: + self.options = options + for o in self.options: + if o.lower() == 'fsssopt': + self.fsssopt = True + elif o.lower() == 'adaptive': + self.adaptive = True + else: + self.options = None + + if npln is not None: + print('npln keyword is deprecated. use the nsrf keyword') + nsrf = npln + + self.nsrf, self.istrat, self.nobs, self.iswizt, self.iswiobs = nsrf, \ + istrat, \ + nobs, \ + iswizt, \ + iswiobs + # set cbc unit + self.ipakcb = ipakcb + + # set solver flags + self.nsolver, self.iprsol, self.mutsol = nsolver, iprsol, mutsol + + # set solver parameters + self.solver2params = solver2params + # + self.toeslope, self.tipslope, self.alpha, self.beta = toeslope, \ + tipslope, \ + alpha, \ + beta + self.nadptmx, self.nadptmn, self.adptfct = nadptmx, nadptmn, adptfct + + # Create arrays so that they have the correct size + if self.istrat == 1: + self.nu = Util2d(model, (self.nsrf + 1,), np.float32, nu, + name='nu') + else: + self.nu = Util2d(model, (self.nsrf + 2,), np.float32, nu, + name='nu') + self.zeta = [] + for i in range(self.nsrf): + self.zeta.append(Util3d(model, (nlay, nrow, ncol), np.float32, + zeta[i], name='zeta_' + str(i + 1))) + self.ssz = Util3d(model, (nlay, nrow, ncol), np.float32, ssz, + name='ssz') + self.isource = Util3d(model, (nlay, nrow, ncol), np.int32, isource, + name='isource') + # + self.obsnam = obsnam + self.obslrc = obslrc + if nobs != 0: + self.nobs = self.obslrc.shape[0] + # + self.parent.add_package(self) + + def write_file(self, check=True, f=None): + """Write the package file. + + Parameters + ---------- + check : boolean + Check package data for common errors. (default True) + + Returns + ------- + None + """ + nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper + + # Open file for writing + if f is None: + f = open(self.fn_path, 'w') + + # First line: heading + f.write('{}\n'.format( + self.heading)) # Writing heading not allowed in SWI??? 
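        # [Editor's note, added comments] Dataset 1 below is written as six
        # 10-character integer fields so the record lines up with MODFLOW's
        # fixed-format reader; for example '{:10d}'.format(1) yields
        # '         1' (nine spaces, then the digit), and any option
        # keywords are appended after the sixth field.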
+ + # write dataset 1 + f.write('# Dataset 1\n') + f.write( + '{:10d}{:10d}{:10d}{:10d}{:10d}{:10d}'.format(self.nsrf, + self.istrat, + self.nobs, + self.iswizt, + self.ipakcb, + self.iswiobs)) + + # write SWI2 options + if self.options != None: + for o in self.options: + f.write(' {}'.format(o)) + f.write('\n') + + # write dataset 2a + f.write('# Dataset 2a\n') + f.write('{:10d}{:10d}{:10d}\n'.format(self.nsolver, self.iprsol, + self.mutsol)) + + # write dataset 2b + if self.nsolver == 2: + f.write('# Dataset 2b\n') + f.write('{:10d}'.format(self.solver2params['mxiter'])) + f.write('{:10d}'.format(self.solver2params['iter1'])) + f.write('{:10d}'.format(self.solver2params['npcond'])) + f.write('{:14.6g}'.format(self.solver2params['zclose'])) + f.write('{:14.6g}'.format(self.solver2params['rclose'])) + f.write('{:14.6g}'.format(self.solver2params['relax'])) + f.write('{:10d}'.format(self.solver2params['nbpol'])) + f.write('{:14.6g}'.format(self.solver2params['damp'])) + f.write('{:14.6g}\n'.format(self.solver2params['dampt'])) + + # write dataset 3a + f.write('# Dataset 3a\n') + f.write('{:14.6g}{:14.6g}'.format(self.toeslope, self.tipslope)) + if self.alpha is not None: + f.write('{:14.6g}{:14.6g}'.format(self.alpha, self.beta)) + f.write('\n') + + # write dataset 3b + if self.adaptive is True: + f.write('# Dataset 3b\n') + f.write('{:10d}{:10d}{:14.6g}\n'.format(self.nadptmx, + self.nadptmn, + self.adptfct)) + # write dataset 4 + f.write('# Dataset 4\n') + f.write(self.nu.get_file_entry()) + + # write dataset 5 + f.write('# Dataset 5\n') + for isur in range(self.nsrf): + for ilay in range(nlay): + f.write(self.zeta[isur][ilay].get_file_entry()) + + # write dataset 6 + f.write('# Dataset 6\n') + f.write(self.ssz.get_file_entry()) + + # write dataset 7 + f.write('# Dataset 7\n') + f.write(self.isource.get_file_entry()) + + # write dataset 8 + if self.nobs > 0: + f.write('# Dataset 8\n') + for i in range(self.nobs): + # f.write(self.obsnam[i] + 3 * '%10i' % self.obslrc + '\n') + f.write('{} '.format(self.obsnam[i])) + for v in self.obslrc[i, :]: + f.write('{:10d}'.format(v + 1)) + f.write('\n') + + # close swi2 file + f.close() + + @staticmethod + def load(f, model, ext_unit_dict=None): + """Load an existing package. + + Parameters + ---------- + f : filename or file handle + File to load. + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to + which this package will be added. + ext_unit_dict : dictionary, optional + If the arrays in the file are specified using EXTERNAL, + or older style array control records, then `f` should be a file + handle. In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. 
+ + Returns + ------- + swi2 : ModflowSwi2 object + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> swi2 = flopy.modflow.ModflowSwi2.load('test.swi2', m) + """ + + if model.verbose: + sys.stdout.write('loading swi2 package file...\n') + + openfile = not hasattr(f, 'read') + if openfile: + filename = f + f = open(filename, 'r') + + # dataset 0 -- header + while True: + line = f.readline() + if line[0] != '#': + break + # determine problem dimensions + nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper() + + # --read dataset 1 + if model.verbose: + sys.stdout.write(' loading swi2 dataset 1\n') + t = line.strip().split() + nsrf = int(t[0]) + istrat = int(t[1]) + nobs = int(t[2]) + if int(t[3]) > 0: + model.add_pop_key_list(int(t[3])) + iswizt = int(t[3]) + if int(t[4]) > 0: + model.add_pop_key_list(int(t[4])) + ipakcb = int(t[4]) + else: + ipakcb = 0 + iswiobs = 0 + if int(t[5]) > 0: + model.add_pop_key_list(int(t[5])) + iswiobs = int(t[5]) + options = [] + adaptive = False + for idx in range(6, len(t)): + if '#' in t[idx]: + break + options.append(t[idx]) + if 'adaptive' in t[idx].lower(): + adaptive = True + + # read dataset 2a + if model.verbose: + sys.stdout.write(' loading swi2 dataset 2a\n') + while True: + line = f.readline() + if line[0] != '#': + break + t = line.strip().split() + nsolver = int(t[0]) + iprsol = int(t[1]) + mutsol = int(t[2]) + + # read dataset 2b + solver2params = {} + if nsolver == 2: + if model.verbose: + sys.stdout.write(' loading swi2 dataset 2b\n') + while True: + line = f.readline() + if line[0] != '#': + break + t = line.strip().split() + solver2params['mxiter'] = int(t[0]) + solver2params['iter1'] = int(t[1]) + solver2params['npcond'] = int(t[2]) + solver2params['zclose'] = float(t[3]) + solver2params['rclose'] = float(t[4]) + solver2params['relax'] = float(t[5]) + solver2params['nbpol'] = int(t[6]) + solver2params['damp'] = float(t[7]) + solver2params['dampt'] = float(t[8]) + + # read dataset 3a + if model.verbose: + sys.stdout.write(' loading swi2 dataset 3a\n') + while True: + line = f.readline() + if line[0] != '#': + break + t = line.strip().split() + toeslope = float(t[0]) + tipslope = float(t[1]) + alpha = None + beta = 0.1 + if len(t) > 2: + try: + alpha = float(t[2]) + beta = float(t[3]) + except: + if model.verbose: + print(' explicit alpha and beta in file') + + # read dataset 3b + nadptmx, nadptmn, adptfct = None, None, None + if adaptive: + if model.verbose: + sys.stdout.write(' loading swi2 dataset 3b\n') + while True: + line = f.readline() + if line[0] != '#': + break + t = line.strip().split() + nadptmx = int(t[0]) + nadptmn = int(t[1]) + adptfct = float(t[2]) + + # read dataset 4 + if model.verbose: + print(' loading nu...') + if istrat == 1: + nnu = nsrf + 1 + else: + nnu = nsrf + 2 + while True: + ipos = f.tell() + line = f.readline() + if line[0] != '#': + f.seek(ipos) + break + nu = Util2d.load(f, model, (nnu,), np.float32, 'nu', + ext_unit_dict) + + # read dataset 5 + if model.verbose: + print(' loading initial zeta surfaces...') + while True: + ipos = f.tell() + line = f.readline() + if line[0] != '#': + f.seek(ipos) + break + zeta = [] + for n in range(nsrf): + ctxt = 'zeta_surf{:02d}'.format(n + 1) + zeta.append(Util3d.load(f, model, (nlay, nrow, ncol), + np.float32, ctxt, ext_unit_dict)) + + # read dataset 6 + if model.verbose: + print(' loading initial ssz...') + while True: + ipos = f.tell() + line = f.readline() + if line[0] != '#': + f.seek(ipos) + break + ssz = Util3d.load(f, model, (nlay, 
nrow, ncol), np.float32, + 'ssz', ext_unit_dict) + + # read dataset 7 + if model.verbose: + print(' loading initial isource...') + while True: + ipos = f.tell() + line = f.readline() + if line[0] != '#': + f.seek(ipos) + break + isource = Util3d.load(f, model, (nlay, nrow, ncol), np.int32, + 'isource', ext_unit_dict) + + # read dataset 8 + obsname = [] + obslrc = [] + if nobs > 0: + if model.verbose: + print(' loading observation locations...') + while True: + line = f.readline() + if line[0] != '#': + break + for i in range(nobs): + if i > 0: + try: + line = f.readline() + except: + break + t = line.strip().split() + obsname.append(t[0]) + kk = int(t[1]) - 1 + ii = int(t[2]) - 1 + jj = int(t[3]) - 1 + obslrc.append([kk, ii, jj]) + nobs = len(obsname) + + if openfile: + f.close() + + # determine specified unit number + unitnumber = None + filenames = [None, None, None, None] + if ext_unit_dict is not None: + unitnumber, filenames[0] = \ + model.get_ext_dict_attr(ext_unit_dict, + filetype=ModflowSwi2.ftype()) + if iswizt > 0: + iu, filenames[1] = \ + model.get_ext_dict_attr(ext_unit_dict, unit=iswizt) + if ipakcb > 0: + iu, filenames[2] = \ + model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb) + if abs(iswiobs) > 0: + iu, filenames[3] = \ + model.get_ext_dict_attr(ext_unit_dict, unit=abs(iswiobs)) + + # create swi2 instance + swi2 = ModflowSwi2(model, nsrf=nsrf, istrat=istrat, + iswizt=iswizt, ipakcb=ipakcb, + iswiobs=iswiobs, options=options, + nsolver=nsolver, iprsol=iprsol, mutsol=mutsol, + solver2params=solver2params, + toeslope=toeslope, tipslope=tipslope, alpha=alpha, + beta=beta, + nadptmx=nadptmx, nadptmn=nadptmn, adptfct=adptfct, + nu=nu, zeta=zeta, ssz=ssz, isource=isource, + obsnam=obsname, obslrc=obslrc, + unitnumber=unitnumber, filenames=filenames) + + # return swi2 instance + return swi2 + + @staticmethod + def ftype(): + return 'SWI2' + + @staticmethod + def defaultunit(): + return 29 diff --git a/flopy/modflow/mfswr1.py b/flopy/modflow/mfswr1.py index a2095fe2b6..006b685b1f 100644 --- a/flopy/modflow/mfswr1.py +++ b/flopy/modflow/mfswr1.py @@ -1,187 +1,187 @@ -""" -mfswr module. Contains the ModflowSwr1 class. Note that the user can access -the ModflowSwr1 class as `flopy.modflow.ModflowSwr1`. - -Additional information for this MODFLOW process can be found at the `Online -MODFLOW Guide -`_. - -""" -import sys -from ..pakbase import Package - - -class ModflowSwr1(Package): - """ - MODFLOW Surface-Water Routing Class. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to which - this package will be added. - extension : string - Filename extension (default is 'swr') - unitnumber : int - File unit number (default is None). - filenames : str or list of str - Filenames to use for the package. If filenames=None the package name - will be created using the model name and package extension. If a - single string is passed the package will be set to the string. - Default is None. - - - Attributes - ---------- - - Methods - ------- - - - See Also - -------- - - Notes - ----- - SWR1 Class is only used to write SWR1 filename to name file. Full - functionality still needs to be implemented. - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> swr = flopy.modflow.ModflowSwr1(m) - - """ - - def __init__(self, model, extension='swr', unitnumber=None, - filenames=None): - """ - Package constructor. 
- - """ - # set default unit number of one is not specified - if unitnumber is None: - unitnumber = ModflowSwr1.defaultunit() - - # set filenames - if filenames is None: - filenames = [None] - elif isinstance(filenames, str): - filenames = [filenames] - - # Fill namefile items - name = [ModflowSwr1.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and - # unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - # check if a valid model version has been specified - if model.version == 'mf2k' or model.version == 'mfusg': - err = 'Error: cannot use {} package with model version {}'.format( - self.name, model.version) - raise Exception(err) - - self.heading = '# {} package for '.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' - self.url = 'swr1.htm' - - self.parent.add_package(self) - - def write_file(self): - """ - Write the package file. - - Returns - ------- - None - - """ - print('SWR1 write method not implemented yet') - # f = open(self.fn_path, 'w') - # f.write('{0}\n'.format(self.heading)) - # f.close() - - @staticmethod - def load(f, model, ext_unit_dict=None): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type: class:`flopy.modflow.mf.Modflow`) - to which this package will be added. - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - - Returns - ------- - swr : ModflowSwr1 object - ModflowSwr1 object (of type :class:`flopy.modflow.mfbas.ModflowSwr1`) - - Notes - ----- - Load method still needs to be implemented. - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> swr = flopy.modflow.ModflowSwr1.load('test.swr', m) - - """ - - if model.verbose: - sys.stdout.write('loading swr1 process file...\n') - - # todo: everything - - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - - print( - 'Warning: load method not completed. default swr1 object created.') - - if openfile: - f.close() - - # determine specified unit number - unitnumber = None - filenames = [None] - if ext_unit_dict is not None: - unitnumber, filenames[0] = \ - model.get_ext_dict_attr(ext_unit_dict, - filetype=ModflowSwr1.ftype()) - - # create swr1 object instance - swr1 = ModflowSwr1(model, unitnumber=unitnumber, filenames=filenames) - - # return swr object - return swr1 - - @staticmethod - def ftype(): - return 'SWR' - - @staticmethod - def defaultunit(): - return 36 +""" +mfswr module. Contains the ModflowSwr1 class. Note that the user can access +the ModflowSwr1 class as `flopy.modflow.ModflowSwr1`. + +Additional information for this MODFLOW process can be found at the `Online +MODFLOW Guide +`_. + +""" +import sys +from ..pakbase import Package + + +class ModflowSwr1(Package): + """ + MODFLOW Surface-Water Routing Class. + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to which + this package will be added. 
+ extension : string + Filename extension (default is 'swr') + unitnumber : int + File unit number (default is None). + filenames : str or list of str + Filenames to use for the package. If filenames=None the package name + will be created using the model name and package extension. If a + single string is passed the package will be set to the string. + Default is None. + + + Attributes + ---------- + + Methods + ------- + + + See Also + -------- + + Notes + ----- + SWR1 Class is only used to write SWR1 filename to name file. Full + functionality still needs to be implemented. + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> swr = flopy.modflow.ModflowSwr1(m) + + """ + + def __init__(self, model, extension='swr', unitnumber=None, + filenames=None): + """ + Package constructor. + + """ + # set default unit number of one is not specified + if unitnumber is None: + unitnumber = ModflowSwr1.defaultunit() + + # set filenames + if filenames is None: + filenames = [None] + elif isinstance(filenames, str): + filenames = [filenames] + + # Fill namefile items + name = [ModflowSwr1.ftype()] + units = [unitnumber] + extra = [''] + + # set package name + fname = [filenames[0]] + + # Call ancestor's init to set self.parent, extension, name and + # unit number + Package.__init__(self, model, extension=extension, name=name, + unit_number=units, extra=extra, filenames=fname) + + # check if a valid model version has been specified + if model.version == 'mf2k' or model.version == 'mfusg': + err = 'Error: cannot use {} package with model version {}'.format( + self.name, model.version) + raise Exception(err) + + self.heading = '# {} package for '.format(self.name[0]) + \ + ' {}, '.format(model.version_types[model.version]) + \ + 'generated by Flopy.' + self.url = 'swr1.htm' + + self.parent.add_package(self) + + def write_file(self): + """ + Write the package file. + + Returns + ------- + None + + """ + print('SWR1 write method not implemented yet') + # f = open(self.fn_path, 'w') + # f.write('{0}\n'.format(self.heading)) + # f.close() + + @staticmethod + def load(f, model, ext_unit_dict=None): + """ + Load an existing package. + + Parameters + ---------- + f : filename or file handle + File to load. + model : model object + The model object (of type: class:`flopy.modflow.mf.Modflow`) + to which this package will be added. + ext_unit_dict : dictionary, optional + If the arrays in the file are specified using EXTERNAL, + or older style array control records, then `f` should be a file + handle. In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. + + Returns + ------- + swr : ModflowSwr1 object + ModflowSwr1 object (of type :class:`flopy.modflow.mfbas.ModflowSwr1`) + + Notes + ----- + Load method still needs to be implemented. + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> swr = flopy.modflow.ModflowSwr1.load('test.swr', m) + + """ + + if model.verbose: + sys.stdout.write('loading swr1 process file...\n') + + # todo: everything + + openfile = not hasattr(f, 'read') + if openfile: + filename = f + f = open(filename, 'r') + + print( + 'Warning: load method not completed. 
default swr1 object created.') + + if openfile: + f.close() + + # determine specified unit number + unitnumber = None + filenames = [None] + if ext_unit_dict is not None: + unitnumber, filenames[0] = \ + model.get_ext_dict_attr(ext_unit_dict, + filetype=ModflowSwr1.ftype()) + + # create swr1 object instance + swr1 = ModflowSwr1(model, unitnumber=unitnumber, filenames=filenames) + + # return swr object + return swr1 + + @staticmethod + def ftype(): + return 'SWR' + + @staticmethod + def defaultunit(): + return 36 diff --git a/flopy/modflow/mfswt.py b/flopy/modflow/mfswt.py index fff4a2adfd..2727fba892 100644 --- a/flopy/modflow/mfswt.py +++ b/flopy/modflow/mfswt.py @@ -1,755 +1,755 @@ -""" -mfswt module. Contains the ModflowSwt class. Note that the user can access -the ModflowSwt class as `flopy.modflow.ModflowSwt`. - -Additional information for this MODFLOW package can be found at the `Online -MODFLOW Guide -`_. - -""" -import sys - -import numpy as np - -from ..pakbase import Package -from ..utils import Util2d, Util3d, read1d - - -class ModflowSwt(Package): - """ - MODFLOW SUB-WT Package Class. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to which - this package will be added. - ipakcb : int - A flag that is used to determine if cell-by-cell budget data should be - saved. If ipakcb is non-zero cell-by-cell budget data will be saved. - (default is 0). - iswtoc : int - iswtoc is a flag used to control output of information generated by the - SUB-WT Package. (default is 0). - nsystm : int - nsystm is the number of systems of interbeds. (default is 1). - ithk : int - ithk is a flag to determine how thicknesses of compressible sediments - vary in response to changes in saturated thickness. If ithk < 1, - thickness of compressible sediments is constant. If ithk > 0, thickness - of compressible sediments varies in response to changes in saturated - thickness. (default is 1). - ivoid : int - ivoid is a flag to determine how void ratios of compressible sediments - vary in response to changes in saturated thickness. If ivoid < 1, void - ratio will be treated as a constant. If ivoid > 0, void ratio will be - treated as a variable. (default is 0). - nn : int - nn is the number of nodes used to discretize the half space to - approximate the head distributions in systems of delay interbeds. - (default is 20). - istpcs : int - istpcs is a flag to determine how initial preconsolidation stress will - be obtained. If istpcs does not equal 0, an array of offset values will - be read in for each model layer. The offset values will be added to the - initial effective stress to get initial preconsolidation stress. If - istpcs = 0, an array with initial preconsolidation stress values will - be read. (default is 1). - icrcc : int - icrcc is a flag to determine how recompression and compression indices - will be obtained. If ICRCC is not equal to 0, arrays of elastic - specific storage and inelastic skeletal specific storage will be read - for each system of interbeds; the recompression index and compression - index will not be read. If icrcc = 0, arrays of recompression index - and compression index will be read for each system of interbeds; - elastic skeletal specific storage and inelastic skeletal specific - storage will not be read. (default is 0). - lnwt : int or array of ints (nsystm) - lnwt is a one-dimensional array specifying the model layer assignments - for each system of interbeds. (default is 0).
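[Editor's addition] A hedged sketch of how nsystm and lnwt pair up; the grid and layer assignments are illustrative assumptions only. lnwt is given zero-based here, matching the + 1 this class applies when write_file emits dataset 2:

    import flopy
    m = flopy.modflow.Modflow()
    dis = flopy.modflow.ModflowDis(m, nlay=3, nrow=10, ncol=10)
    # two systems of interbeds, in model layers 1 and 3 (zero-based 0 and 2)
    swt = flopy.modflow.ModflowSwt(m, nsystm=2, lnwt=[0, 2])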
- izcfl : int - izcfl is a flag to specify whether or not initial calculated - values of layer-center elevation will be printed. (default is 0). - izcfm : int - izcfm is a code for the format in which layer-center elevation will - be printed. (default is 0). - iglfl : int - iglfl is a flag to specify whether or not initial calculated values of - geostatic stress will be printed. (default is 0). - iglfm : int - iglfm is a code for the format in which geostatic stress will be - printed. (default is 0). - iestfl : int - iestfl is a flag to specify whether or not initial calculated values of - effective stress will be printed. (default is 0). - iestfm : int - iestfm is a code for the format in which effective stress will be - printed. (default is 0). - ipcsfl : int - ipcsfl is a flag to specify whether or not initial calculated values of - preconsolidation stress will be printed. (default is 0). - ipcsfm : int - ipcsfm is a code for the format in which preconsolidation stress will - be printed. (default is 0). - istfl : int - istfl is a flag to specify whether or not initial equivalent storage - properties will be printed for each system of interbeds. If icrcc is - not equal to 0, the - equivalent storage properties that can be printed are recompression and - compression indices (cr and cc), which are calculated from elastic and - inelastic skeletal specific storage (sske and sskv). If icrcc = 0, - equivalent storage properties that can be printed are elastic and - inelastic skeletal specific storage, which are calculated from the - recompression and compression indices. (default is 0). - istfm : int - istfm is a code for the format in which equivalent storage properties - will be printed. (default is 0). - gl0 : float or array of floats (nrow, ncol) - gl0 is an array specifying the geostatic stress above model layer 1. If - the top of model layer 1 is the land surface, enter values of zero for - this array. (default is 0.). - sgm : float or array of floats (nrow, ncol) - sgm is an array specifying the specific gravity of moist or unsaturated - sediments. (default is 1.7). - sgs : float or array of floats (nrow, ncol) - sgs is an array specifying the specific gravity of saturated sediments. - (default is 2.). - thick : float or array of floats (nsystm, nrow, ncol) - thick is an array specifying the thickness of compressible sediments. - (default is 1.). - sse : float or array of floats (nsystm, nrow, ncol) - sse is an array specifying the initial elastic skeletal specific - storage of compressible beds. sse is not used if icrcc = 0. - (default is 1.). - ssv : float or array of floats (nsystm, nrow, ncol) - ssv is an array specifying the initial inelastic skeletal specific - storage of compressible beds. ssv is not used if icrcc = 0. - (default is 1.). - cr : float or array of floats (nsystm, nrow, ncol) - cr is an array specifying the recompression index of compressible beds. - cr is not used if icrcc is not equal to 0. (default is 0.01). - cc : float or array of floats (nsystm, nrow, ncol) - cc is an array specifying the compression index of compressible beds. - cc is not used if icrcc is not equal to 0. (default is 0.25). - void : float or array of floats (nsystm, nrow, ncol) - void is an array specifying the initial void ratio of compressible - beds. (default is 0.82). - sub : float or array of floats (nsystm, nrow, ncol) - sub is an array specifying the initial compaction in each system of - interbeds.
Compaction values computed by the package are added to - values in this array so that printed or stored values of compaction and - land subsidence may include previous components. Values in this array - do not affect calculations of storage changes or resulting compaction. - For simulations in which output values will reflect compaction and - subsidence since the start of the simulation, enter zero values for all - elements of this array. (default is 0.). - pcsoff : float or array of floats (nlay, nrow, ncol) - pcsoff is an array specifying the offset from initial effective stress - to initial preconsolidation stress at the bottom of the model layer in - units of height of a column of water. pcsoff is not used if istpcs=0. - (default is 0.). - pcs : float or array of floats (nlay, nrow, ncol) - pcs is an array specifying the initial preconsolidation stress, in - units of height of a column of water, at the bottom of the model layer. - pcs is not used if istpcs is not equal to 0. (default is 0.). - ids16 : list or array of ints (26) - Format codes and unit numbers for subsidence, compaction by model - layer, compaction by interbed system, vertical displacement, - preconsolidation stress, change in preconsolidation stress, geostatic - stress, change in geostatic stress, effective stress, change in - effective stress, void ratio, - thickness of compressible sediments, and layer-center elevation - output. If ids16 is None and iswtoc>0 then print code 0 will be used - for all data which is output to the binary subsidence output file - (unit=1054). The 26 entries in ids16 correspond to ifm1, iun1, ifm2, - iun2, ifm3, iun3, ifm4, iun4, ifm5, iun5, ifm6, iun6, ifm7, iun7, ifm8, - iun8, ifm9, iun9, ifm10, iun10, ifm11, iun11, ifm12, iun12, ifm13, and iun13 - variables. (default is None). - ids17 : list or array of ints (iswtoc, 30) - Stress period and time step range and print and save flags used to - control printing and saving of information generated by the SUB-WT - Package during program execution. Each row of ids17 corresponds to - isp1, isp2, its1, its2, ifl1, ifl2, ifl3, ifl4, ifl5, ifl6, ifl7, - ifl8, ifl9, ifl10, ifl11, ifl12, ifl13, ifl14, ifl15, ifl16, ifl17, - ifl18, ifl19, ifl20, ifl21, ifl22, ifl23, ifl24, ifl25, and ifl26 - variables for iswtoc entries. isp1, isp2, its1, and its2 are stress - period and time step ranges. ifl1 and ifl2 control subsidence printing - and saving. ifl3 and ifl4 control compaction by model layer printing - and saving. ifl5 and ifl6 control compaction by interbed system - printing and saving. ifl7 and ifl8 control vertical displacement - printing and saving. ifl9 and ifl10 control preconsolidation stress - printing and saving. ifl11 and ifl12 control change in preconsolidation - stress printing and saving. ifl13 and ifl14 control geostatic stress - printing and saving. ifl15 and ifl16 control change in geostatic stress - printing and saving. ifl17 and ifl18 control effective stress printing - and saving. ifl19 and ifl20 control change in effective stress printing - and saving. ifl21 and ifl22 control void ratio printing and saving. - ifl23 and ifl24 control compressible bed thickness printing and saving. - ifl25 and ifl26 control layer-center elevation printing and saving. - If ids17 is None and iswtoc>0 then all available subsidence output will - be printed and saved to the binary subsidence output file (unit=1054). - (default is None). - unitnumber : int - File unit number (default is None). - filenames : str or list of str - Filenames to use for the package and the output files.
If - filenames=None the package name will be created using the model name - and package extension and the cbc output name and other swt output - files will be created using the model name and .cbc and swt output - extensions (for example, modflowtest.cbc), if ipakcbc and other - swt output files (dataset 16) are numbers greater than zero. - If a single string is passed the package name will be set to the - string and other swt output files will be set to the model name with - the appropriate output file extensions. To define the names for all - package files (input and output) the length of the list of strings - should be 15. - Default is None. - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - Parameters are supported in Flopy only when reading in existing models. - Parameter values are converted to native values in Flopy and the - connection to "parameters" is thus nonexistent. Parameters are not - supported in the SUB-WT Package. - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> swt = flopy.modflow.ModflowSwt(m) - - """ - - def write_file(self,f=None): - """ - Write the package file. - - Returns - ------- - None - - """ - nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper - # Open file for writing - if f is None: - f = open(self.fn_path, 'w') - # First line: heading - f.write('{}\n'.format(self.heading)) - # write dataset 1 - f.write('{} {} {} {} {} {} {}\n'.format(self.ipakcb, self.iswtoc, - self.nsystm, self.ithk, - self.ivoid, self.istpcs, - self.icrcc)) - # write dataset 2 - t = self.lnwt.array - for tt in t: - f.write('{} '.format(tt + 1)) - f.write('\n') - - # write dataset 3 - f.write( - '{} {} {} {} {} {} {} {} {} {}\n'.format(self.izcfl, self.izcfm, - self.iglfl, self.iglfm, - self.iestfl, self.iestfm, - self.ipcsfl, self.ipcsfm, - self.istfl, self.istfm)) - - # write dataset 4 - f.write(self.gl0.get_file_entry()) - - # write dataset 5 - f.write(self.sgm.get_file_entry()) - - # write dataset 6 - f.write(self.sgs.get_file_entry()) - - # write datasets 7 to 13 - for k in range(self.nsystm): - f.write(self.thick[k].get_file_entry()) - if self.icrcc != 0: - f.write(self.sse[k].get_file_entry()) - f.write(self.ssv[k].get_file_entry()) - else: - f.write(self.cr[k].get_file_entry()) - f.write(self.cc[k].get_file_entry()) - f.write(self.void[k].get_file_entry()) - f.write(self.sub[k].get_file_entry()) - - # write datasets 14 and 15 - for k in range(nlay): - if self.istpcs != 0: - f.write(self.pcsoff[k].get_file_entry()) - else: - f.write(self.pcs[k].get_file_entry()) - - # write dataset 16 and 17 - if self.iswtoc > 0: - # dataset 16 - for i in self.ids16: - f.write('{} '.format(i)) - f.write(' #dataset 16\n') - - # dataset 17 - for k in range(self.iswtoc): - t = self.ids17[k, :].copy() - t[0:4] += 1 - for i in t: - f.write('{} '.format(i)) - f.write(' #dataset 17 iswtoc {}\n'.format(k + 1)) - - # close swt file - f.close() - - def __init__(self, model, ipakcb=None, iswtoc=0, nsystm=1, ithk=0, ivoid=0, - istpcs=1, icrcc=0, lnwt=0, izcfl=0, izcfm=0, iglfl=0, iglfm=0, - iestfl=0, iestfm=0, ipcsfl=0, ipcsfm=0, istfl=0, istfm=0, - gl0=0., sgm=1.7, sgs=2., thick=1., sse=1., ssv=1., - cr=0.01, cc=0.25, void=0.82, sub=0., pcsoff=0., pcs=0., - ids16=None, ids17=None, - extension='swt', unitnumber=None, filenames=None): - """ - Package constructor. 
- - """ - # set default unit number of one is not specified - if unitnumber is None: - unitnumber = ModflowSwt.defaultunit() - - # set filenames - if filenames is None: - filenames = [None for x in range(15)] - elif isinstance(filenames, str): - filenames = [filenames] + [None for x in range(14)] - elif isinstance(filenames, list): - if len(filenames) < 15: - n = 15 - len(filenames) + 1 - filenames = filenames + [None for x in range(n)] - - # update external file information with cbc output, if necessary - if ipakcb is not None: - fname = filenames[1] - model.add_output_file(ipakcb, fname=fname, - package=ModflowSwt.ftype()) - else: - ipakcb = 0 - - item16_extensions = ["swt_subsidence.hds", "swt_total_comp.hds", - "swt_inter_comp.hds", "swt_vert_disp.hds", - "swt_precon_stress.hds", - "swt_precon_stress_delta.hds", - "swt_geostatic_stress.hds", - "swt_geostatic_stress_delta.hds", - "swt_eff_stress.hds", "swt_eff_stress_delta.hds", - "swt_void_ratio.hds", "swt_thick.hds", - "swt_lay_center.hds"] - item16_units = [2052 + i for i in range(len(item16_extensions))] - - if iswtoc > 0: - idx = 0 - for k in range(1, 26, 2): - ext = item16_extensions[idx] - if ids16 is None: - iu = item16_units[idx] - else: - iu = ids16[k] - fname = filenames[idx + 2] - model.add_output_file(iu, fname=fname, extension=ext, - package=ModflowSwt.ftype()) - idx += 1 - - extensions = [extension] - name = [ModflowSwt.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and - # unit number - Package.__init__(self, model, extension=extensions, name=name, - unit_number=units, extra=extra, filenames=fname) - - nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper - - self.heading = '# {} package for '.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' 
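        # [Editor's note, added comments] With iswtoc > 0 and ids16=None,
        # the iswtoc loop earlier in this constructor wires each of the 13
        # optional SUB-WT outputs to its own default unit number
        # (2052, 2053, ...), one per entry in item16_extensions; when ids16
        # is supplied, the odd entries of that array are used as the output
        # unit numbers instead.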
- self.url = 'swt.htm' - - self.ipakcb = ipakcb - self.iswtoc = iswtoc - - self.nsystm = nsystm - self.ithk = ithk - self.ivoid = ivoid - self.istpcs = istpcs - self.icrcc = icrcc - - self.lnwt = Util2d(model, (nsystm,), np.int32, lnwt, name='lnwt') - - self.izcfl = izcfl - self.izcfm = izcfm - self.iglfl = iglfl - self.iglfm = iglfm - self.iestfl = iestfl - self.iestfm = iestfm - self.ipcsfl = ipcsfl - self.ipcsfm = ipcsfm - self.istfl = istfl - self.istfm = istfm - - self.gl0 = Util2d(model, (nrow, ncol), np.float32, gl0, name='gl0') - self.sgm = Util2d(model, (nrow, ncol), np.float32, sgm, name='sgm') - self.sgs = Util2d(model, (nrow, ncol), np.float32, sgs, name='sgs') - - # interbed data - names = ['thick system ' for n in range(nsystm)] - self.thick = Util3d(model, (nsystm, nrow, ncol), np.float32, thick, - name=names, - locat=self.unit_number[0]) - names = ['void system ' for n in range(nsystm)] - self.void = Util3d(model, (nsystm, nrow, ncol), np.float32, void, - name=names, - locat=self.unit_number[0]) - names = ['sub system ' for n in range(nsystm)] - self.sub = Util3d(model, (nsystm, nrow, ncol), np.float32, sub, - name=names, - locat=self.unit_number[0]) - if icrcc != 0: - names = ['sse system ' for n in range(nsystm)] - self.sse = Util3d(model, (nsystm, nrow, ncol), np.float32, sse, - name=names, - locat=self.unit_number[0]) - names = ['ssc system ' for n in range(nsystm)] - self.ssv = Util3d(model, (nsystm, nrow, ncol), np.float32, ssv, - name=names, - locat=self.unit_number[0]) - self.cr = None - self.cc = None - else: - self.sse = None - self.ssv = None - names = ['cr system ' for n in range(nsystm)] - self.cr = Util3d(model, (nsystm, nrow, ncol), np.float32, cr, - name=names, - locat=self.unit_number[0]) - names = ['cc system ' for n in range(nsystm)] - self.cc = Util3d(model, (nsystm, nrow, ncol), np.float32, cc, - name=names, - locat=self.unit_number[0]) - - # layer data - if istpcs != 0: - self.pcsoff = Util3d(model, (nlay, nrow, ncol), np.float32, pcsoff, - name='pcsoff', locat=self.unit_number[0]) - self.pcs = None - else: - self.pcsoff = None - self.pcs = Util3d(model, (nlay, nrow, ncol), np.float32, pcs, - name='pcs', locat=self.unit_number[0]) - - # output data - if iswtoc > 0: - if ids16 is None: - self.ids16 = np.zeros((26), dtype=np.int32) - ui = 0 - for i in range(1, 26, 2): - self.ids16[i] = item16_units[ui] - ui += 1 - else: - if isinstance(ids16, list): - ds16 = np.array(ids16) - assert len(ids16) == 26 - self.ids16 = ids16 - - if ids17 is None: - ids17 = np.ones((30), dtype=np.int32) - ids17[0] = 0 - ids17[2] = 0 - ids17[1] = 9999 - ids17[3] = 9999 - self.ids17 = np.atleast_2d(ids17) - else: - if isinstance(ids17, list): - ids17 = np.atleast_2d(np.array(ids17)) - assert ids17.shape[1] == 30 - self.ids17 = ids17 - - # add package to model - self.parent.add_package(self) - - @staticmethod - def load(f, model, ext_unit_dict=None): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. 
- - Returns - ------- - swt : ModflowSwt object - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> swt = flopy.modflow.ModflowSwt.load('test.swt', m) - - """ - - if model.verbose: - sys.stdout.write('loading swt package file...\n') - - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - - # dataset 0 -- header - while True: - line = f.readline() - if line[0] != '#': - break - # determine problem dimensions - nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper() - - # read dataset 1 - if model.verbose: - sys.stdout.write(' loading swt dataset 1\n') - t = line.strip().split() - ipakcb, iswtoc, nsystm, ithk, ivoid, istpcs, icrcc = int(t[0]), \ - int(t[1]), \ - int(t[2]), \ - int(t[3]), \ - int(t[4]), \ - int(t[5]), \ - int(t[6]) - - # if ipakcb > 0: - # ipakcb = 53 - - # read dataset 2 - lnwt = None - if nsystm > 0: - if model.verbose: - sys.stdout.write(' loading swt dataset 2\n') - lnwt = np.empty((nsystm), dtype=np.int32) - lnwt = read1d(f, lnwt) - 1 - - # read dataset 3 - if model.verbose: - sys.stdout.write(' loading swt dataset 3\n') - line = f.readline() - t = line.strip().split() - iizcfl, izcfm, iglfl, iglfm, iestfl, \ - iestfm, ipcsfl, ipcsfm, istfl, istfm = int(t[0]), int(t[1]), \ - int(t[2]), int(t[3]), \ - int(t[4]), int(t[5]), \ - int(t[6]), int(t[7]), \ - int(t[8]), int(t[9]) - - # read dataset 4 - if model.verbose: - sys.stdout.write(' loading swt dataset 4') - gl0 = Util2d.load(f, model, (nrow, ncol), np.float32, 'gl0', - ext_unit_dict) - - # read dataset 5 - if model.verbose: - sys.stdout.write(' loading swt dataset 5') - sgm = Util2d.load(f, model, (nrow, ncol), np.float32, 'sgm', - ext_unit_dict) - - # read dataset 6 - if model.verbose: - sys.stdout.write(' loading swt dataset 6') - sgs = Util2d.load(f, model, (nrow, ncol), np.float32, 'sgs', - ext_unit_dict) - - # read datasets 7 to 13 - thick = [0] * nsystm - void = [0] * nsystm - sub = [0] * nsystm - if icrcc == 0: - sse = None - ssv = None - cr = [0] * nsystm - cc = [0] * nsystm - else: - sse = [0] * nsystm - ssv = [0] * nsystm - cr = None - cc = None - - for k in range(nsystm): - kk = lnwt[k] + 1 - # thick - if model.verbose: - sys.stdout.write( - ' loading swt dataset 7 for layer {}\n'.format(kk)) - t = Util2d.load(f, model, (nrow, ncol), np.float32, - 'thick layer {}'.format(kk), - ext_unit_dict) - thick[k] = t - if icrcc != 0: - # sse - if model.verbose: - sys.stdout.write( - ' loading swt dataset 8 for layer {}\n'.format(kk)) - t = Util2d.load(f, model, (nrow, ncol), np.float32, - 'sse layer {}'.format(kk), ext_unit_dict) - sse[k] = t - # ssv - if model.verbose: - sys.stdout.write( - ' loading swt dataset 9 for layer {}\n'.format(kk)) - t = Util2d.load(f, model, (nrow, ncol), np.float32, - 'sse layer {}'.format(kk), ext_unit_dict) - ssv[k] = t - else: - # cr - if model.verbose: - sys.stdout.write( - ' loading swt dataset 10 for layer {}\n'.format(kk)) - t = Util2d.load(f, model, (nrow, ncol), np.float32, - 'cr layer {}'.format(kk), ext_unit_dict) - cr[k] = t - # cc - if model.verbose: - sys.stdout.write( - ' loading swt dataset 11 for layer {}\n'.format(kk)) - t = Util2d.load(f, model, (nrow, ncol), np.float32, - 'cc layer {}'.format(kk), ext_unit_dict) - cc[k] = t - # void - if model.verbose: - sys.stdout.write( - ' loading swt dataset 12 for layer {}\n'.format(kk)) - t = Util2d.load(f, model, (nrow, ncol), np.float32, - 'void layer {}'.format(kk), ext_unit_dict) - void[k] = t - # sub - if model.verbose: - sys.stdout.write( - ' loading 
swt dataset 13 for layer {}\n'.format(kk)) - t = Util2d.load(f, model, (nrow, ncol), np.float32, - 'sub layer {}'.format(kk), ext_unit_dict) - sub[k] = t - - # dataset 14 and 15 - if istpcs != 0: - pcsoff = [0] * nlay - pcs = None - else: - pcsoff = None - pcs = [0] * nlay - for k in range(nlay): - if istpcs != 0: - if model.verbose: - sys.stdout.write( - ' loading swt dataset 14 for layer {}\n'.format(kk)) - t = Util2d.load(f, model, (nrow, ncol), np.float32, - 'pcsoff layer {}'.format(k + 1), ext_unit_dict) - pcsoff[k] = t - else: - if model.verbose: - sys.stdout.write( - ' loading swt dataset 15 for layer {}\n'.format(kk)) - t = Util2d.load(f, model, (nrow, ncol), np.float32, - 'pcs layer {}'.format(k + 1), ext_unit_dict) - pcs[k] = t - - ids16 = None - ids17 = None - if iswtoc > 0: - # dataset 16 - if model.verbose: - sys.stdout.write( - ' loading swt dataset 15 for layer {}\n'.format(kk)) - ids16 = np.empty(26, dtype=np.int32) - ids16 = read1d(f, ids16) - #for k in range(1, 26, 2): - # model.add_pop_key_list(ids16[k]) - # ids16[k] = 2054 # all sub-wt data sent to unit 2054 - # dataset 17 - ids17 = [0] * iswtoc - for k in range(iswtoc): - if model.verbose: - msg = 2 * ' ' + 'loading swt dataset 17 for ' + \ - 'iswtoc {}\n'.format(k + 1) - sys.stdout.write(msg) - t = np.empty(30, dtype=np.int32) - t = read1d(f, t) - t[0:4] -= 1 - ids17[k] = t - - if openfile: - f.close() - - # determine specified unit number - unitnumber = None - filenames = [None for x in range(15)] - if ext_unit_dict is not None: - unitnumber, filenames[0] = \ - model.get_ext_dict_attr(ext_unit_dict, - filetype=ModflowSwt.ftype()) - if ipakcb > 0: - iu, filenames[1] = \ - model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb) - - if iswtoc > 0: - ipos = 2 - for k in range(1, 26, 2): - unit = ids16[k] - if unit > 0: - iu, filenames[ipos] = \ - model.get_ext_dict_attr(ext_unit_dict, - unit=unit) - model.add_pop_key_list(unit) - ipos += 1 - - # create sub-wt instance - swt = ModflowSwt(model, ipakcb=ipakcb, iswtoc=iswtoc, nsystm=nsystm, - ithk=ithk, ivoid=ivoid, istpcs=istpcs, - icrcc=icrcc, lnwt=lnwt, izcfl=iizcfl, izcfm=izcfm, - iglfl=iglfl, iglfm=iglfm, iestfl=iestfl, - iestfm=iestfm, ipcsfl=ipcsfl, ipcsfm=ipcsfm, - istfl=istfl, istfm=istfm, gl0=gl0, sgm=sgm, - sgs=sgs, thick=thick, sse=sse, ssv=ssv, cr=cr, cc=cc, - void=void, sub=sub, pcsoff=pcsoff, - pcs=pcs, ids16=ids16, ids17=ids17, - unitnumber=unitnumber, filenames=filenames) - - # return sut-wt instance - return swt - - @staticmethod - def ftype(): - return 'SWT' - - @staticmethod - def defaultunit(): - return 35 +""" +mfswt module. Contains the ModflowSwt class. Note that the user can access +the ModflowSwt class as `flopy.modflow.ModflowSwt`. + +Additional information for this MODFLOW package can be found at the `Online +MODFLOW Guide +`_. + +""" +import sys + +import numpy as np + +from ..pakbase import Package +from ..utils import Util2d, Util3d, read1d + + +class ModflowSwt(Package): + """ + MODFLOW SUB-WT Package Class. + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to which + this package will be added. + ipakcb : int + A flag that is used to determine if cell-by-cell budget data should be + saved. If ipakcb is non-zero cell-by-cell budget data will be saved. + (default is 0). + iswtoc : int + iswtoc is a flag used to control output of information generated by the + SUB-WT Package. (default is 0). + nsystm : int + nsystm is the number of systems of interbeds. (default is 1).
+ ithk : int + ithk is a flag to determine how thicknesses of compressible sediments + vary in response to changes in saturated thickness. If ithk < 1, + thickness of compressible sediments is constant. If ithk > 0, thickness + of compressible sediments varies in response to changes in saturated + thickness. (default is 1). + ivoid : int + ivoid is a flag to determine how void ratios of compressible sediments + vary in response to changes in saturated thickness. If ivoid < 1, void + ratio will be treated as a constant. If ivoid > 0, void ratio will be + treated as a variable. (default is 0). + nn : int + nn is the number of nodes used to discretize the half space to + approximate the head distributions in systems of delay interbeds. + (default is 20). + istpcs : int + istpcs is a flag to determine how initial preconsolidation stress will + be obtained. If istpcs does not equal 0, an array of offset values will + be read in for each model layer. The offset values will be added to the + initial effective stress to get initial preconsolidation stress. If + istpcs = 0, an array with initial preconsolidation stress values will + be read. (default is 1). + icrcc : int + icrcc is a flag to determine how recompression and compression indices + will be obtained. If ICRCC is not equal to 0, arrays of elastic + specific storage and inelastic skeletal specific storage will be read + for each system of interbeds; the recompression index and compression + index will not be read. If icrcc = 0, arrays of recompression index + and compression index will be read for each system of interbeds; + elastic skeletal specific storage and inelastic skeletal specific + storage will not be read. (default is 0). + lnwt : int or array of ints (nsystm) + lnwt is a one-dimensional array specifying the model layer assignments + for each system of interbeds. (default is 0). + izcfl : int + izcfl is a flag to specify whether or not initial calculated + values of layer-center elevation will be printed. (default is 0). + izcfm : int + izcfm is a code for the format in which layer-center elevation will + be printed. (default is 0). + iglfl : int + iglfl is a flag to specify whether or not initial calculated values of + geostatic stress will be printed. (default is 0). + iglfm : int + iglfm is a code for the format in which geostatic stress will be + printed. (default is 0). + iestfl : int + iestfl is a flag to specify whether or not initial calculated values of + effective stress will be printed. (default is 0). + iestfm : int + iestfm is a code for the format in which effective stress will be + printed. (default is 0). + ipcsfl : int + ipcsfl is a flag to specify whether or not initial calculated values of + preconsolidation stress will be printed. (default is 0). + ipcsfm : int + ipcsfm is a code for the format in which preconsolidation stress will + be printed. (default is 0). + istfl : int + istfl is a flag to specify whether or not initial equivalent storage + properties will be printed for each system of interbeds. If icrcc is + not equal to 0, the + equivalent storage properties that can be printed are recompression and + compression indices (cr and cc), which are calculated from elastic and + inelastic skeletal specific storage (sske and sskv). If icrcc = 0, + equivalent storage properties that can be printed are elastic and + inelastic skeletal specific storage, which are calculated from the + recompression and compression indices. (default is 0).
+ istfm : int + istfm is a code for the format in which equivalent storage properties + will be printed. (default is 0). + gl0 : float or array of floats (nrow, ncol) + gl0 is an array specifying the geostatic stress above model layer 1. If + the top of model layer 1 is the land surface, enter values of zero for + this array. (default is 0.). + sgm : float or array of floats (nrow, ncol) + sgm is an array specifying the specific gravity of moist or unsaturated + sediments. (default is 1.7). + sgs : float or array of floats (nrow, ncol) + sgs is an array specifying the specific gravity of saturated sediments. + (default is 2.). + thick : float or array of floats (nsystm, nrow, ncol) + thick is an array specifying the thickness of compressible sediments. + (default is 1.). + sse : float or array of floats (nsystm, nrow, ncol) + sse is an array specifying the initial elastic skeletal specific + storage of compressible beds. sse is not used if icrcc = 0. + (default is 1.). + ssv : float or array of floats (nsystm, nrow, ncol) + ssv is an array specifying the initial inelastic skeletal specific + storage of compressible beds. ssv is not used if icrcc = 0. + (default is 1.). + cr : float or array of floats (nsystm, nrow, ncol) + cr is an array specifying the recompression index of compressible beds. + cr is not used if icrcc is not equal to 0. (default is 0.01). + cc : float or array of floats (nsystm, nrow, ncol) + cc is an array specifying the compression index of compressible beds. + cc is not used if icrcc is not equal to 0. (default is 0.25). + void : float or array of floats (nsystm, nrow, ncol) + void is an array specifying the initial void ratio of compressible + beds. (default is 0.82). + sub : float or array of floats (nsystm, nrow, ncol) + sub is an array specifying the initial compaction in each system of + interbeds. Compaction values computed by the package are added to + values in this array so that printed or stored values of compaction and + land subsidence may include previous components. Values in this array + do not affect calculations of storage changes or resulting compaction. + For simulations in which output values will reflect compaction and + subsidence since the start of the simulation, enter zero values for all + elements of this array. (default is 0.). + pcsoff : float or array of floats (nlay, nrow, ncol) + pcsoff is an array specifying the offset from initial effective stress + to initial preconsolidation stress at the bottom of the model layer in + units of height of a column of water. pcsoff is not used if istpcs=0. + (default is 0.). + pcs : float or array of floats (nlay, nrow, ncol) + pcs is an array specifying the initial preconsolidation stress, in + units of height of a column of water, at the bottom of the model layer. + pcs is not used if istpcs is not equal to 0. (default is 0.). + ids16 : list or array of ints (26) + Format codes and unit numbers for subsidence, compaction by model + layer, compaction by interbed system, vertical displacement, + preconsolidation stress, change in preconsolidation stress, geostatic + stress, change in geostatic stress, effective stress, change in + effective stress, void ratio, + thickness of compressible sediments, and layer-center elevation will be + printed. If ids16 is None and iswtoc>0 then print code 0 will be used + for all data which is output to the binary subsidence output file + (unit=1054).
The 26 entries in ids16 correspond to ifm1, iun1, ifm2, + iun2, ifm3, iun3, ifm4, iun4, ifm5, iun5, ifm6, iun6, ifm7, iun7, ifm8, + iun8, ifm9, iun9, ifm10, iun10, ifm11, iun11, ifm12, iun12, ifm13, and + iun13 variables. (default is None). + ids17 : list or array of ints (iswtoc, 30) + Stress period and time step range and print and save flags used to + control printing and saving of information generated by the SUB-WT + Package during program execution. Each row of ids17 corresponds to + isp1, isp2, its1, its2, ifl1, ifl2, ifl3, ifl4, ifl5, ifl6, ifl7, + ifl8, ifl9, ifl10, ifl11, ifl12, ifl13, ifl14, ifl15, ifl16, ifl17, + ifl18, ifl19, ifl20, ifl21, ifl22, ifl23, ifl24, ifl25, and ifl26 + variables for iswtoc entries. isp1, isp2, its1, and its2 are stress + period and time step ranges. ifl1 and ifl2 control subsidence printing + and saving. ifl3 and ifl4 control compaction by model layer printing + and saving. ifl5 and ifl6 control compaction by interbed system + printing and saving. ifl7 and ifl8 control vertical displacement + printing and saving. ifl9 and ifl10 control preconsolidation stress + printing and saving. ifl11 and ifl12 control change in preconsolidation + stress printing and saving. ifl13 and ifl14 control geostatic stress + printing and saving. ifl15 and ifl16 control change in geostatic stress + printing and saving. ifl17 and ifl18 control effective stress printing + and saving. ifl19 and ifl20 control change in effective stress printing + and saving. ifl21 and ifl22 control void ratio printing and saving. + ifl23 and ifl24 control compressible bed thickness printing and saving. + ifl25 and ifl26 control layer-center elevation printing and saving. + If ids17 is None and iswtoc>0 then all available subsidence output will + be printed and saved to the binary subsidence output file (unit=1054). + (default is None). + unitnumber : int + File unit number (default is None). + filenames : str or list of str + Filenames to use for the package and the output files. If + filenames=None the package name will be created using the model name + and package extension and the cbc output name and other swt output + files will be created using the model name and .cbc and swt output + extensions (for example, modflowtest.cbc), if ipakcb and other + swt output files (dataset 16) are numbers greater than zero. + If a single string is passed the package name will be set to the + string and other swt output files will be set to the model name with + the appropriate output file extensions. To define the names for all + package files (input and output) the length of the list of strings + should be 15. + Default is None. + + Attributes + ---------- + + Methods + ------- + + See Also + -------- + + Notes + ----- + Parameters are supported in Flopy only when reading in existing models. + Parameter values are converted to native values in Flopy and the + connection to "parameters" is thus nonexistent. Parameters are not + supported in the SUB-WT Package. + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> swt = flopy.modflow.ModflowSwt(m) + + """ + + def write_file(self, f=None): + """ + Write the package file.
+ + Returns + ------- + None + + """ + nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper + # Open file for writing + if f is None: + f = open(self.fn_path, 'w') + # First line: heading + f.write('{}\n'.format(self.heading)) + # write dataset 1 + f.write('{} {} {} {} {} {} {}\n'.format(self.ipakcb, self.iswtoc, + self.nsystm, self.ithk, + self.ivoid, self.istpcs, + self.icrcc)) + # write dataset 2 + t = self.lnwt.array + for tt in t: + f.write('{} '.format(tt + 1)) + f.write('\n') + + # write dataset 3 + f.write( + '{} {} {} {} {} {} {} {} {} {}\n'.format(self.izcfl, self.izcfm, + self.iglfl, self.iglfm, + self.iestfl, self.iestfm, + self.ipcsfl, self.ipcsfm, + self.istfl, self.istfm)) + + # write dataset 4 + f.write(self.gl0.get_file_entry()) + + # write dataset 5 + f.write(self.sgm.get_file_entry()) + + # write dataset 6 + f.write(self.sgs.get_file_entry()) + + # write datasets 7 to 13 + for k in range(self.nsystm): + f.write(self.thick[k].get_file_entry()) + if self.icrcc != 0: + f.write(self.sse[k].get_file_entry()) + f.write(self.ssv[k].get_file_entry()) + else: + f.write(self.cr[k].get_file_entry()) + f.write(self.cc[k].get_file_entry()) + f.write(self.void[k].get_file_entry()) + f.write(self.sub[k].get_file_entry()) + + # write datasets 14 and 15 + for k in range(nlay): + if self.istpcs != 0: + f.write(self.pcsoff[k].get_file_entry()) + else: + f.write(self.pcs[k].get_file_entry()) + + # write dataset 16 and 17 + if self.iswtoc > 0: + # dataset 16 + for i in self.ids16: + f.write('{} '.format(i)) + f.write(' #dataset 16\n') + + # dataset 17 + for k in range(self.iswtoc): + t = self.ids17[k, :].copy() + t[0:4] += 1 + for i in t: + f.write('{} '.format(i)) + f.write(' #dataset 17 iswtoc {}\n'.format(k + 1)) + + # close swt file + f.close() + + def __init__(self, model, ipakcb=None, iswtoc=0, nsystm=1, ithk=0, ivoid=0, + istpcs=1, icrcc=0, lnwt=0, izcfl=0, izcfm=0, iglfl=0, iglfm=0, + iestfl=0, iestfm=0, ipcsfl=0, ipcsfm=0, istfl=0, istfm=0, + gl0=0., sgm=1.7, sgs=2., thick=1., sse=1., ssv=1., + cr=0.01, cc=0.25, void=0.82, sub=0., pcsoff=0., pcs=0., + ids16=None, ids17=None, + extension='swt', unitnumber=None, filenames=None): + """ + Package constructor. 
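+
+        A minimal construction sketch (illustrative only; values are
+        placeholders, assumes a model with a DIS package so that nrow,
+        ncol, and nlay are defined, and uses the zero-based ids17
+        convention implied by write_file, which adds 1 to the stress
+        period and time step entries on output):
+
+        >>> import numpy as np
+        >>> import flopy
+        >>> m = flopy.modflow.Modflow()
+        >>> dis = flopy.modflow.ModflowDis(m, nlay=3, nrow=10, ncol=10)
+        >>> ids17 = np.ones((1, 30), dtype=np.int32)  # one item-17 record
+        >>> ids17[0, 0:4] = [0, 9999, 0, 9999]  # isp1, isp2, its1, its2
+        >>> swt = flopy.modflow.ModflowSwt(m, iswtoc=1, nsystm=1, lnwt=0,
+        ...                                ids17=ids17)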
+ + """ + # set default unit number of one is not specified + if unitnumber is None: + unitnumber = ModflowSwt.defaultunit() + + # set filenames + if filenames is None: + filenames = [None for x in range(15)] + elif isinstance(filenames, str): + filenames = [filenames] + [None for x in range(14)] + elif isinstance(filenames, list): + if len(filenames) < 15: + n = 15 - len(filenames) + 1 + filenames = filenames + [None for x in range(n)] + + # update external file information with cbc output, if necessary + if ipakcb is not None: + fname = filenames[1] + model.add_output_file(ipakcb, fname=fname, + package=ModflowSwt.ftype()) + else: + ipakcb = 0 + + item16_extensions = ["swt_subsidence.hds", "swt_total_comp.hds", + "swt_inter_comp.hds", "swt_vert_disp.hds", + "swt_precon_stress.hds", + "swt_precon_stress_delta.hds", + "swt_geostatic_stress.hds", + "swt_geostatic_stress_delta.hds", + "swt_eff_stress.hds", "swt_eff_stress_delta.hds", + "swt_void_ratio.hds", "swt_thick.hds", + "swt_lay_center.hds"] + item16_units = [2052 + i for i in range(len(item16_extensions))] + + if iswtoc > 0: + idx = 0 + for k in range(1, 26, 2): + ext = item16_extensions[idx] + if ids16 is None: + iu = item16_units[idx] + else: + iu = ids16[k] + fname = filenames[idx + 2] + model.add_output_file(iu, fname=fname, extension=ext, + package=ModflowSwt.ftype()) + idx += 1 + + extensions = [extension] + name = [ModflowSwt.ftype()] + units = [unitnumber] + extra = [''] + + # set package name + fname = [filenames[0]] + + # Call ancestor's init to set self.parent, extension, name and + # unit number + Package.__init__(self, model, extension=extensions, name=name, + unit_number=units, extra=extra, filenames=fname) + + nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper + + self.heading = '# {} package for '.format(self.name[0]) + \ + ' {}, '.format(model.version_types[model.version]) + \ + 'generated by Flopy.' 
+ self.url = 'swt.htm' + + self.ipakcb = ipakcb + self.iswtoc = iswtoc + + self.nsystm = nsystm + self.ithk = ithk + self.ivoid = ivoid + self.istpcs = istpcs + self.icrcc = icrcc + + self.lnwt = Util2d(model, (nsystm,), np.int32, lnwt, name='lnwt') + + self.izcfl = izcfl + self.izcfm = izcfm + self.iglfl = iglfl + self.iglfm = iglfm + self.iestfl = iestfl + self.iestfm = iestfm + self.ipcsfl = ipcsfl + self.ipcsfm = ipcsfm + self.istfl = istfl + self.istfm = istfm + + self.gl0 = Util2d(model, (nrow, ncol), np.float32, gl0, name='gl0') + self.sgm = Util2d(model, (nrow, ncol), np.float32, sgm, name='sgm') + self.sgs = Util2d(model, (nrow, ncol), np.float32, sgs, name='sgs') + + # interbed data + names = ['thick system ' for n in range(nsystm)] + self.thick = Util3d(model, (nsystm, nrow, ncol), np.float32, thick, + name=names, + locat=self.unit_number[0]) + names = ['void system ' for n in range(nsystm)] + self.void = Util3d(model, (nsystm, nrow, ncol), np.float32, void, + name=names, + locat=self.unit_number[0]) + names = ['sub system ' for n in range(nsystm)] + self.sub = Util3d(model, (nsystm, nrow, ncol), np.float32, sub, + name=names, + locat=self.unit_number[0]) + if icrcc != 0: + names = ['sse system ' for n in range(nsystm)] + self.sse = Util3d(model, (nsystm, nrow, ncol), np.float32, sse, + name=names, + locat=self.unit_number[0]) + names = ['ssc system ' for n in range(nsystm)] + self.ssv = Util3d(model, (nsystm, nrow, ncol), np.float32, ssv, + name=names, + locat=self.unit_number[0]) + self.cr = None + self.cc = None + else: + self.sse = None + self.ssv = None + names = ['cr system ' for n in range(nsystm)] + self.cr = Util3d(model, (nsystm, nrow, ncol), np.float32, cr, + name=names, + locat=self.unit_number[0]) + names = ['cc system ' for n in range(nsystm)] + self.cc = Util3d(model, (nsystm, nrow, ncol), np.float32, cc, + name=names, + locat=self.unit_number[0]) + + # layer data + if istpcs != 0: + self.pcsoff = Util3d(model, (nlay, nrow, ncol), np.float32, pcsoff, + name='pcsoff', locat=self.unit_number[0]) + self.pcs = None + else: + self.pcsoff = None + self.pcs = Util3d(model, (nlay, nrow, ncol), np.float32, pcs, + name='pcs', locat=self.unit_number[0]) + + # output data + if iswtoc > 0: + if ids16 is None: + self.ids16 = np.zeros((26), dtype=np.int32) + ui = 0 + for i in range(1, 26, 2): + self.ids16[i] = item16_units[ui] + ui += 1 + else: + if isinstance(ids16, list): + ds16 = np.array(ids16) + assert len(ids16) == 26 + self.ids16 = ids16 + + if ids17 is None: + ids17 = np.ones((30), dtype=np.int32) + ids17[0] = 0 + ids17[2] = 0 + ids17[1] = 9999 + ids17[3] = 9999 + self.ids17 = np.atleast_2d(ids17) + else: + if isinstance(ids17, list): + ids17 = np.atleast_2d(np.array(ids17)) + assert ids17.shape[1] == 30 + self.ids17 = ids17 + + # add package to model + self.parent.add_package(self) + + @staticmethod + def load(f, model, ext_unit_dict=None): + """ + Load an existing package. + + Parameters + ---------- + f : filename or file handle + File to load. + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to + which this package will be added. + ext_unit_dict : dictionary, optional + If the arrays in the file are specified using EXTERNAL, + or older style array control records, then `f` should be a file + handle. In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. 
+ + Returns + ------- + swt : ModflowSwt object + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> swt = flopy.modflow.ModflowSwt.load('test.swt', m) + + """ + + if model.verbose: + sys.stdout.write('loading swt package file...\n') + + openfile = not hasattr(f, 'read') + if openfile: + filename = f + f = open(filename, 'r') + + # dataset 0 -- header + while True: + line = f.readline() + if line[0] != '#': + break + # determine problem dimensions + nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper() + + # read dataset 1 + if model.verbose: + sys.stdout.write(' loading swt dataset 1\n') + t = line.strip().split() + ipakcb, iswtoc, nsystm, ithk, ivoid, istpcs, icrcc = int(t[0]), \ + int(t[1]), \ + int(t[2]), \ + int(t[3]), \ + int(t[4]), \ + int(t[5]), \ + int(t[6]) + + # if ipakcb > 0: + # ipakcb = 53 + + # read dataset 2 + lnwt = None + if nsystm > 0: + if model.verbose: + sys.stdout.write(' loading swt dataset 2\n') + lnwt = np.empty((nsystm), dtype=np.int32) + lnwt = read1d(f, lnwt) - 1 + + # read dataset 3 + if model.verbose: + sys.stdout.write(' loading swt dataset 3\n') + line = f.readline() + t = line.strip().split() + iizcfl, izcfm, iglfl, iglfm, iestfl, \ + iestfm, ipcsfl, ipcsfm, istfl, istfm = int(t[0]), int(t[1]), \ + int(t[2]), int(t[3]), \ + int(t[4]), int(t[5]), \ + int(t[6]), int(t[7]), \ + int(t[8]), int(t[9]) + + # read dataset 4 + if model.verbose: + sys.stdout.write(' loading swt dataset 4\n') + gl0 = Util2d.load(f, model, (nrow, ncol), np.float32, 'gl0', + ext_unit_dict) + + # read dataset 5 + if model.verbose: + sys.stdout.write(' loading swt dataset 5\n') + sgm = Util2d.load(f, model, (nrow, ncol), np.float32, 'sgm', + ext_unit_dict) + + # read dataset 6 + if model.verbose: + sys.stdout.write(' loading swt dataset 6\n') + sgs = Util2d.load(f, model, (nrow, ncol), np.float32, 'sgs', + ext_unit_dict) + + # read datasets 7 to 13 + thick = [0] * nsystm + void = [0] * nsystm + sub = [0] * nsystm + if icrcc == 0: + sse = None + ssv = None + cr = [0] * nsystm + cc = [0] * nsystm + else: + sse = [0] * nsystm + ssv = [0] * nsystm + cr = None + cc = None + + for k in range(nsystm): + kk = lnwt[k] + 1 + # thick + if model.verbose: + sys.stdout.write( + ' loading swt dataset 7 for layer {}\n'.format(kk)) + t = Util2d.load(f, model, (nrow, ncol), np.float32, + 'thick layer {}'.format(kk), + ext_unit_dict) + thick[k] = t + if icrcc != 0: + # sse + if model.verbose: + sys.stdout.write( + ' loading swt dataset 8 for layer {}\n'.format(kk)) + t = Util2d.load(f, model, (nrow, ncol), np.float32, + 'sse layer {}'.format(kk), ext_unit_dict) + sse[k] = t + # ssv + if model.verbose: + sys.stdout.write( + ' loading swt dataset 9 for layer {}\n'.format(kk)) + t = Util2d.load(f, model, (nrow, ncol), np.float32, + 'ssv layer {}'.format(kk), ext_unit_dict) + ssv[k] = t + else: + # cr + if model.verbose: + sys.stdout.write( + ' loading swt dataset 10 for layer {}\n'.format(kk)) + t = Util2d.load(f, model, (nrow, ncol), np.float32, + 'cr layer {}'.format(kk), ext_unit_dict) + cr[k] = t + # cc + if model.verbose: + sys.stdout.write( + ' loading swt dataset 11 for layer {}\n'.format(kk)) + t = Util2d.load(f, model, (nrow, ncol), np.float32, + 'cc layer {}'.format(kk), ext_unit_dict) + cc[k] = t + # void + if model.verbose: + sys.stdout.write( + ' loading swt dataset 12 for layer {}\n'.format(kk)) + t = Util2d.load(f, model, (nrow, ncol), np.float32, + 'void layer {}'.format(kk), ext_unit_dict) + void[k] = t + # sub + if model.verbose: + sys.stdout.write( + ' loading
swt dataset 13 for layer {}\n'.format(kk)) + t = Util2d.load(f, model, (nrow, ncol), np.float32, + 'sub layer {}'.format(kk), ext_unit_dict) + sub[k] = t + + # dataset 14 and 15 + if istpcs != 0: + pcsoff = [0] * nlay + pcs = None + else: + pcsoff = None + pcs = [0] * nlay + for k in range(nlay): + if istpcs != 0: + if model.verbose: + sys.stdout.write( + ' loading swt dataset 14 for layer {}\n'.format(k + 1)) + t = Util2d.load(f, model, (nrow, ncol), np.float32, + 'pcsoff layer {}'.format(k + 1), ext_unit_dict) + pcsoff[k] = t + else: + if model.verbose: + sys.stdout.write( + ' loading swt dataset 15 for layer {}\n'.format(k + 1)) + t = Util2d.load(f, model, (nrow, ncol), np.float32, + 'pcs layer {}'.format(k + 1), ext_unit_dict) + pcs[k] = t + + ids16 = None + ids17 = None + if iswtoc > 0: + # dataset 16 + if model.verbose: + sys.stdout.write( + ' loading swt dataset 16\n') + ids16 = np.empty(26, dtype=np.int32) + ids16 = read1d(f, ids16) + #for k in range(1, 26, 2): + # model.add_pop_key_list(ids16[k]) + # ids16[k] = 2054 # all sub-wt data sent to unit 2054 + # dataset 17 + ids17 = [0] * iswtoc + for k in range(iswtoc): + if model.verbose: + msg = 2 * ' ' + 'loading swt dataset 17 for ' + \ + 'iswtoc {}\n'.format(k + 1) + sys.stdout.write(msg) + t = np.empty(30, dtype=np.int32) + t = read1d(f, t) + t[0:4] -= 1 + ids17[k] = t + + if openfile: + f.close() + + # determine specified unit number + unitnumber = None + filenames = [None for x in range(15)] + if ext_unit_dict is not None: + unitnumber, filenames[0] = \ + model.get_ext_dict_attr(ext_unit_dict, + filetype=ModflowSwt.ftype()) + if ipakcb > 0: + iu, filenames[1] = \ + model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb) + + if iswtoc > 0: + ipos = 2 + for k in range(1, 26, 2): + unit = ids16[k] + if unit > 0: + iu, filenames[ipos] = \ + model.get_ext_dict_attr(ext_unit_dict, + unit=unit) + model.add_pop_key_list(unit) + ipos += 1 + + # create sub-wt instance + swt = ModflowSwt(model, ipakcb=ipakcb, iswtoc=iswtoc, nsystm=nsystm, + ithk=ithk, ivoid=ivoid, istpcs=istpcs, + icrcc=icrcc, lnwt=lnwt, izcfl=iizcfl, izcfm=izcfm, + iglfl=iglfl, iglfm=iglfm, iestfl=iestfl, + iestfm=iestfm, ipcsfl=ipcsfl, ipcsfm=ipcsfm, + istfl=istfl, istfm=istfm, gl0=gl0, sgm=sgm, + sgs=sgs, thick=thick, sse=sse, ssv=ssv, cr=cr, cc=cc, + void=void, sub=sub, pcsoff=pcsoff, + pcs=pcs, ids16=ids16, ids17=ids17, + unitnumber=unitnumber, filenames=filenames) + + # return sub-wt instance + return swt + + @staticmethod + def ftype(): + return 'SWT' + + @staticmethod + def defaultunit(): + return 35 diff --git a/flopy/modflow/mfupw.py b/flopy/modflow/mfupw.py index 23f2446012..3681cba8a3 100644 --- a/flopy/modflow/mfupw.py +++ b/flopy/modflow/mfupw.py @@ -1,531 +1,531 @@ -""" -mfupw module. Contains the ModflowUpw class. Note that the user can access -the ModflowUpw class as `flopy.modflow.ModflowUpw`. - -Additional information for this MODFLOW package can be found at the `Online -MODFLOW Guide -`_. - -""" - -import sys -import numpy as np -from .mfpar import ModflowPar as mfpar -from ..pakbase import Package -from ..utils import Util2d, Util3d, read1d -from ..utils.flopy_io import line_parse - - -class ModflowUpw(Package): - """ - Upstream weighting package class - - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to which - this package will be added. - ipakcb : int - A flag that is used to determine if cell-by-cell budget data should be
If ipakcb is non-zero cell-by-cell budget data will be saved. - (default is 0). - hdry : float - Is the head that is assigned to cells that are converted to dry during - a simulation. Although this value plays no role in the model - calculations, it is useful as an indicator when looking at the - resulting heads that are output from the model. HDRY is thus similar - to HNOFLO in the Basic Package, which is the value assigned to cells - that are no-flow cells at the start of a model simulation. (default - is -1.e30). - iphdry : int - iphdry is a flag that indicates whether groundwater head will be set - to hdry when the groundwater head is less than 0.0001 above the cell - bottom (units defined by lenuni in the discretization package). If - iphdry=0, then head will not be set to hdry. If iphdry>0, then head - will be set to hdry. If the head solution from one simulation will be - used as starting heads for a subsequent simulation, or if the - Observation Process is used (Harbaugh and others, 2000), then hdry - should not be printed to the output file for dry cells (that is, the - upw package input variable should be set as iphdry=0). (default is 0) - noparcheck : bool - noparcheck turns off the checking that a value is defined for all - cells when parameters are used to define layer data. - laytyp : int or array of ints (nlay) - Layer type (default is 0). - layavg : int or array of ints (nlay) - Layer average (default is 0). - 0 is harmonic mean - 1 is logarithmic mean - 2 is arithmetic mean of saturated thickness and logarithmic mean of - of hydraulic conductivity - chani : float or array of floats (nlay) - contains a value for each layer that is a flag or the horizontal - anisotropy. If CHANI is less than or equal to 0, then variable HANI - defines horizontal anisotropy. If CHANI is greater than 0, then CHANI - is the horizontal anisotropy for the entire layer, and HANI is not - read. If any HANI parameters are used, CHANI for all layers must be - less than or equal to 0. Use as many records as needed to enter a - value of CHANI for each layer. The horizontal anisotropy is the ratio - of the hydraulic conductivity along columns (the Y direction) to the - hydraulic conductivity along rows (the X direction). - layvka : int or array of ints (nlay) - a flag for each layer that indicates whether variable VKA is vertical - hydraulic conductivity or the ratio of horizontal to vertical - hydraulic conductivity. - laywet : int or array of ints (nlay) - contains a flag for each layer that indicates if wetting is active. - laywet should always be zero for the UPW Package because all cells - initially active are wettable. - hk : float or array of floats (nlay, nrow, ncol) - is the hydraulic conductivity along rows. HK is multiplied by - horizontal anisotropy (see CHANI and HANI) to obtain hydraulic - conductivity along columns. (default is 1.0). - hani : float or array of floats (nlay, nrow, ncol) - is the ratio of hydraulic conductivity along columns to hydraulic - conductivity along rows, where HK of item 10 specifies the hydraulic - conductivity along rows. Thus, the hydraulic conductivity along - columns is the product of the values in HK and HANI. - (default is 1.0). - vka : float or array of floats (nlay, nrow, ncol) - is either vertical hydraulic conductivity or the ratio of horizontal - to vertical hydraulic conductivity depending on the value of LAYVKA. - (default is 1.0). - ss : float or array of floats (nlay, nrow, ncol) - is specific storage unless the STORAGECOEFFICIENT option is used. 
- When STORAGECOEFFICIENT is used, Ss is confined storage coefficient. - (default is 1.e-5). - sy : float or array of floats (nlay, nrow, ncol) - is specific yield. (default is 0.15). - vkcb : float or array of floats (nlay, nrow, ncol) - is the vertical hydraulic conductivity of a Quasi-three-dimensional - confining bed below a layer. (default is 0.0). - extension : string - Filename extension (default is 'upw') - unitnumber : int - File unit number (default is None). - filenames : str or list of str - Filenames to use for the package and the output files. If - filenames=None the package name will be created using the model name - and package extension and the cbc output name will be created using - the model name and .cbc extension (for example, modflowtest.cbc), - if ipakcbc is a number greater than zero. If a single string is passed - the package will be set to the string and cbc output name will be - created using the model name and .cbc extension, if ipakcbc is a - number greater than zero. To define the names for all package files - (input and output) the length of the list of strings should be 2. - Default is None. - - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> lpf = flopy.modflow.ModflowLpf(m) - - """ - - def __init__(self, model, laytyp=0, layavg=0, chani=1.0, layvka=0, - laywet=0, ipakcb=None, hdry=-1E+30, iphdry=0, - hk=1.0, hani=1.0, vka=1.0, ss=1e-5, sy=0.15, vkcb=0.0, - noparcheck=False, - extension='upw', unitnumber=None, filenames=None): - - if model.version != 'mfnwt': - err = 'Error: model version must be mfnwt to use ' + \ - '{} package'.format(ModflowUpw.ftype()) - raise Exception(err) - - # set default unit number of one is not specified - if unitnumber is None: - unitnumber = ModflowUpw.defaultunit() - - # set filenames - if filenames is None: - filenames = [None, None] - elif isinstance(filenames, str): - filenames = [filenames, None] - elif isinstance(filenames, list): - if len(filenames) < 2: - filenames.append(None) - - # update external file information with cbc output, if necessary - if ipakcb is not None: - fname = filenames[1] - model.add_output_file(ipakcb, fname=fname, - package=ModflowUpw.ftype()) - else: - ipakcb = 0 - - # Fill namefile items - name = [ModflowUpw.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and - # unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - self.heading = '# {} package for '.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' 
- self.url = 'upw_upstream_weighting_package.htm' - - nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper - # item 1 - self.ipakcb = ipakcb - # Head in cells that are converted to dry during a simulation - self.hdry = hdry - # number of UPW parameters - self.npupw = 0 - self.iphdry = iphdry - self.laytyp = Util2d(model, (nlay,), np.int32, laytyp, name='laytyp') - self.layavg = Util2d(model, (nlay,), np.int32, layavg, name='layavg') - self.chani = Util2d(model, (nlay,), np.float32, chani, name='chani') - self.layvka = Util2d(model, (nlay,), np.int32, layvka, name='vka') - self.laywet = Util2d(model, (nlay,), np.int32, laywet, name='laywet') - - self.options = ' ' - if noparcheck: self.options = self.options + 'NOPARCHECK ' - - self.hk = Util3d(model, (nlay, nrow, ncol), np.float32, hk, name='hk', - locat=self.unit_number[0]) - self.hani = Util3d(model, (nlay, nrow, ncol), np.float32, hani, - name='hani', locat=self.unit_number[0]) - keys = [] - for k in range(nlay): - key = 'vka' - if self.layvka[k] != 0: - key = 'vani' - keys.append(key) - self.vka = Util3d(model, (nlay, nrow, ncol), np.float32, vka, - name=keys, locat=self.unit_number[0]) - self.ss = Util3d(model, (nlay, nrow, ncol), np.float32, ss, name='ss', - locat=self.unit_number[0]) - self.sy = Util3d(model, (nlay, nrow, ncol), np.float32, sy, name='sy', - locat=self.unit_number[0]) - self.vkcb = Util3d(model, (nlay, nrow, ncol), np.float32, vkcb, - name='vkcb', locat=self.unit_number[0]) - self.parent.add_package(self) - - def write_file(self, check=True, f=None): - """ - Write the package file. - - Parameters - ---------- - check : boolean - Check package data for common errors. (default True) - - Returns - ------- - None - - """ - # allows turning off package checks when writing files at model level - if check: - self.check(f='{}.chk'.format(self.name[0]), - verbose=self.parent.verbose, level=1) - nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper - if f is not None: - f_upw = f - else: - f_upw = open(self.fn_path, 'w') - # Item 0: text - f_upw.write('{}\n'.format(self.heading)) - # Item 1: IBCFCB, HDRY, NPLPF - f_upw.write('{0:10d}{1:10.3G}{2:10d}{3:10d}{4:s}\n' - .format(self.ipakcb, - self.hdry, - self.npupw, - self.iphdry, - self.options)) - # LAYTYP array - f_upw.write(self.laytyp.string) - # LAYAVG array - f_upw.write(self.layavg.string) - # CHANI array - f_upw.write(self.chani.string) - # LAYVKA array - f_upw.write(self.layvka.string) - # LAYWET array - f_upw.write(self.laywet.string) - # Item 7: WETFCT, IWETIT, IHDWET - iwetdry = self.laywet.sum() - if iwetdry > 0: - raise Exception('LAYWET should be 0 for UPW') - transient = not self.parent.get_package('DIS').steady.all() - for k in range(nlay): - f_upw.write(self.hk[k].get_file_entry()) - if self.chani[k] < 1: - f_upw.write(self.hani[k].get_file_entry()) - f_upw.write(self.vka[k].get_file_entry()) - if transient: - f_upw.write(self.ss[k].get_file_entry()) - if self.laytyp[k] != 0: - f_upw.write(self.sy[k].get_file_entry()) - if self.parent.get_package('DIS').laycbd[k] > 0: - f_upw.write(self.vkcb[k].get_file_entry()) - if (self.laywet[k] != 0 and self.laytyp[k] != 0): - f_upw.write(self.laywet[k].get_file_entry()) - f_upw.close() - - @staticmethod - def load(f, model, ext_unit_dict=None, check=True): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. 
- ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - check : boolean - Check package data for common errors. (default True) - - Returns - ------- - dis : ModflowUPW object - ModflowLpf object. - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> upw = flopy.modflow.ModflowUpw.load('test.upw', m) - - """ - - if model.verbose: - sys.stdout.write('loading upw package file...\n') - - if model.version != 'mfnwt': - msg = "Warning: model version was reset from " + \ - "'{}' to 'mfnwt' in order to load a UPW file".format( - model.version) - print(msg) - model.version = 'mfnwt' - - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - - # dataset 0 -- header - while True: - line = f.readline() - if line[0] != '#': - break - # determine problem dimensions - nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper() - # Item 1: IBCFCB, HDRY, NPLPF - line already read above - if model.verbose: - print(' loading ipakcb, HDRY, NPUPW, IPHDRY...') - t = line_parse(line) - ipakcb, hdry, npupw, iphdry = int(t[0]), \ - float(t[1]), \ - int(t[2]), \ - int(t[3]) - - # options - noparcheck = False - if len(t) > 3: - for k in range(3, len(t)): - if 'NOPARCHECK' in t[k].upper(): - noparcheck = True - - # LAYTYP array - if model.verbose: - print(' loading LAYTYP...') - laytyp = np.empty((nlay,), dtype=np.int32) - laytyp = read1d(f, laytyp) - - # LAYAVG array - if model.verbose: - print(' loading LAYAVG...') - layavg = np.empty((nlay,), dtype=np.int32) - layavg = read1d(f, layavg) - - # CHANI array - if model.verbose: - print(' loading CHANI...') - chani = np.empty((nlay,), dtype=np.float32) - chani = read1d(f, chani) - - # LAYVKA array - if model.verbose: - print(' loading LAYVKA...') - layvka = np.empty((nlay,), dtype=np.int32) - layvka = read1d(f, layvka) - - # LAYWET array - if model.verbose: - print(' loading LAYWET...') - laywet = np.empty((nlay,), dtype=np.int32) - laywet = read1d(f, laywet) - - # check that LAYWET is 0 for all layers - iwetdry = laywet.sum() - if iwetdry > 0: - raise Exception('LAYWET should be 0 for UPW') - - # get parameters - par_types = [] - if npupw > 0: - par_types, parm_dict = mfpar.load(f, npupw, model.verbose) - - # get arrays - transient = not model.get_package('DIS').steady.all() - hk = [0] * nlay - hani = [0] * nlay - vka = [0] * nlay - ss = [0] * nlay - sy = [0] * nlay - vkcb = [0] * nlay - # load by layer - for k in range(nlay): - - # hk - if model.verbose: - print(' loading hk layer {0:3d}...'.format(k + 1)) - if 'hk' not in par_types: - t = Util2d.load(f, model, (nrow, ncol), np.float32, 'hk', - ext_unit_dict) - else: - line = f.readline() - t = mfpar.parameter_fill(model, (nrow, ncol), 'hk', parm_dict, - findlayer=k) - hk[k] = t - - # hani - if chani[k] < 1: - if model.verbose: - print(' loading hani layer {0:3d}...'.format(k + 1)) - if 'hani' not in par_types: - t = Util2d.load(f, model, (nrow, ncol), np.float32, 'hani', - ext_unit_dict) - else: - line = f.readline() - t = mfpar.parameter_fill(model, (nrow, ncol), 'hani', - parm_dict, findlayer=k) - hani[k] = t - - # vka - if model.verbose: - print(' loading vka layer {0:3d}...'.format(k + 1)) - key = 'vk' - if layvka[k] != 0: - key = 'vani' - if 'vk' not in par_types and 'vani' not in par_types: - t = 
Util2d.load(f, model, (nrow, ncol), np.float32, key, - ext_unit_dict) - else: - line = f.readline() - t = mfpar.parameter_fill(model, (nrow, ncol), key, parm_dict, - findlayer=k) - vka[k] = t - - # storage properties - if transient: - - # ss - if model.verbose: - print(' loading ss layer {0:3d}...'.format(k + 1)) - if 'ss' not in par_types: - t = Util2d.load(f, model, (nrow, ncol), np.float32, 'ss', - ext_unit_dict) - else: - line = f.readline() - t = mfpar.parameter_fill(model, (nrow, ncol), 'ss', - parm_dict, findlayer=k) - ss[k] = t - - # sy - if laytyp[k] != 0: - if model.verbose: - print(' loading sy layer {0:3d}...'.format(k + 1)) - if 'sy' not in par_types: - t = Util2d.load(f, model, (nrow, ncol), np.float32, - 'sy', - ext_unit_dict) - else: - line = f.readline() - t = mfpar.parameter_fill(model, (nrow, ncol), 'sy', - parm_dict, findlayer=k) - sy[k] = t - - # vkcb - if model.get_package('DIS').laycbd[k] > 0: - if model.verbose: - print(' loading vkcb layer {0:3d}...'.format(k + 1)) - if 'vkcb' not in par_types: - t = Util2d.load(f, model, (nrow, ncol), np.float32, 'vkcb', - ext_unit_dict) - else: - line = f.readline() - t = mfpar.parameter_fill(model, (nrow, ncol), 'vkcb', - parm_dict, findlayer=k) - vkcb[k] = t - - if openfile: - f.close() - - # determine specified unit number - unitnumber = None - filenames = [None, None] - if ext_unit_dict is not None: - unitnumber, filenames[0] = \ - model.get_ext_dict_attr(ext_unit_dict, - filetype=ModflowUpw.ftype()) - if ipakcb > 0: - iu, filenames[1] = \ - model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb) - model.add_pop_key_list(ipakcb) - - # create upw object - upw = ModflowUpw(model, ipakcb=ipakcb, iphdry=iphdry, hdry=hdry, - noparcheck=noparcheck, - laytyp=laytyp, layavg=layavg, chani=chani, - layvka=layvka, laywet=laywet, - hk=hk, hani=hani, vka=vka, ss=ss, sy=sy, vkcb=vkcb, - unitnumber=unitnumber, filenames=filenames) - if check: - upw.check(f='{}.chk'.format(upw.name[0]), - verbose=upw.parent.verbose, level=0) - - # return upw object - return upw - - @staticmethod - def ftype(): - return 'UPW' - - @staticmethod - def defaultunit(): - return 31 +""" +mfupw module. Contains the ModflowUpw class. Note that the user can access +the ModflowUpw class as `flopy.modflow.ModflowUpw`. + +Additional information for this MODFLOW package can be found at the `Online +MODFLOW Guide +`_. + +""" + +import sys +import numpy as np +from .mfpar import ModflowPar as mfpar +from ..pakbase import Package +from ..utils import Util2d, Util3d, read1d +from ..utils.flopy_io import line_parse + + +class ModflowUpw(Package): + """ + Upstream weighting package class + + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to which + this package will be added. + ipakcb : int + A flag that is used to determine if cell-by-cell budget data should be + saved. If ipakcb is non-zero cell-by-cell budget data will be saved. + (default is 0). + hdry : float + Is the head that is assigned to cells that are converted to dry during + a simulation. Although this value plays no role in the model + calculations, it is useful as an indicator when looking at the + resulting heads that are output from the model. HDRY is thus similar + to HNOFLO in the Basic Package, which is the value assigned to cells + that are no-flow cells at the start of a model simulation. (default + is -1.e30). 
+ iphdry : int + iphdry is a flag that indicates whether groundwater head will be set + to hdry when the groundwater head is less than 0.0001 above the cell + bottom (units defined by lenuni in the discretization package). If + iphdry=0, then head will not be set to hdry. If iphdry>0, then head + will be set to hdry. If the head solution from one simulation will be + used as starting heads for a subsequent simulation, or if the + Observation Process is used (Harbaugh and others, 2000), then hdry + should not be printed to the output file for dry cells (that is, the + upw package input variable should be set as iphdry=0). (default is 0) + noparcheck : bool + noparcheck turns off the checking that a value is defined for all + cells when parameters are used to define layer data. + laytyp : int or array of ints (nlay) + Layer type (default is 0). + layavg : int or array of ints (nlay) + Layer average (default is 0). + 0 is harmonic mean + 1 is logarithmic mean + 2 is arithmetic mean of saturated thickness and logarithmic mean + of hydraulic conductivity + chani : float or array of floats (nlay) + contains a value for each layer that is a flag or the horizontal + anisotropy. If CHANI is less than or equal to 0, then variable HANI + defines horizontal anisotropy. If CHANI is greater than 0, then CHANI + is the horizontal anisotropy for the entire layer, and HANI is not + read. If any HANI parameters are used, CHANI for all layers must be + less than or equal to 0. Use as many records as needed to enter a + value of CHANI for each layer. The horizontal anisotropy is the ratio + of the hydraulic conductivity along columns (the Y direction) to the + hydraulic conductivity along rows (the X direction). + layvka : int or array of ints (nlay) + a flag for each layer that indicates whether variable VKA is vertical + hydraulic conductivity or the ratio of horizontal to vertical + hydraulic conductivity. + laywet : int or array of ints (nlay) + contains a flag for each layer that indicates if wetting is active. + laywet should always be zero for the UPW Package because all cells + initially active are wettable. + hk : float or array of floats (nlay, nrow, ncol) + is the hydraulic conductivity along rows. HK is multiplied by + horizontal anisotropy (see CHANI and HANI) to obtain hydraulic + conductivity along columns. (default is 1.0). + hani : float or array of floats (nlay, nrow, ncol) + is the ratio of hydraulic conductivity along columns to hydraulic + conductivity along rows, where HK of item 10 specifies the hydraulic + conductivity along rows. Thus, the hydraulic conductivity along + columns is the product of the values in HK and HANI. + (default is 1.0). + vka : float or array of floats (nlay, nrow, ncol) + is either vertical hydraulic conductivity or the ratio of horizontal + to vertical hydraulic conductivity depending on the value of LAYVKA. + (default is 1.0). + ss : float or array of floats (nlay, nrow, ncol) + is specific storage unless the STORAGECOEFFICIENT option is used. + When STORAGECOEFFICIENT is used, Ss is confined storage coefficient. + (default is 1.e-5). + sy : float or array of floats (nlay, nrow, ncol) + is specific yield. (default is 0.15). + vkcb : float or array of floats (nlay, nrow, ncol) + is the vertical hydraulic conductivity of a Quasi-three-dimensional + confining bed below a layer. (default is 0.0). + extension : string + Filename extension (default is 'upw') + unitnumber : int + File unit number (default is None).
+ filenames : str or list of str + Filenames to use for the package and the output files. If + filenames=None the package name will be created using the model name + and package extension and the cbc output name will be created using + the model name and .cbc extension (for example, modflowtest.cbc), + if ipakcb is a number greater than zero. If a single string is passed + the package will be set to the string and cbc output name will be + created using the model name and .cbc extension, if ipakcb is a + number greater than zero. To define the names for all package files + (input and output) the length of the list of strings should be 2. + Default is None. + + + Attributes + ---------- + + Methods + ------- + + See Also + -------- + + Notes + ----- + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow(version='mfnwt') + >>> upw = flopy.modflow.ModflowUpw(m) + + """ + + def __init__(self, model, laytyp=0, layavg=0, chani=1.0, layvka=0, + laywet=0, ipakcb=None, hdry=-1E+30, iphdry=0, + hk=1.0, hani=1.0, vka=1.0, ss=1e-5, sy=0.15, vkcb=0.0, + noparcheck=False, + extension='upw', unitnumber=None, filenames=None): + + if model.version != 'mfnwt': + err = 'Error: model version must be mfnwt to use ' + \ + '{} package'.format(ModflowUpw.ftype()) + raise Exception(err) + + # set default unit number if one is not specified + if unitnumber is None: + unitnumber = ModflowUpw.defaultunit() + + # set filenames + if filenames is None: + filenames = [None, None] + elif isinstance(filenames, str): + filenames = [filenames, None] + elif isinstance(filenames, list): + if len(filenames) < 2: + filenames.append(None) + + # update external file information with cbc output, if necessary + if ipakcb is not None: + fname = filenames[1] + model.add_output_file(ipakcb, fname=fname, + package=ModflowUpw.ftype()) + else: + ipakcb = 0 + + # Fill namefile items + name = [ModflowUpw.ftype()] + units = [unitnumber] + extra = [''] + + # set package name + fname = [filenames[0]] + + # Call ancestor's init to set self.parent, extension, name and + # unit number + Package.__init__(self, model, extension=extension, name=name, + unit_number=units, extra=extra, filenames=fname) + + self.heading = '# {} package for '.format(self.name[0]) + \ + ' {}, '.format(model.version_types[model.version]) + \ + 'generated by Flopy.'
+ self.url = 'upw_upstream_weighting_package.htm' + + nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper + # item 1 + self.ipakcb = ipakcb + # Head in cells that are converted to dry during a simulation + self.hdry = hdry + # number of UPW parameters + self.npupw = 0 + self.iphdry = iphdry + self.laytyp = Util2d(model, (nlay,), np.int32, laytyp, name='laytyp') + self.layavg = Util2d(model, (nlay,), np.int32, layavg, name='layavg') + self.chani = Util2d(model, (nlay,), np.float32, chani, name='chani') + self.layvka = Util2d(model, (nlay,), np.int32, layvka, name='vka') + self.laywet = Util2d(model, (nlay,), np.int32, laywet, name='laywet') + + self.options = ' ' + if noparcheck: self.options = self.options + 'NOPARCHECK ' + + self.hk = Util3d(model, (nlay, nrow, ncol), np.float32, hk, name='hk', + locat=self.unit_number[0]) + self.hani = Util3d(model, (nlay, nrow, ncol), np.float32, hani, + name='hani', locat=self.unit_number[0]) + keys = [] + for k in range(nlay): + key = 'vka' + if self.layvka[k] != 0: + key = 'vani' + keys.append(key) + self.vka = Util3d(model, (nlay, nrow, ncol), np.float32, vka, + name=keys, locat=self.unit_number[0]) + self.ss = Util3d(model, (nlay, nrow, ncol), np.float32, ss, name='ss', + locat=self.unit_number[0]) + self.sy = Util3d(model, (nlay, nrow, ncol), np.float32, sy, name='sy', + locat=self.unit_number[0]) + self.vkcb = Util3d(model, (nlay, nrow, ncol), np.float32, vkcb, + name='vkcb', locat=self.unit_number[0]) + self.parent.add_package(self) + + def write_file(self, check=True, f=None): + """ + Write the package file. + + Parameters + ---------- + check : boolean + Check package data for common errors. (default True) + + Returns + ------- + None + + """ + # allows turning off package checks when writing files at model level + if check: + self.check(f='{}.chk'.format(self.name[0]), + verbose=self.parent.verbose, level=1) + nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper + if f is not None: + f_upw = f + else: + f_upw = open(self.fn_path, 'w') + # Item 0: text + f_upw.write('{}\n'.format(self.heading)) + # Item 1: IBCFCB, HDRY, NPLPF + f_upw.write('{0:10d}{1:10.3G}{2:10d}{3:10d}{4:s}\n' + .format(self.ipakcb, + self.hdry, + self.npupw, + self.iphdry, + self.options)) + # LAYTYP array + f_upw.write(self.laytyp.string) + # LAYAVG array + f_upw.write(self.layavg.string) + # CHANI array + f_upw.write(self.chani.string) + # LAYVKA array + f_upw.write(self.layvka.string) + # LAYWET array + f_upw.write(self.laywet.string) + # Item 7: WETFCT, IWETIT, IHDWET + iwetdry = self.laywet.sum() + if iwetdry > 0: + raise Exception('LAYWET should be 0 for UPW') + transient = not self.parent.get_package('DIS').steady.all() + for k in range(nlay): + f_upw.write(self.hk[k].get_file_entry()) + if self.chani[k] < 1: + f_upw.write(self.hani[k].get_file_entry()) + f_upw.write(self.vka[k].get_file_entry()) + if transient: + f_upw.write(self.ss[k].get_file_entry()) + if self.laytyp[k] != 0: + f_upw.write(self.sy[k].get_file_entry()) + if self.parent.get_package('DIS').laycbd[k] > 0: + f_upw.write(self.vkcb[k].get_file_entry()) + if (self.laywet[k] != 0 and self.laytyp[k] != 0): + f_upw.write(self.laywet[k].get_file_entry()) + f_upw.close() + + @staticmethod + def load(f, model, ext_unit_dict=None, check=True): + """ + Load an existing package. + + Parameters + ---------- + f : filename or file handle + File to load. + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to + which this package will be added. 
+        ext_unit_dict : dictionary, optional
+            If the arrays in the file are specified using EXTERNAL,
+            or older style array control records, then `f` should be a file
+            handle. In this case ext_unit_dict is required, which can be
+            constructed using the function
+            :class:`flopy.utils.mfreadnam.parsenamefile`.
+        check : boolean
+            Check package data for common errors. (default True)
+
+        Returns
+        -------
+        upw : ModflowUpw object
+            ModflowUpw object.
+
+        Examples
+        --------
+
+        >>> import flopy
+        >>> m = flopy.modflow.Modflow()
+        >>> upw = flopy.modflow.ModflowUpw.load('test.upw', m)
+
+        """
+
+        if model.verbose:
+            sys.stdout.write('loading upw package file...\n')
+
+        if model.version != 'mfnwt':
+            msg = "Warning: model version was reset from " + \
+                  "'{}' to 'mfnwt' in order to load a UPW file".format(
+                model.version)
+            print(msg)
+            model.version = 'mfnwt'
+
+        openfile = not hasattr(f, 'read')
+        if openfile:
+            filename = f
+            f = open(filename, 'r')
+
+        # dataset 0 -- header
+        while True:
+            line = f.readline()
+            if line[0] != '#':
+                break
+        # determine problem dimensions
+        nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper()
+        # Item 1: IPAKCB, HDRY, NPUPW, IPHDRY - line already read above
+        if model.verbose:
+            print('   loading ipakcb, HDRY, NPUPW, IPHDRY...')
+        t = line_parse(line)
+        ipakcb, hdry, npupw, iphdry = int(t[0]), \
+                                      float(t[1]), \
+                                      int(t[2]), \
+                                      int(t[3])
+
+        # options (any entries after the first four items)
+        noparcheck = False
+        if len(t) > 4:
+            for k in range(4, len(t)):
+                if 'NOPARCHECK' in t[k].upper():
+                    noparcheck = True
+
+        # LAYTYP array
+        if model.verbose:
+            print('   loading LAYTYP...')
+        laytyp = np.empty((nlay,), dtype=np.int32)
+        laytyp = read1d(f, laytyp)
+
+        # LAYAVG array
+        if model.verbose:
+            print('   loading LAYAVG...')
+        layavg = np.empty((nlay,), dtype=np.int32)
+        layavg = read1d(f, layavg)
+
+        # CHANI array
+        if model.verbose:
+            print('   loading CHANI...')
+        chani = np.empty((nlay,), dtype=np.float32)
+        chani = read1d(f, chani)
+
+        # LAYVKA array
+        if model.verbose:
+            print('   loading LAYVKA...')
+        layvka = np.empty((nlay,), dtype=np.int32)
+        layvka = read1d(f, layvka)
+
+        # LAYWET array
+        if model.verbose:
+            print('   loading LAYWET...')
+        laywet = np.empty((nlay,), dtype=np.int32)
+        laywet = read1d(f, laywet)
+
+        # check that LAYWET is 0 for all layers
+        iwetdry = laywet.sum()
+        if iwetdry > 0:
+            raise Exception('LAYWET should be 0 for UPW')
+
+        # get parameters
+        par_types = []
+        if npupw > 0:
+            par_types, parm_dict = mfpar.load(f, npupw, model.verbose)
+
+        # get arrays
+        transient = not model.get_package('DIS').steady.all()
+        hk = [0] * nlay
+        hani = [0] * nlay
+        vka = [0] * nlay
+        ss = [0] * nlay
+        sy = [0] * nlay
+        vkcb = [0] * nlay
+        # load by layer
+        for k in range(nlay):
+
+            # hk
+            if model.verbose:
+                print('   loading hk layer {0:3d}...'.format(k + 1))
+            if 'hk' not in par_types:
+                t = Util2d.load(f, model, (nrow, ncol), np.float32, 'hk',
+                                ext_unit_dict)
+            else:
+                line = f.readline()
+                t = mfpar.parameter_fill(model, (nrow, ncol), 'hk', parm_dict,
+                                         findlayer=k)
+            hk[k] = t
+
+            # hani
+            if chani[k] < 1:
+                if model.verbose:
+                    print('   loading hani layer {0:3d}...'.format(k + 1))
+                if 'hani' not in par_types:
+                    t = Util2d.load(f, model, (nrow, ncol), np.float32, 'hani',
+                                    ext_unit_dict)
+                else:
+                    line = f.readline()
+                    t = mfpar.parameter_fill(model, (nrow, ncol), 'hani',
+                                             parm_dict, findlayer=k)
+                hani[k] = t
+
+            # vka
+            if model.verbose:
+                print('   loading vka layer {0:3d}...'.format(k + 1))
+            key = 'vk'
+            if layvka[k] != 0:
+                key = 'vani'
+            if 'vk' not in par_types and 'vani' not in par_types:
+                t = 
Util2d.load(f, model, (nrow, ncol), np.float32, key, + ext_unit_dict) + else: + line = f.readline() + t = mfpar.parameter_fill(model, (nrow, ncol), key, parm_dict, + findlayer=k) + vka[k] = t + + # storage properties + if transient: + + # ss + if model.verbose: + print(' loading ss layer {0:3d}...'.format(k + 1)) + if 'ss' not in par_types: + t = Util2d.load(f, model, (nrow, ncol), np.float32, 'ss', + ext_unit_dict) + else: + line = f.readline() + t = mfpar.parameter_fill(model, (nrow, ncol), 'ss', + parm_dict, findlayer=k) + ss[k] = t + + # sy + if laytyp[k] != 0: + if model.verbose: + print(' loading sy layer {0:3d}...'.format(k + 1)) + if 'sy' not in par_types: + t = Util2d.load(f, model, (nrow, ncol), np.float32, + 'sy', + ext_unit_dict) + else: + line = f.readline() + t = mfpar.parameter_fill(model, (nrow, ncol), 'sy', + parm_dict, findlayer=k) + sy[k] = t + + # vkcb + if model.get_package('DIS').laycbd[k] > 0: + if model.verbose: + print(' loading vkcb layer {0:3d}...'.format(k + 1)) + if 'vkcb' not in par_types: + t = Util2d.load(f, model, (nrow, ncol), np.float32, 'vkcb', + ext_unit_dict) + else: + line = f.readline() + t = mfpar.parameter_fill(model, (nrow, ncol), 'vkcb', + parm_dict, findlayer=k) + vkcb[k] = t + + if openfile: + f.close() + + # determine specified unit number + unitnumber = None + filenames = [None, None] + if ext_unit_dict is not None: + unitnumber, filenames[0] = \ + model.get_ext_dict_attr(ext_unit_dict, + filetype=ModflowUpw.ftype()) + if ipakcb > 0: + iu, filenames[1] = \ + model.get_ext_dict_attr(ext_unit_dict, unit=ipakcb) + model.add_pop_key_list(ipakcb) + + # create upw object + upw = ModflowUpw(model, ipakcb=ipakcb, iphdry=iphdry, hdry=hdry, + noparcheck=noparcheck, + laytyp=laytyp, layavg=layavg, chani=chani, + layvka=layvka, laywet=laywet, + hk=hk, hani=hani, vka=vka, ss=ss, sy=sy, vkcb=vkcb, + unitnumber=unitnumber, filenames=filenames) + if check: + upw.check(f='{}.chk'.format(upw.name[0]), + verbose=upw.parent.verbose, level=0) + + # return upw object + return upw + + @staticmethod + def ftype(): + return 'UPW' + + @staticmethod + def defaultunit(): + return 31 diff --git a/flopy/modflow/mfuzf1.py b/flopy/modflow/mfuzf1.py index f206db3deb..39c76a767a 100644 --- a/flopy/modflow/mfuzf1.py +++ b/flopy/modflow/mfuzf1.py @@ -1,1007 +1,1007 @@ -""" -mfuzf1 module. Contains the ModflowUzf1 class. Note that the user can access -the ModflowUzf1 class as `flopy.modflow.ModflowUzf1`. - -Additional information for this MODFLOW package can be found at the `Online -MODFLOW Guide -`_. - -""" - -import sys -import numpy as np -from ..utils.flopy_io import pop_item, line_parse -from ..pakbase import Package -from ..utils import Util2d, Transient2d -from ..utils.optionblock import OptionBlock -from collections import OrderedDict -import warnings - - -class ModflowUzf1(Package): - """ - MODFLOW Unsaturated Zone Flow 1 Boundary Package Class. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to which - this package will be added. - nuztop : integer - used to define which cell in a vertical column that recharge and - discharge is simulated. (default is 1) - - 1 Recharge to and discharge from only the top model layer. This - option assumes land surface is defined as top of layer 1. - 2 Recharge to and discharge from the specified layer in variable - IUZFBND. This option assumes land surface is defined as top of - layer specified in IUZFBND. 
- 3 Recharge to and discharge from the highest active cell in each - vertical column. Land surface is determined as top of layer - specified in IUZFBND. A constant head node intercepts any recharge - and prevents deeper percolation. - - iuzfopt : integer - equal to 1 or 2. A value of 1 indicates that the vertical hydraulic - conductivity will be specified within the UZF1 Package input file using - array VKS. A value of 2 indicates that the vertical hydraulic - conductivity will be specified within either the BCF or LPF Package - input file. (default is 0) - irunflg : integer - specifies whether ground water that discharges to land surface will - be routed to stream segments or lakes as specified in the IRUNBND - array (IRUNFLG not equal to zero) or if ground-water discharge is - removed from the model simulation and accounted for in the - ground-water budget as a loss of water (IRUNFLG=0). The - Streamflow-Routing (SFR2) and(or) the Lake (LAK3) Packages must be - active if IRUNFLG is not zero. (default is 0) - ietflg : integer - specifies whether or not evapotranspiration (ET) will be simulated. - ET will not be simulated if IETFLG is zero, otherwise it will be - simulated. (default is 0) - ipakcb : integer - flag for writing ground-water recharge, ET, and ground-water - discharge to land surface rates to a separate unformatted file using - subroutine UBUDSV. If ipakcb>0, it is the unit number to which the - cell-by-cell rates will be written when 'SAVE BUDGET' or a non-zero - value for ICBCFL is specified in Output Control. If ipakcb less than - or equal to 0, cell-by-cell rates will not be written to a file. - (default is 57) - iuzfcb2 : integer - flag for writing ground-water recharge, ET, and ground-water - discharge to land surface rates to a separate unformatted file using - module UBDSV3. If IUZFCB2>0, it is the unit number to which - cell-by-cell rates will be written when 'SAVE BUDGET' or a non-zero - value for ICBCFL is specified in Output Control. If IUZFCB2 less than - or equal to 0, cell-by-cell rates will not be written to file. - (default is 0) - ntrail2 : integer - equal to the number of trailing waves used to define the - water-content profile following a decrease in the infiltration rate. - The number of trailing waves varies depending on the problem, but a - range between 10 and 20 is usually adequate. More trailing waves may - decrease mass-balance error and will increase computational - requirements and memory usage. (default is 10) - nsets : integer - equal to the number of wave sets used to simulate multiple - infiltration periods. The number of wave sets should be set to 20 for - most problems involving time varying infiltration. The total number of - waves allowed within an unsaturated zone cell is equal to - NTRAIL2*NSETS2. An error will occur if the number of waves in a cell - exceeds this value. (default is 20) - surfdep : float - The average height of undulations, D (Figure 1 in UZF documentation), - in the land surface altitude. (default is 1.0) - iuzfbnd : integer - used to define the aerial extent of the active model in which recharge - and discharge will be simulated. (default is 1) - irunbnd : integer - used to define the stream segments within the Streamflow-Routing - (SFR2) Package or lake numbers in the Lake (LAK3) Package that - overland runoff from excess infiltration and ground-water - discharge to land surface will be added. A positive integer value - identifies the stream segment and a negative integer value identifies - the lake number. 
(default is 0) - vks : float - used to define the saturated vertical hydraulic conductivity of the - unsaturated zone (LT-1). (default is 1.0E-6) - eps : float - values for each model cell used to define the Brooks-Corey epsilon of - the unsaturated zone. Epsilon is used in the relation of water - content to hydraulic conductivity (Brooks and Corey, 1966). - (default is 3.5) - thts : float - used to define the saturated water content of the unsaturated zone in - units of volume of water to total volume (L3L-3). (default is 0.35) - thtr : float - used to define the residual water content for each vertical column of - cells in units of volume of water to total volume (L3L-3). THTR is - the irreducible water content and the unsaturated water content - cannot drain to water contents less than THTR. This variable is not - included unless the key word SPECIFYTHTR is specified. (default is - 0.15) - thti : float - used to define the initial water content for each vertical column of - cells in units of volume of water at start of simulation to total - volume (L3L-3). THTI should not be specified for steady-state - simulations. (default is 0.20) - row_col_iftunit_iuzopt : list - used to specify where information will be printed for each time step. - row and col are zero-based. IUZOPT specifies what that information - will be. (default is []) - IUZOPT is - - 1 Prints time, ground-water head, and thickness of unsaturated zone, - and cumulative volumes of infiltration, recharge, storage, change - in storage and ground-water discharge to land surface. - 2 Same as option 1 except rates of infiltration, recharge, change in - storage, and ground-water discharge also are printed. - 3 Prints time, ground-water head, thickness of unsaturated zone, - followed by a series of depths and water contents in the - unsaturated zone. - - nwt_11_fmt : boolean - flag indicating whether or not to utilize a newer (MODFLOW-NWT - version 1.1 or later) format style, i.e., uzf1 optional variables - appear line-by-line rather than in a specific order on a single - line. True means that optional variables (e.g., SPECIFYTHTR, - SPECIFYTHTI, NOSURFLEAK) appear on new lines. True also supports - a number of newer optional variables (e.g., SPECIFYSURFK, - REJECTSURFK, SEEPSURFK). False means that optional variables - appear on one line. (default is False) - specifythtr : boolean - key word for specifying optional input variable THTR (default is 0) - specifythti : boolean - key word for specifying optional input variable THTI. (default is 0) - nosurfleak : boolean - key word for inactivating calculation of surface leakage. - (default is 0) - specifysurfk : boolean - (MODFLOW-NWT version 1.1 and MODFLOW-2005 1.12 or later) - An optional character variable. When SPECIFYSURFK is specified, - the variable SURFK is specified in Data Set 4b. - rejectsurfk : boolean - (MODFLOW-NWT version 1.1 and MODFLOW-2005 1.12 or later) - An optional character variable. When REJECTSURFK is specified, - SURFK instead of VKS is used for calculating rejected infiltration. - REJECTSURFK only is included if SPECIFYSURFK is included. - seepsurfk : boolean - (MODFLOW-NWT version 1.1 and MODFLOW-2005 1.12 or later) - An optional character variable. When SEEPSURFK is specified, - SURFK instead of VKS is used for calculating surface leakage. - SEEPSURFK only is included if SPECIFYSURFK is included. - etsquare : float (smoothfact) - (MODFLOW-NWT version 1.1 and MODFLOW-2005 1.12 or later) - An optional character variable. 
When ETSQUARE is specified, - groundwater ET is simulated using a constant potential ET rate, - and is smoothed over a specified smoothing interval. - This option is recommended only when using the NWT solver. - - etsquare is activated in flopy by specifying a real value - for smoothfact (default is None). - For example, if the interval factor (smoothfact) - is specified as smoothfact=0.1 (recommended), - then the smoothing interval will be calculated as: - SMOOTHINT = 0.1*EXTDP and is applied over the range for groundwater - head (h): - * h < CELTOP-EXTDP, ET is zero; - * CELTOP-EXTDP < h < CELTOP-EXTDP+SMOOTHINT, ET is smoothed; - CELTOP-EXTDP+SMOOTHINT < h, ET is equal to potential ET. - uzgage : dict of lists or list of lists - Dataset 8 in UZF Package documentation. Each entry in the dict - is keyed by iftunit. - Dict of lists: If iftunit is negative, the list is empty. - If iftunit is positive, the list includes [IUZROW, IUZCOL, IUZOPT] - List of lists: - Lists follow the format described in the documentation: - [[IUZROW, IUZCOL, IFTUNIT, IUZOPT]] or [[-IFTUNIT]] - netflux : list of [Unitrech (int), Unitdis (int)] - (MODFLOW-NWT version 1.1 and MODFLOW-2005 1.12 or later) - An optional character variable. When NETFLUX is specified, - the sum of recharge (L3/T) and the sum of discharge (L3/T) is written - to separate unformatted files using module UBDSV3. - - netflux is activated in flopy by specifying a list for - Unitrech and Unitdis (default is None). - Unitrech and Unitdis are the unit numbers to which these values - are written when “SAVE BUDGET” is specified in Output Control. - Values written to Unitrech are the sum of recharge values - for the UZF, SFR2, and LAK packages, and values written to Unitdis - are the sum of discharge values for the UZF, SFR2, and LAK packages. - Values are averaged over the period between output times. - - [NETFLUX unitrech unitdis] - finf : float, 2-D array, or dict of {kper:value} - where kper is the zero-based stress period - to assign a value to. Value should be cast-able to Util2d instance - can be a scalar, list, or ndarray is the array value is constant in - time. - Used to define the infiltration rates (LT-1) at land surface for each - vertical column of cells. If FINF is specified as being greater than - the vertical hydraulic conductivity then FINF is set equal to the - vertical unsaturated hydraulic conductivity. Excess water is routed - to streams or lakes when IRUNFLG is not zero, and if SFR2 or LAK3 is - active. (default is 1.0E-8) - pet : float, 2-D array, or dict of {kper:value} - where kper is the zero-based stress period - to assign a value to. Value should be cast-able to Util2d instance - can be a scalar, list, or ndarray is the array value is constant in - time. - Used to define the ET demand rates (L1T-1) within the ET extinction - depth interval for each vertical column of cells. (default is 5.0E-8) - extdp : float, 2-D array, or dict of {kper:value} - where kper is the zero-based stress period - to assign a value to. Value should be cast-able to Util2d instance - can be a scalar, list, or ndarray is the array value is constant in - time. - Used to define the ET extinction depths. The quantity of ET removed - from a cell is limited by the volume of water stored in the - unsaturated zone above the extinction depth. If ground water is - within the ET extinction depth, then the rate removed is based - on a linear decrease in the maximum rate at land surface and zero at - the ET extinction depth. 
The linear decrease is the same method used - in the Evapotranspiration Package (McDonald and Harbaugh, 1988, chap. - 10). (default is 15.0) - extwc : float, 2-D array, or dict of {kper:value} - where kper is the zero-based stress period - to assign a value to. Value should be cast-able to Util2d instance - can be a scalar, list, or ndarray is the array value is constant in - time. - Used to define the extinction water content below which ET cannot be - removed from the unsaturated zone. EXTWC must have a value between - (THTS-Sy) and THTS, where Sy is the specific yield specified in - either the LPF or BCF Package. (default is 0.1) - uzfbud_ext : list - appears to be used for sequential naming of budget output files - (default is []) - extension : string - Filename extension (default is 'uzf') - unitnumber : int - File unit number (default is None). - filenames : str or list of str - Filenames to use for the package and the output files. If - filenames=None the package name will be created using the model name - and package extension and the cbc output, uzf output, and uzf - observation names will be created using the model name and .cbc, - uzfcb2.bin, and .uzf#.out extensions (for example, modflowtest.cbc, - and modflowtest.uzfcd2.bin), if ipakcbc, iuzfcb2, and len(uzgag) are - numbers greater than zero. For uzf observations the file extension is - created using the uzf observation file unit number (for example, for - uzf observations written to unit 123 the file extension would be - .uzf123.out). If a single string is passed the package name will be - set to the string and other uzf output files will be set to the model - name with the appropriate output file extensions. To define the names - for all package files (input and output) the length of the list of - strings should be 3 + len(uzgag). Default is None. - surfk : float - An optional array of positive real values used to define the hydraulic - conductivity (LT-1). SURFK is used for calculating the rejected - infiltration and/or surface leakage. IF SURFK is set greater than - VKS then it is set equal to VKS. Only used if SEEPSURFK is True. - - Attributes - ---------- - nuzgag : integer (deprecated - counter is set based on length of uzgage) - equal to the number of cells (one per vertical column) that will be - specified for printing detailed information on the unsaturated zone - water budget and water content. A gage also may be used to print - the budget summed over all model cells. (default is None) - - Methods - ------- - - See Also - -------- - - Notes - ----- - Parameters are not supported in FloPy. - - Examples - -------- - - >>> import flopy - >>> ml = flopy.modflow.Modflow() - >>> uzf = flopy.modflow.ModflowUzf1(ml, ...) 
- - """ - _options = OrderedDict([('specifythtr', - OptionBlock.simple_flag), - ('specifythti', - OptionBlock.simple_flag), - ('nosurfleak', - OptionBlock.simple_flag), - ('specifysurfk', - OptionBlock.simple_flag), - ('rejectsurfk', - OptionBlock.simple_flag), - ("seepsurfk", - OptionBlock.simple_flag), - ("etsquare", - {OptionBlock.dtype: np.bool_, - OptionBlock.nested: True, - OptionBlock.n_nested: 1, - OptionBlock.vars: - {"smoothfact": - OptionBlock.simple_float}}), - ("netflux", - {OptionBlock.dtype: np.bool_, - OptionBlock.nested: True, - OptionBlock.n_nested: 2, - OptionBlock.vars: - OrderedDict([("unitrech", - OptionBlock.simple_int), - ("unitdis", - OptionBlock.simple_int)])}), - ("savefinf", OptionBlock.simple_flag)]) - - def __init__(self, model, - nuztop=1, iuzfopt=0, irunflg=0, ietflg=0, ipakcb=None, - iuzfcb2=None, ntrail2=10, nsets=20, - surfdep=1.0, - iuzfbnd=1, irunbnd=0, vks=1.0E-6, eps=3.5, thts=0.35, - thtr=0.15, thti=0.20, - specifythtr=False, specifythti=False, nosurfleak=False, - finf=1.0E-8, pet=5.0E-8, extdp=15.0, extwc=0.1, - nwt_11_fmt=False, - specifysurfk=False, rejectsurfk=False, seepsurfk=False, - etsquare=None, netflux=None, nuzgag=None, - uzgag=None, extension='uzf', unitnumber=None, - filenames=None, options=None, surfk=0.1): - - # set default unit number of one is not specified - if unitnumber is None: - unitnumber = ModflowUzf1.defaultunit() - - # set filenames - nlen = 3 - if uzgag is not None: - nlen += len(uzgag) - if filenames is None: - filenames = [None for x in range(nlen)] - elif isinstance(filenames, str): - filenames = [filenames] + [None for x in range(nlen)] - elif isinstance(filenames, list): - if len(filenames) < nlen: - for idx in range(len(filenames), nlen + 1): - filenames.append(None) - - # update external file information with cbc output, if necessary - if ipakcb is not None: - fname = filenames[1] - model.add_output_file(abs(ipakcb), fname=fname, - package=ModflowUzf1.ftype()) - else: - ipakcb = 0 - - if iuzfcb2 is not None: - fname = filenames[2] - model.add_output_file(abs(iuzfcb2), fname=fname, - extension='uzfcb2.bin', - package=ModflowUzf1.ftype()) - else: - iuzfcb2 = 0 - - ipos = 3 - if uzgag is not None: - # convert to dict - if isinstance(uzgag, list): - d = {} - for l in uzgag: - if len(l) > 1: - d[l[2]] = [l[0], l[1], l[3]] - else: - d[-np.abs(l[0])] = [] - uzgag = d - for key, value in uzgag.items(): - fname = filenames[ipos] - iu = abs(key) - uzgagext = 'uzf{}.out'.format(iu) - model.add_output_file(iu, fname=fname, - binflag=False, - extension=uzgagext, - package=ModflowUzf1.ftype()) - ipos += 1 - # handle case where iftunit is listed in the values - # (otherwise, iftunit will be written instead of iuzopt) - if len(value) == 4: - uzgag[key] = value[:2] + value[-1:] - elif len(value) == 1: - uzgag[-np.abs(key)] = [] - - # Fill namefile items - name = [ModflowUzf1.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and - # unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - if self.parent.get_package('RCH') != None or \ - self.parent.get_package('EVT') != None: - msg = 'WARNING!\n The RCH and EVT packages should not be ' + \ - 'active when the UZF1 package is active!' - print(msg) - if self.parent.version == 'mf2000': - msg = 'WARNING!\nThe UZF1 package is only compatible ' + \ - 'with MODFLOW-2005 and MODFLOW-NWT!' 
- print(msg) - nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper - - self.heading = '# {} package for '.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' - self.url = 'uzf_unsaturated_zone_flow_pack.htm' - - # Data Set 1a - if nwt_11_fmt: - warnings.warn("nwt_11_fmt has been deprecated," - " and will be removed in the next release" - " please provide a flopy.utils.OptionBlock object" - " to the options argument", DeprecationWarning) - self.nwt_11_fmt = nwt_11_fmt - self.specifythtr = bool(specifythtr) - self.specifythti = bool(specifythti) - self.nosurfleak = bool(nosurfleak) - self.specifysurfk = bool(specifysurfk) - self.rejectsurfk = bool(rejectsurfk) - self.seepsurfk = bool(seepsurfk) - self.etsquare = False - self.smoothfact = None - if etsquare is not None: - try: - float(etsquare) - except: - msg = 'etsquare must be specified by entering a real ' + \ - 'number for smoothfact.' - print(msg) - self.etsquare = True - self.smoothfact = etsquare - self.netflux = False - self.unitrech = None - self.unitdis = None - if netflux is not None: - e = 'netflux must be a length=2 sequence of unitrech, unitdis' - assert len(netflux) == 2, e - self.netflux = True - self.unitrech, self.unitdis = netflux - - if options is None: - if ( - specifythti, specifythtr, nosurfleak, specifysurfk, rejectsurfk, - seepsurfk, self.etsquare, self.netflux) != (False, False, False, - False, False, False, - False, False): - options = OptionBlock("", ModflowUzf1, block=False) - - self.options = options - - # Data Set 1b - # NUZTOP IUZFOPT IRUNFLG IETFLG ipakcb IUZFCB2 [NTRAIL2 NSETS2] NUZGAG SURFDEP - self.nuztop = nuztop - self.iuzfopt = iuzfopt - # The Streamflow-Routing (SFR2) and(or) the Lake (LAK3) Packages - # must be active if IRUNFLG is not zero. - self.irunflg = irunflg - self.ietflg = ietflg - self.ipakcb = ipakcb - self.iuzfcb2 = iuzfcb2 - if iuzfopt > 0: - self.ntrail2 = ntrail2 - self.nsets = nsets - self.surfdep = surfdep - - # Data Set 2 - # IUZFBND (NCOL, NROW) -- U2DINT - self.iuzfbnd = Util2d(model, (nrow, ncol), np.int32, iuzfbnd, - name='iuzfbnd') - - # If IRUNFLG > 0: Read item 3 - # Data Set 3 - # [IRUNBND (NCOL, NROW)] -- U2DINT - if irunflg > 0: - self.irunbnd = Util2d(model, (nrow, ncol), np.int32, irunbnd, - name='irunbnd') - - # IF the absolute value of IUZFOPT = 1: Read item 4. 
- # Data Set 4 - # [VKS (NCOL, NROW)] -- U2DREL - if abs(iuzfopt) in [0, 1]: - self.vks = Util2d(model, (nrow, ncol), np.float32, vks, name='vks') - - if seepsurfk or specifysurfk: - self.surfk = Util2d(model, (nrow, ncol), np.float32, surfk, - name='surfk') - - if iuzfopt > 0: - # Data Set 5 - # EPS (NCOL, NROW) -- U2DREL - self.eps = Util2d(model, (nrow, ncol), np.float32, eps, name='eps') - # Data Set 6a - # THTS (NCOL, NROW) -- U2DREL - self.thts = Util2d(model, (nrow, ncol), np.float32, thts, - name='thts') - # Data Set 6b - # THTS (NCOL, NROW) -- U2DREL - if self.specifythtr > 0: - self.thtr = Util2d(model, (nrow, ncol), np.float32, thtr, - name='thtr') - # Data Set 7 - # [THTI (NCOL, NROW)] -- U2DREL - self.thti = Util2d(model, (nrow, ncol), np.float32, thti, - name='thti') - - # Data Set 8 - # {IFTUNIT: [IUZROW, IUZCOL, IUZOPT]} - self._uzgag = uzgag - - # Dataset 9, 11, 13 and 15 will be written automatically in the - # write_file function - # Data Set 10 - # [FINF (NCOL, NROW)] – U2DREL - - self.finf = Transient2d(model, (nrow, ncol), np.float32, - finf, name='finf') - if ietflg > 0: - self.pet = Transient2d(model, (nrow, ncol), np.float32, - pet, name='pet') - self.extdp = Transient2d(model, (nrow, ncol), np.float32, - extdp, name='extdp') - self.extwc = Transient2d(model, (nrow, ncol), np.float32, - extwc, name='extwc') - self.parent.add_package(self) - - def __setattr__(self, key, value): - if key == "uzgag": - msg = 'Uzgag must be set by the constructor' + \ - 'modifying this attribute requires creating a ' + \ - 'new ModflowUzf1 instance' - print(msg) - else: - super(ModflowUzf1, self).__setattr__(key, value) - - @property - def nuzgag(self): - if self.uzgag is None: - return 0 - else: - return len(self.uzgag) - - @property - def uzgag(self): - return self._uzgag - - def _2list(self, arg): - # input as a 3D array - if isinstance(arg, np.ndarray) and len(arg.shape) == 3: - lst = [arg[per, :, :] for per in range(arg.shape[0])] - # input is not a 3D array, and not a list - # (could be numeric value or 2D array) - elif not isinstance(arg, list): - lst = [arg] - # input was already a list - else: - lst = arg - return lst - - def ncells(self): - # Returns the maximum number of cells that have recharge - # (developed for MT3DMS SSM package) - nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper - return (nrow * ncol) - - def _write_1a(self, f_uzf): - - # the nwt_11_fmt code is slated for removal (deprecated!) - if not self.nwt_11_fmt: - specify_temp = '' - if self.specifythtr > 0: - specify_temp += 'SPECIFYTHTR ' - if self.specifythti > 0: - specify_temp += 'SPECIFYTHTI ' - if self.nosurfleak > 0: - specify_temp += 'NOSURFLEAK' - if (self.specifythtr + self.specifythti + self.nosurfleak) > 0: - f_uzf.write('{}\n'.format(specify_temp)) - del specify_temp - else: - txt = 'options\n' - for var in ['specifythtr', 'specifythti', 'nosurfleak', - 'specifysurfk', 'rejectsurfk', 'seepsurfk']: - value = self.__dict__[var] - if int(value) > 0: - txt += '{}\n'.format(var) - if self.etsquare: - txt += 'etsquare {}\n'.format(self.smoothfact) - if self.netflux: - txt += 'netflux {} {}\n'.format(self.unitrech, self.unitdis) - txt += 'end\n' - f_uzf.write(txt) - - def write_file(self, f=None): - """ - Write the package file. 
- - Returns - ------- - None - - """ - nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper - # Open file for writing - if f is not None: - if isinstance(f, str): - f_uzf = open(f, "w") - else: - f_uzf = f - else: - f_uzf = open(self.fn_path, 'w') - f_uzf.write('{}\n'.format(self.heading)) - - # Dataset 1a - if isinstance(self.options, - OptionBlock) and self.parent.version == "mfnwt": - self.options.update_from_package(self) - self.options.write_options(f_uzf) - - else: - self._write_1a(f_uzf) - - # Dataset 1b - if self.iuzfopt > 0: - comment = ' #NUZTOP IUZFOPT IRUNFLG IETFLG ipakcb IUZFCB2 NTRAIL NSETS NUZGAGES' - f_uzf.write( - '{0:10d}{1:10d}{2:10d}{3:10d}{4:10d}{5:10d}{6:10d}{7:10d}{8:10d}{9:15.6E}{10:100s}\n'. \ - format(self.nuztop, self.iuzfopt, self.irunflg, - self.ietflg, - self.ipakcb, self.iuzfcb2, \ - self.ntrail2, self.nsets, self.nuzgag, self.surfdep, - comment)) - else: - comment = ' #NUZTOP IUZFOPT IRUNFLG IETFLG ipakcb IUZFCB2 NUZGAGES' - f_uzf.write( - '{0:10d}{1:10d}{2:10d}{3:10d}{4:10d}{5:10d}{6:10d}{7:15.6E}{8:100s}\n'. \ - format(self.nuztop, self.iuzfopt, self.irunflg, - self.ietflg, - self.ipakcb, self.iuzfcb2, \ - self.nuzgag, self.surfdep, comment)) - f_uzf.write(self.iuzfbnd.get_file_entry()) - if self.irunflg > 0: - f_uzf.write(self.irunbnd.get_file_entry()) - # IF the absolute value of IUZFOPT = 1: Read item 4. - # Data Set 4 - # [VKS (NCOL, NROW)] -- U2DREL - if abs(self.iuzfopt) in [0, 1]: - f_uzf.write(self.vks.get_file_entry()) - - # Dataset 4b modflow 2005 v. 1.12 and modflow-nwt v. 1.1 - if self.seepsurfk or self.specifysurfk: - f_uzf.write(self.surfk.get_file_entry()) - - if self.iuzfopt > 0: - # Data Set 5 - # EPS (NCOL, NROW) -- U2DREL - f_uzf.write(self.eps.get_file_entry()) - # Data Set 6a - # THTS (NCOL, NROW) -- U2DREL - f_uzf.write(self.thts.get_file_entry()) - # Data Set 6b - # THTR (NCOL, NROW) -- U2DREL - if self.specifythtr > 0.0: - f_uzf.write(self.thtr.get_file_entry()) - # Data Set 7 - # [THTI (NCOL, NROW)] -- U2DREL - if not self.parent.get_package('DIS').steady[ - 0] or self.specifythti > 0.0: - f_uzf.write(self.thti.get_file_entry()) - # If NUZGAG>0: Item 8 is repeated NUZGAG times - # Data Set 8 - # [IUZROW] [IUZCOL] IFTUNIT [IUZOPT] - if self.nuzgag > 0: - for iftunit, values in self.uzgag.items(): - if iftunit > 0: - values[0] += 1 - values[1] += 1 - comment = ' #IUZROW IUZCOL IFTUNIT IUZOPT' - values.insert(2, iftunit) - for v in values: - f_uzf.write('{:10d}'.format(v)) - f_uzf.write('{}\n'.format(comment)) - else: - comment = ' #IFTUNIT' - f_uzf.write('{:10d}'.format(iftunit)) - f_uzf.write('{}\n'.format(comment)) - - def write_transient(name): - invar, var = self.__dict__[name].get_kper_entry(n) - - comment = ' #{} for stress period '.format(name) + str(n + 1) - f_uzf.write('{0:10d}{1:20s}\n'.format(invar, comment)) - if (invar >= 0): - f_uzf.write(var) - - for n in range(nper): - write_transient('finf') - if self.ietflg > 0: - write_transient('pet') - write_transient('extdp') - if self.iuzfopt > 0: - write_transient('extwc') - f_uzf.close() - - @staticmethod - def load(f, model, ext_unit_dict=None, check=False): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. 
In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - - Returns - ------- - uzf : ModflowUZF1 object - ModflowUZF1 object. - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> uzf = flopy.modflow.ModflowUZF1.load('test.uzf', m) - - """ - if model.verbose: - sys.stdout.write('loading uzf package file...\n') - - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - - # dataset 0 -- header - while True: - line = f.readline() - if line[0] != '#': - break - # determine problem dimensions - nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper() - - # dataset 1a - specifythtr = False - specifythti = False - nosurfleak = False - specifysurfk = False - etsquare = None - netflux = None - rejectsurfk = False - seepsurfk = False - options = None - if model.version == 'mfnwt' and 'options' in line.lower(): - options = OptionBlock.load_options(f, ModflowUzf1) - line = f.readline() - - else: - query = ("specifythtr", "specifythti", "nosurfleak", - "specifysurfk", "rejectsurfk", "seepsurfk", - "etsquare", "netflux", "savefinf") - for i in query: - if i in line.lower(): - options = OptionBlock(line.lower().strip(), - ModflowUzf1, block=False) - line = f.readline() - break - - if options is not None: - specifythtr = options.specifythtr - specifythti = options.specifythti - nosurfleak = options.nosurfleak - rejectsurfk = options.rejectsurfk - seepsurfk = options.seepsurfk - specifysurfk = options.specifysurfk - - if options.etsquare: - etsquare = options.smoothfact - if options.netflux: - netflux = [options.unitrech, options.unitdis] - - # dataset 1b - nuztop, iuzfopt, irunflg, ietflg, ipakcb, iuzfcb2, \ - ntrail2, nsets2, nuzgag, surfdep = _parse1(line) - - arrays = {'finf': {}, - # datasets 10, 12, 14, 16 are lists of util2d arrays - 'pet': {}, - 'extdp': {}, - 'extwc': {}} - - def load_util2d(name, dtype, per=None): - print(' loading {} array...'.format(name)) - if per is not None: - arrays[name][per] = \ - Util2d.load(f, model, (nrow, ncol), dtype, name, - ext_unit_dict) - else: - arrays[name] = Util2d.load(f, model, (nrow, ncol), dtype, name, - ext_unit_dict) - - # dataset 2 - load_util2d('iuzfbnd', np.int32) - - # dataset 3 - if irunflg > 0: - load_util2d('irunbnd', np.int32) - - # dataset 4 - if iuzfopt in [0, 1]: - load_util2d('vks', np.float32) - - # dataset 4b - if seepsurfk or specifysurfk: - load_util2d('surfk', np.float32) - - if iuzfopt > 0: - # dataset 5 - load_util2d('eps', np.float32) - - # dataset 6 - load_util2d('thts', np.float32) - - if specifythtr: - # dataset 6b (residual water content) - load_util2d('thtr', np.float32) - - if specifythti or np.all(~model.dis.steady.array): - # dataset 7 (initial water content; - # only read if not steady-state) - load_util2d('thti', np.float32) - - # dataset 8 - uzgag = {} - if nuzgag > 0: - for i in range(nuzgag): - iuzrow, iuzcol, iftunit, iuzopt = _parse8(f.readline()) - tmp = [iuzrow, iuzcol] if iftunit > 0 else [] - tmp.append(iftunit) - if iuzopt > 0: - tmp.append(iuzopt) - uzgag[iftunit] = tmp - - # dataset 9 - for per in range(nper): - print('stress period {}:'.format(per + 1)) - line = line_parse(f.readline()) - nuzf1 = pop_item(line, int) - - # dataset 10 - if nuzf1 >= 0: - load_util2d('finf', np.float32, per=per) - - if ietflg > 0: - # dataset 11 - line = line_parse(f.readline()) - nuzf2 = pop_item(line, int) - if nuzf2 >= 0: - # dataset 12 - load_util2d('pet', np.float32, per=per) - # 
dataset 13 - line = line_parse(f.readline()) - nuzf3 = pop_item(line, int) - if nuzf3 >= 0: - # dataset 14 - load_util2d('extdp', np.float32, per=per) - # dataset 15 - line = line_parse(f.readline()) - nuzf4 = pop_item(line, int) - if nuzf4 >= 0: - # dataset 16 - load_util2d('extwc', np.float32, per=per) - - # close the file - f.close() - - # determine specified unit number - unitnumber = None - filenames = [None for x in range(3 + nuzgag)] - if ext_unit_dict is not None: - unitnumber, filenames[0] = \ - model.get_ext_dict_attr(ext_unit_dict, - filetype=ModflowUzf1.ftype()) - if abs(ipakcb) > 0: - iu, filenames[1] = \ - model.get_ext_dict_attr(ext_unit_dict, unit=abs(ipakcb)) - model.add_pop_key_list(ipakcb) - if abs(iuzfcb2) > 0: - iu, filenames[2] = \ - model.get_ext_dict_attr(ext_unit_dict, unit=abs(iuzfcb2)) - model.add_pop_key_list(abs(iuzfcb2)) - - ipos = 3 - if nuzgag > 0: - for key, value in uzgag.items(): - iu, filenames[ipos] = \ - model.get_ext_dict_attr(ext_unit_dict, - unit=abs(key)) - model.add_pop_key_list(abs(iu)) - ipos += 1 - - # create uzf object - return ModflowUzf1(model, - nuztop=nuztop, iuzfopt=iuzfopt, irunflg=irunflg, - ietflg=ietflg, - ipakcb=ipakcb, iuzfcb2=iuzfcb2, - ntrail2=ntrail2, nsets=nsets2, - surfdep=surfdep, uzgag=uzgag, - specifythtr=specifythtr, specifythti=specifythti, - nosurfleak=nosurfleak, etsquare=etsquare, - netflux=netflux, seepsurfk=seepsurfk, - specifysurfk=specifysurfk, - rejectsurfk=rejectsurfk, - unitnumber=unitnumber, - filenames=filenames, options=options, **arrays) - - @staticmethod - def ftype(): - return 'UZF' - - @staticmethod - def defaultunit(): - return 19 - - -def _parse1a(line): - line = line_parse(line) - line = [s.lower() if isinstance(s, str) else s for s in line] - specifythtr = True if 'specifythtr' in line else False - specifythti = True if 'specifythti' in line else False - nosurfleak = True if 'nosurfleak' in line else False - return specifythtr, specifythti, nosurfleak - - -def _parse1(line): - ntrail2 = None - nsets2 = None - line = line_parse(line) - nuztop = pop_item(line, int) - iuzfopt = pop_item(line, int) - irunflg = pop_item(line, int) - ietflag = pop_item(line, int) - ipakcb = pop_item(line, int) - iuzfcb2 = pop_item(line, int) - if iuzfopt > 0: - ntrail2 = pop_item(line, int) - nsets2 = pop_item(line, int) - nuzgag = pop_item(line, int) - surfdep = pop_item(line, float) - return nuztop, iuzfopt, irunflg, ietflag, ipakcb, iuzfcb2, ntrail2, nsets2, nuzgag, surfdep - - -def _parse8(line): - iuzrow = None - iuzcol = None - iuzopt = 0 - line = line_parse(line) - if((len(line) > 1 and not int(line[0]) < 0) or - (len(line) > 1 and line[1].isdigit())): - iuzrow = pop_item(line, int) - 1 - iuzcol = pop_item(line, int) - 1 - iftunit = pop_item(line, int) - iuzopt = pop_item(line, int) - else: - iftunit = pop_item(line, int) - return iuzrow, iuzcol, iftunit, iuzopt +""" +mfuzf1 module. Contains the ModflowUzf1 class. Note that the user can access +the ModflowUzf1 class as `flopy.modflow.ModflowUzf1`. + +Additional information for this MODFLOW package can be found at the `Online +MODFLOW Guide +`_. + +""" + +import sys +import numpy as np +from ..utils.flopy_io import pop_item, line_parse +from ..pakbase import Package +from ..utils import Util2d, Transient2d +from ..utils.optionblock import OptionBlock +from collections import OrderedDict +import warnings + + +class ModflowUzf1(Package): + """ + MODFLOW Unsaturated Zone Flow 1 Boundary Package Class. 
+ + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to which + this package will be added. + nuztop : integer + used to define which cell in a vertical column that recharge and + discharge is simulated. (default is 1) + + 1 Recharge to and discharge from only the top model layer. This + option assumes land surface is defined as top of layer 1. + 2 Recharge to and discharge from the specified layer in variable + IUZFBND. This option assumes land surface is defined as top of + layer specified in IUZFBND. + 3 Recharge to and discharge from the highest active cell in each + vertical column. Land surface is determined as top of layer + specified in IUZFBND. A constant head node intercepts any recharge + and prevents deeper percolation. + + iuzfopt : integer + equal to 1 or 2. A value of 1 indicates that the vertical hydraulic + conductivity will be specified within the UZF1 Package input file using + array VKS. A value of 2 indicates that the vertical hydraulic + conductivity will be specified within either the BCF or LPF Package + input file. (default is 0) + irunflg : integer + specifies whether ground water that discharges to land surface will + be routed to stream segments or lakes as specified in the IRUNBND + array (IRUNFLG not equal to zero) or if ground-water discharge is + removed from the model simulation and accounted for in the + ground-water budget as a loss of water (IRUNFLG=0). The + Streamflow-Routing (SFR2) and(or) the Lake (LAK3) Packages must be + active if IRUNFLG is not zero. (default is 0) + ietflg : integer + specifies whether or not evapotranspiration (ET) will be simulated. + ET will not be simulated if IETFLG is zero, otherwise it will be + simulated. (default is 0) + ipakcb : integer + flag for writing ground-water recharge, ET, and ground-water + discharge to land surface rates to a separate unformatted file using + subroutine UBUDSV. If ipakcb>0, it is the unit number to which the + cell-by-cell rates will be written when 'SAVE BUDGET' or a non-zero + value for ICBCFL is specified in Output Control. If ipakcb less than + or equal to 0, cell-by-cell rates will not be written to a file. + (default is 57) + iuzfcb2 : integer + flag for writing ground-water recharge, ET, and ground-water + discharge to land surface rates to a separate unformatted file using + module UBDSV3. If IUZFCB2>0, it is the unit number to which + cell-by-cell rates will be written when 'SAVE BUDGET' or a non-zero + value for ICBCFL is specified in Output Control. If IUZFCB2 less than + or equal to 0, cell-by-cell rates will not be written to file. + (default is 0) + ntrail2 : integer + equal to the number of trailing waves used to define the + water-content profile following a decrease in the infiltration rate. + The number of trailing waves varies depending on the problem, but a + range between 10 and 20 is usually adequate. More trailing waves may + decrease mass-balance error and will increase computational + requirements and memory usage. (default is 10) + nsets : integer + equal to the number of wave sets used to simulate multiple + infiltration periods. The number of wave sets should be set to 20 for + most problems involving time varying infiltration. The total number of + waves allowed within an unsaturated zone cell is equal to + NTRAIL2*NSETS2. An error will occur if the number of waves in a cell + exceeds this value. 
(default is 20)
+    surfdep : float
+        The average height of undulations, D (Figure 1 in UZF documentation),
+        in the land surface altitude. (default is 1.0)
+    iuzfbnd : integer
+        used to define the areal extent of the active model in which recharge
+        and discharge will be simulated. (default is 1)
+    irunbnd : integer
+        used to define the stream segments within the Streamflow-Routing
+        (SFR2) Package or lake numbers in the Lake (LAK3) Package that
+        overland runoff from excess infiltration and ground-water
+        discharge to land surface will be added. A positive integer value
+        identifies the stream segment and a negative integer value identifies
+        the lake number. (default is 0)
+    vks : float
+        used to define the saturated vertical hydraulic conductivity of the
+        unsaturated zone (LT-1). (default is 1.0E-6)
+    eps : float
+        values for each model cell used to define the Brooks-Corey epsilon of
+        the unsaturated zone. Epsilon is used in the relation of water
+        content to hydraulic conductivity (Brooks and Corey, 1966).
+        (default is 3.5)
+    thts : float
+        used to define the saturated water content of the unsaturated zone in
+        units of volume of water to total volume (L3L-3). (default is 0.35)
+    thtr : float
+        used to define the residual water content for each vertical column of
+        cells in units of volume of water to total volume (L3L-3). THTR is
+        the irreducible water content and the unsaturated water content
+        cannot drain to water contents less than THTR. This variable is not
+        included unless the key word SPECIFYTHTR is specified. (default is
+        0.15)
+    thti : float
+        used to define the initial water content for each vertical column of
+        cells in units of volume of water at start of simulation to total
+        volume (L3L-3). THTI should not be specified for steady-state
+        simulations. (default is 0.20)
+    row_col_iftunit_iuzopt : list
+        used to specify where information will be printed for each time step.
+        row and col are zero-based. IUZOPT specifies what that information
+        will be. (default is [])
+        IUZOPT is
+
+        1   Prints time, ground-water head, and thickness of unsaturated zone,
+            and cumulative volumes of infiltration, recharge, storage, change
+            in storage and ground-water discharge to land surface.
+        2   Same as option 1 except rates of infiltration, recharge, change in
+            storage, and ground-water discharge also are printed.
+        3   Prints time, ground-water head, thickness of unsaturated zone,
+            followed by a series of depths and water contents in the
+            unsaturated zone.
+
+    nwt_11_fmt : boolean
+        flag indicating whether or not to utilize a newer (MODFLOW-NWT
+        version 1.1 or later) format style, i.e., uzf1 optional variables
+        appear line-by-line rather than in a specific order on a single
+        line. True means that optional variables (e.g., SPECIFYTHTR,
+        SPECIFYTHTI, NOSURFLEAK) appear on new lines. True also supports
+        a number of newer optional variables (e.g., SPECIFYSURFK,
+        REJECTSURFK, SEEPSURFK). False means that optional variables
+        appear on one line. (default is False)
+    specifythtr : boolean
+        key word for specifying optional input variable THTR. (default is 0)
+    specifythti : boolean
+        key word for specifying optional input variable THTI. (default is 0)
+    nosurfleak : boolean
+        key word for inactivating calculation of surface leakage.
+        (default is 0)
+    specifysurfk : boolean
+        (MODFLOW-NWT version 1.1 and MODFLOW-2005 1.12 or later)
+        An optional character variable. When SPECIFYSURFK is specified,
+        the variable SURFK is specified in Data Set 4b.
+    rejectsurfk : boolean
+        (MODFLOW-NWT version 1.1 and MODFLOW-2005 1.12 or later)
+        An optional character variable. When REJECTSURFK is specified,
+        SURFK instead of VKS is used for calculating rejected infiltration.
+        REJECTSURFK only is included if SPECIFYSURFK is included.
+    seepsurfk : boolean
+        (MODFLOW-NWT version 1.1 and MODFLOW-2005 1.12 or later)
+        An optional character variable. When SEEPSURFK is specified,
+        SURFK instead of VKS is used for calculating surface leakage.
+        SEEPSURFK only is included if SPECIFYSURFK is included.
+    etsquare : float (smoothfact)
+        (MODFLOW-NWT version 1.1 and MODFLOW-2005 1.12 or later)
+        An optional character variable. When ETSQUARE is specified,
+        groundwater ET is simulated using a constant potential ET rate,
+        and is smoothed over a specified smoothing interval.
+        This option is recommended only when using the NWT solver.
+
+        etsquare is activated in flopy by specifying a real value
+        for smoothfact (default is None).
+        For example, if the interval factor (smoothfact)
+        is specified as smoothfact=0.1 (recommended),
+        then the smoothing interval will be calculated as
+        SMOOTHINT = 0.1*EXTDP and is applied over the range for groundwater
+        head (h):
+        * h < CELTOP-EXTDP, ET is zero;
+        * CELTOP-EXTDP < h < CELTOP-EXTDP+SMOOTHINT, ET is smoothed;
+        * CELTOP-EXTDP+SMOOTHINT < h, ET is equal to potential ET.
+    uzgage : dict of lists or list of lists
+        Dataset 8 in UZF Package documentation. Each entry in the dict
+        is keyed by iftunit.
+        Dict of lists: If iftunit is negative, the list is empty.
+        If iftunit is positive, the list includes [IUZROW, IUZCOL, IUZOPT].
+        List of lists:
+        Lists follow the format described in the documentation:
+        [[IUZROW, IUZCOL, IFTUNIT, IUZOPT]] or [[-IFTUNIT]]
+    netflux : list of [Unitrech (int), Unitdis (int)]
+        (MODFLOW-NWT version 1.1 and MODFLOW-2005 1.12 or later)
+        An optional character variable. When NETFLUX is specified,
+        the sum of recharge (L3/T) and the sum of discharge (L3/T) is written
+        to separate unformatted files using module UBDSV3.
+
+        netflux is activated in flopy by specifying a list for
+        Unitrech and Unitdis (default is None).
+        Unitrech and Unitdis are the unit numbers to which these values
+        are written when "SAVE BUDGET" is specified in Output Control.
+        Values written to Unitrech are the sum of recharge values
+        for the UZF, SFR2, and LAK packages, and values written to Unitdis
+        are the sum of discharge values for the UZF, SFR2, and LAK packages.
+        Values are averaged over the period between output times.
+
+        [NETFLUX unitrech unitdis]
+    finf : float, 2-D array, or dict of {kper: value}
+        where kper is the zero-based stress period to assign a value to.
+        Value should be cast-able to a Util2d instance and can be a scalar,
+        list, or ndarray if the array value is constant in time.
+        Used to define the infiltration rates (LT-1) at land surface for each
+        vertical column of cells. If FINF is specified as being greater than
+        the vertical hydraulic conductivity then FINF is set equal to the
+        vertical unsaturated hydraulic conductivity. Excess water is routed
+        to streams or lakes when IRUNFLG is not zero, and if SFR2 or LAK3 is
+        active. (default is 1.0E-8)
+    pet : float, 2-D array, or dict of {kper: value}
+        where kper is the zero-based stress period to assign a value to.
+        Value should be cast-able to a Util2d instance and can be a scalar,
+        list, or ndarray if the array value is constant in time.
+        Used to define the ET demand rates (L1T-1) within the ET extinction
+        depth interval for each vertical column of cells. (default is 5.0E-8)
+    extdp : float, 2-D array, or dict of {kper: value}
+        where kper is the zero-based stress period to assign a value to.
+        Value should be cast-able to a Util2d instance and can be a scalar,
+        list, or ndarray if the array value is constant in time.
+        Used to define the ET extinction depths. The quantity of ET removed
+        from a cell is limited by the volume of water stored in the
+        unsaturated zone above the extinction depth. If ground water is
+        within the ET extinction depth, then the rate removed is based
+        on a linear decrease in the maximum rate at land surface and zero at
+        the ET extinction depth. The linear decrease is the same method used
+        in the Evapotranspiration Package (McDonald and Harbaugh, 1988, chap.
+        10). (default is 15.0)
+    extwc : float, 2-D array, or dict of {kper: value}
+        where kper is the zero-based stress period to assign a value to.
+        Value should be cast-able to a Util2d instance and can be a scalar,
+        list, or ndarray if the array value is constant in time.
+        Used to define the extinction water content below which ET cannot be
+        removed from the unsaturated zone. EXTWC must have a value between
+        (THTS-Sy) and THTS, where Sy is the specific yield specified in
+        either the LPF or BCF Package. (default is 0.1)
+    uzfbud_ext : list
+        appears to be used for sequential naming of budget output files
+        (default is [])
+    extension : string
+        Filename extension (default is 'uzf')
+    unitnumber : int
+        File unit number (default is None).
+    filenames : str or list of str
+        Filenames to use for the package and the output files. If
+        filenames=None the package name will be created using the model name
+        and package extension and the cbc output, uzf output, and uzf
+        observation names will be created using the model name and .cbc,
+        uzfcb2.bin, and .uzf#.out extensions (for example, modflowtest.cbc,
+        and modflowtest.uzfcb2.bin), if ipakcb, iuzfcb2, and len(uzgag) are
+        numbers greater than zero. For uzf observations the file extension is
+        created using the uzf observation file unit number (for example, for
+        uzf observations written to unit 123 the file extension would be
+        .uzf123.out). If a single string is passed the package name will be
+        set to the string and other uzf output files will be set to the model
+        name with the appropriate output file extensions. To define the names
+        for all package files (input and output) the length of the list of
+        strings should be 3 + len(uzgag). Default is None.
+    surfk : float
+        An optional array of positive real values used to define the hydraulic
+        conductivity (LT-1). SURFK is used for calculating the rejected
+        infiltration and/or surface leakage. If SURFK is set greater than
+        VKS then it is set equal to VKS. Only used if SEEPSURFK is True.
+
+    Attributes
+    ----------
+    nuzgag : integer (deprecated - counter is set based on length of uzgage)
+        equal to the number of cells (one per vertical column) that will be
+        specified for printing detailed information on the unsaturated zone
+        water budget and water content. A gage also may be used to print
+        the budget summed over all model cells. (default is None)
+
+    Methods
+    -------
+
+    See Also
+    --------
+
+    Notes
+    -----
+    Parameters are not supported in FloPy.
+
+    Examples
+    --------
+
+    >>> import flopy
+    >>> ml = flopy.modflow.Modflow()
+    >>> uzf = flopy.modflow.ModflowUzf1(ml, ...)
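The {kper: value} form accepted by finf, pet, extdp, and extwc deserves a fuller sketch than the elided example above; all dimensions, rates, and stress periods here are illustrative, and a DIS package must be added first:

    import flopy

    ml = flopy.modflow.Modflow('modflowtest', version='mfnwt')
    dis = flopy.modflow.ModflowDis(ml, nlay=1, nrow=10, ncol=10, nper=3)
    # finf changes in stress period 2; period 1 reuses the period-0 array
    # (unspecified periods carry the last specified value forward, written
    # as a negative NUZF1 flag in the UZF input file).
    uzf = flopy.modflow.ModflowUzf1(ml, ietflg=1,
                                    finf={0: 1.0e-8, 2: 5.0e-8},
                                    pet=5.0e-8, extdp=15.0, extwc=0.1)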
+ + """ + _options = OrderedDict([('specifythtr', + OptionBlock.simple_flag), + ('specifythti', + OptionBlock.simple_flag), + ('nosurfleak', + OptionBlock.simple_flag), + ('specifysurfk', + OptionBlock.simple_flag), + ('rejectsurfk', + OptionBlock.simple_flag), + ("seepsurfk", + OptionBlock.simple_flag), + ("etsquare", + {OptionBlock.dtype: np.bool_, + OptionBlock.nested: True, + OptionBlock.n_nested: 1, + OptionBlock.vars: + {"smoothfact": + OptionBlock.simple_float}}), + ("netflux", + {OptionBlock.dtype: np.bool_, + OptionBlock.nested: True, + OptionBlock.n_nested: 2, + OptionBlock.vars: + OrderedDict([("unitrech", + OptionBlock.simple_int), + ("unitdis", + OptionBlock.simple_int)])}), + ("savefinf", OptionBlock.simple_flag)]) + + def __init__(self, model, + nuztop=1, iuzfopt=0, irunflg=0, ietflg=0, ipakcb=None, + iuzfcb2=None, ntrail2=10, nsets=20, + surfdep=1.0, + iuzfbnd=1, irunbnd=0, vks=1.0E-6, eps=3.5, thts=0.35, + thtr=0.15, thti=0.20, + specifythtr=False, specifythti=False, nosurfleak=False, + finf=1.0E-8, pet=5.0E-8, extdp=15.0, extwc=0.1, + nwt_11_fmt=False, + specifysurfk=False, rejectsurfk=False, seepsurfk=False, + etsquare=None, netflux=None, nuzgag=None, + uzgag=None, extension='uzf', unitnumber=None, + filenames=None, options=None, surfk=0.1): + + # set default unit number of one is not specified + if unitnumber is None: + unitnumber = ModflowUzf1.defaultunit() + + # set filenames + nlen = 3 + if uzgag is not None: + nlen += len(uzgag) + if filenames is None: + filenames = [None for x in range(nlen)] + elif isinstance(filenames, str): + filenames = [filenames] + [None for x in range(nlen)] + elif isinstance(filenames, list): + if len(filenames) < nlen: + for idx in range(len(filenames), nlen + 1): + filenames.append(None) + + # update external file information with cbc output, if necessary + if ipakcb is not None: + fname = filenames[1] + model.add_output_file(abs(ipakcb), fname=fname, + package=ModflowUzf1.ftype()) + else: + ipakcb = 0 + + if iuzfcb2 is not None: + fname = filenames[2] + model.add_output_file(abs(iuzfcb2), fname=fname, + extension='uzfcb2.bin', + package=ModflowUzf1.ftype()) + else: + iuzfcb2 = 0 + + ipos = 3 + if uzgag is not None: + # convert to dict + if isinstance(uzgag, list): + d = {} + for l in uzgag: + if len(l) > 1: + d[l[2]] = [l[0], l[1], l[3]] + else: + d[-np.abs(l[0])] = [] + uzgag = d + for key, value in uzgag.items(): + fname = filenames[ipos] + iu = abs(key) + uzgagext = 'uzf{}.out'.format(iu) + model.add_output_file(iu, fname=fname, + binflag=False, + extension=uzgagext, + package=ModflowUzf1.ftype()) + ipos += 1 + # handle case where iftunit is listed in the values + # (otherwise, iftunit will be written instead of iuzopt) + if len(value) == 4: + uzgag[key] = value[:2] + value[-1:] + elif len(value) == 1: + uzgag[-np.abs(key)] = [] + + # Fill namefile items + name = [ModflowUzf1.ftype()] + units = [unitnumber] + extra = [''] + + # set package name + fname = [filenames[0]] + + # Call ancestor's init to set self.parent, extension, name and + # unit number + Package.__init__(self, model, extension=extension, name=name, + unit_number=units, extra=extra, filenames=fname) + + if self.parent.get_package('RCH') != None or \ + self.parent.get_package('EVT') != None: + msg = 'WARNING!\n The RCH and EVT packages should not be ' + \ + 'active when the UZF1 package is active!' + print(msg) + if self.parent.version == 'mf2000': + msg = 'WARNING!\nThe UZF1 package is only compatible ' + \ + 'with MODFLOW-2005 and MODFLOW-NWT!' 
+            print(msg)
+        nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper
+
+        self.heading = '# {} package for '.format(self.name[0]) + \
+                       ' {}, '.format(model.version_types[model.version]) + \
+                       'generated by Flopy.'
+        self.url = 'uzf_unsaturated_zone_flow_pack.htm'
+
+        # Data Set 1a
+        if nwt_11_fmt:
+            warnings.warn("nwt_11_fmt has been deprecated and will be "
+                          "removed in the next release; please provide a "
+                          "flopy.utils.OptionBlock object to the options "
+                          "argument", DeprecationWarning)
+        self.nwt_11_fmt = nwt_11_fmt
+        self.specifythtr = bool(specifythtr)
+        self.specifythti = bool(specifythti)
+        self.nosurfleak = bool(nosurfleak)
+        self.specifysurfk = bool(specifysurfk)
+        self.rejectsurfk = bool(rejectsurfk)
+        self.seepsurfk = bool(seepsurfk)
+        self.etsquare = False
+        self.smoothfact = None
+        if etsquare is not None:
+            try:
+                float(etsquare)
+            except (TypeError, ValueError):
+                msg = 'etsquare must be specified by entering a real ' + \
+                      'number for smoothfact.'
+                print(msg)
+            self.etsquare = True
+            self.smoothfact = etsquare
+        self.netflux = False
+        self.unitrech = None
+        self.unitdis = None
+        if netflux is not None:
+            e = 'netflux must be a length=2 sequence of unitrech, unitdis'
+            assert len(netflux) == 2, e
+            self.netflux = True
+            self.unitrech, self.unitdis = netflux
+
+        if options is None:
+            if (specifythti, specifythtr, nosurfleak, specifysurfk,
+                    rejectsurfk, seepsurfk, self.etsquare,
+                    self.netflux) != (False, False, False, False, False,
+                                      False, False, False):
+                options = OptionBlock("", ModflowUzf1, block=False)
+
+        self.options = options
+
+        # Data Set 1b
+        # NUZTOP IUZFOPT IRUNFLG IETFLG ipakcb IUZFCB2 [NTRAIL2 NSETS2]
+        # NUZGAG SURFDEP
+        self.nuztop = nuztop
+        self.iuzfopt = iuzfopt
+        # The Streamflow-Routing (SFR2) and(or) the Lake (LAK3) Packages
+        # must be active if IRUNFLG is not zero.
+        self.irunflg = irunflg
+        self.ietflg = ietflg
+        self.ipakcb = ipakcb
+        self.iuzfcb2 = iuzfcb2
+        if iuzfopt > 0:
+            self.ntrail2 = ntrail2
+            self.nsets = nsets
+        self.surfdep = surfdep
+
+        # Data Set 2
+        # IUZFBND (NCOL, NROW) -- U2DINT
+        self.iuzfbnd = Util2d(model, (nrow, ncol), np.int32, iuzfbnd,
+                              name='iuzfbnd')
+
+        # If IRUNFLG > 0: Read item 3
+        # Data Set 3
+        # [IRUNBND (NCOL, NROW)] -- U2DINT
+        if irunflg > 0:
+            self.irunbnd = Util2d(model, (nrow, ncol), np.int32, irunbnd,
+                                  name='irunbnd')
+
+        # IF the absolute value of IUZFOPT = 1: Read item 4.
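+        # Per the UZF1 input instructions, item 4 (VKS) applies when the
+        # absolute value of IUZFOPT is 1; with IUZFOPT = 2, vertical
+        # hydraulic conductivity is taken from the flow package (LPF or
+        # UPW) instead, so VKS is neither written nor read in that case.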
+        # Data Set 4
+        # [VKS (NCOL, NROW)] -- U2DREL
+        if abs(iuzfopt) in [0, 1]:
+            self.vks = Util2d(model, (nrow, ncol), np.float32, vks, name='vks')
+
+        if seepsurfk or specifysurfk:
+            self.surfk = Util2d(model, (nrow, ncol), np.float32, surfk,
+                                name='surfk')
+
+        if iuzfopt > 0:
+            # Data Set 5
+            # EPS (NCOL, NROW) -- U2DREL
+            self.eps = Util2d(model, (nrow, ncol), np.float32, eps, name='eps')
+            # Data Set 6a
+            # THTS (NCOL, NROW) -- U2DREL
+            self.thts = Util2d(model, (nrow, ncol), np.float32, thts,
+                               name='thts')
+            # Data Set 6b
+            # THTR (NCOL, NROW) -- U2DREL
+            if self.specifythtr > 0:
+                self.thtr = Util2d(model, (nrow, ncol), np.float32, thtr,
+                                   name='thtr')
+            # Data Set 7
+            # [THTI (NCOL, NROW)] -- U2DREL
+            self.thti = Util2d(model, (nrow, ncol), np.float32, thti,
+                               name='thti')
+
+        # Data Set 8
+        # {IFTUNIT: [IUZROW, IUZCOL, IUZOPT]}
+        self._uzgag = uzgag
+
+        # Datasets 9, 11, 13 and 15 will be written automatically in the
+        # write_file function
+        # Data Set 10
+        # [FINF (NCOL, NROW)] -- U2DREL
+
+        self.finf = Transient2d(model, (nrow, ncol), np.float32,
+                                finf, name='finf')
+        if ietflg > 0:
+            self.pet = Transient2d(model, (nrow, ncol), np.float32,
+                                   pet, name='pet')
+            self.extdp = Transient2d(model, (nrow, ncol), np.float32,
+                                     extdp, name='extdp')
+            self.extwc = Transient2d(model, (nrow, ncol), np.float32,
+                                     extwc, name='extwc')
+        self.parent.add_package(self)
+
+    def __setattr__(self, key, value):
+        if key == "uzgag":
+            msg = 'Uzgag must be set by the constructor; ' + \
+                  'modifying this attribute requires creating a ' + \
+                  'new ModflowUzf1 instance'
+            print(msg)
+        else:
+            super(ModflowUzf1, self).__setattr__(key, value)
+
+    @property
+    def nuzgag(self):
+        if self.uzgag is None:
+            return 0
+        else:
+            return len(self.uzgag)
+
+    @property
+    def uzgag(self):
+        return self._uzgag
+
+    def _2list(self, arg):
+        # input as a 3D array
+        if isinstance(arg, np.ndarray) and len(arg.shape) == 3:
+            lst = [arg[per, :, :] for per in range(arg.shape[0])]
+        # input is not a 3D array, and not a list
+        # (could be numeric value or 2D array)
+        elif not isinstance(arg, list):
+            lst = [arg]
+        # input was already a list
+        else:
+            lst = arg
+        return lst
+
+    def ncells(self):
+        # Returns the maximum number of cells that have recharge
+        # (developed for MT3DMS SSM package)
+        nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper
+        return nrow * ncol
+
+    def _write_1a(self, f_uzf):
+
+        # the nwt_11_fmt code is slated for removal (deprecated!)
+        if not self.nwt_11_fmt:
+            specify_temp = ''
+            if self.specifythtr > 0:
+                specify_temp += 'SPECIFYTHTR '
+            if self.specifythti > 0:
+                specify_temp += 'SPECIFYTHTI '
+            if self.nosurfleak > 0:
+                specify_temp += 'NOSURFLEAK'
+            if (self.specifythtr + self.specifythti + self.nosurfleak) > 0:
+                f_uzf.write('{}\n'.format(specify_temp))
+            del specify_temp
+        else:
+            txt = 'options\n'
+            for var in ['specifythtr', 'specifythti', 'nosurfleak',
+                        'specifysurfk', 'rejectsurfk', 'seepsurfk']:
+                value = self.__dict__[var]
+                if int(value) > 0:
+                    txt += '{}\n'.format(var)
+            if self.etsquare:
+                txt += 'etsquare {}\n'.format(self.smoothfact)
+            if self.netflux:
+                txt += 'netflux {} {}\n'.format(self.unitrech, self.unitdis)
+            txt += 'end\n'
+            f_uzf.write(txt)
+
+    def write_file(self, f=None):
+        """
+        Write the package file.
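+
+        Parameters
+        ----------
+        f : str or file handle, optional
+            Path or open file object to write to; if None (default), the
+            package's fn_path is used.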
+
+        Returns
+        -------
+        None
+
+        """
+        nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper
+        # Open file for writing
+        if f is not None:
+            if isinstance(f, str):
+                f_uzf = open(f, "w")
+            else:
+                f_uzf = f
+        else:
+            f_uzf = open(self.fn_path, 'w')
+        f_uzf.write('{}\n'.format(self.heading))
+
+        # Dataset 1a
+        if isinstance(self.options,
+                      OptionBlock) and self.parent.version == "mfnwt":
+            self.options.update_from_package(self)
+            self.options.write_options(f_uzf)
+        else:
+            self._write_1a(f_uzf)
+
+        # Dataset 1b
+        if self.iuzfopt > 0:
+            comment = ' #NUZTOP IUZFOPT IRUNFLG IETFLG ipakcb IUZFCB2 ' + \
+                      'NTRAIL NSETS NUZGAGES'
+            f_uzf.write(
+                '{0:10d}{1:10d}{2:10d}{3:10d}{4:10d}{5:10d}{6:10d}'
+                '{7:10d}{8:10d}{9:15.6E}{10:100s}\n'.format(
+                    self.nuztop, self.iuzfopt, self.irunflg, self.ietflg,
+                    self.ipakcb, self.iuzfcb2, self.ntrail2, self.nsets,
+                    self.nuzgag, self.surfdep, comment))
+        else:
+            comment = ' #NUZTOP IUZFOPT IRUNFLG IETFLG ipakcb IUZFCB2 ' + \
+                      'NUZGAGES'
+            f_uzf.write(
+                '{0:10d}{1:10d}{2:10d}{3:10d}{4:10d}{5:10d}{6:10d}'
+                '{7:15.6E}{8:100s}\n'.format(
+                    self.nuztop, self.iuzfopt, self.irunflg, self.ietflg,
+                    self.ipakcb, self.iuzfcb2, self.nuzgag, self.surfdep,
+                    comment))
+        f_uzf.write(self.iuzfbnd.get_file_entry())
+        if self.irunflg > 0:
+            f_uzf.write(self.irunbnd.get_file_entry())
+        # IF the absolute value of IUZFOPT = 1: Write item 4.
+        # Data Set 4
+        # [VKS (NCOL, NROW)] -- U2DREL
+        if abs(self.iuzfopt) in [0, 1]:
+            f_uzf.write(self.vks.get_file_entry())
+
+        # Dataset 4b modflow 2005 v. 1.12 and modflow-nwt v. 1.1
+        if self.seepsurfk or self.specifysurfk:
+            f_uzf.write(self.surfk.get_file_entry())
+
+        if self.iuzfopt > 0:
+            # Data Set 5
+            # EPS (NCOL, NROW) -- U2DREL
+            f_uzf.write(self.eps.get_file_entry())
+            # Data Set 6a
+            # THTS (NCOL, NROW) -- U2DREL
+            f_uzf.write(self.thts.get_file_entry())
+            # Data Set 6b
+            # THTR (NCOL, NROW) -- U2DREL
+            if self.specifythtr > 0.0:
+                f_uzf.write(self.thtr.get_file_entry())
+            # Data Set 7
+            # [THTI (NCOL, NROW)] -- U2DREL
+            if not self.parent.get_package('DIS').steady[0] or \
+                    self.specifythti > 0.0:
+                f_uzf.write(self.thti.get_file_entry())
+        # If NUZGAG > 0: Item 8 is repeated NUZGAG times
+        # Data Set 8
+        # [IUZROW] [IUZCOL] IFTUNIT [IUZOPT]
+        if self.nuzgag > 0:
+            for iftunit, values in self.uzgag.items():
+                # work on a copy so repeated calls to write_file do not
+                # accumulate the one-based offsets in self.uzgag
+                values = list(values)
+                if iftunit > 0:
+                    values[0] += 1
+                    values[1] += 1
+                    comment = ' #IUZROW IUZCOL IFTUNIT IUZOPT'
+                    values.insert(2, iftunit)
+                    for v in values:
+                        f_uzf.write('{:10d}'.format(v))
+                    f_uzf.write('{}\n'.format(comment))
+                else:
+                    comment = ' #IFTUNIT'
+                    f_uzf.write('{:10d}'.format(iftunit))
+                    f_uzf.write('{}\n'.format(comment))
+
+        def write_transient(name):
+            invar, var = self.__dict__[name].get_kper_entry(n)
+
+            comment = ' #{} for stress period {}'.format(name, n + 1)
+            f_uzf.write('{0:10d}{1:20s}\n'.format(invar, comment))
+            if invar >= 0:
+                f_uzf.write(var)
+
+        for n in range(nper):
+            write_transient('finf')
+            if self.ietflg > 0:
+                write_transient('pet')
+                write_transient('extdp')
+                if self.iuzfopt > 0:
+                    write_transient('extwc')
+        f_uzf.close()
+
+    @staticmethod
+    def load(f, model, ext_unit_dict=None, check=False):
+        """
+        Load an existing package.
+
+        Parameters
+        ----------
+        f : filename or file handle
+            File to load.
+        model : model object
+            The model object (of type :class:`flopy.modflow.mf.Modflow`) to
+            which this package will be added.
+        ext_unit_dict : dictionary, optional
+            If the arrays in the file are specified using EXTERNAL,
+            or older style array control records, then `f` should be a file
+            handle.
In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. + + Returns + ------- + uzf : ModflowUZF1 object + ModflowUZF1 object. + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> uzf = flopy.modflow.ModflowUZF1.load('test.uzf', m) + + """ + if model.verbose: + sys.stdout.write('loading uzf package file...\n') + + openfile = not hasattr(f, 'read') + if openfile: + filename = f + f = open(filename, 'r') + + # dataset 0 -- header + while True: + line = f.readline() + if line[0] != '#': + break + # determine problem dimensions + nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper() + + # dataset 1a + specifythtr = False + specifythti = False + nosurfleak = False + specifysurfk = False + etsquare = None + netflux = None + rejectsurfk = False + seepsurfk = False + options = None + if model.version == 'mfnwt' and 'options' in line.lower(): + options = OptionBlock.load_options(f, ModflowUzf1) + line = f.readline() + + else: + query = ("specifythtr", "specifythti", "nosurfleak", + "specifysurfk", "rejectsurfk", "seepsurfk", + "etsquare", "netflux", "savefinf") + for i in query: + if i in line.lower(): + options = OptionBlock(line.lower().strip(), + ModflowUzf1, block=False) + line = f.readline() + break + + if options is not None: + specifythtr = options.specifythtr + specifythti = options.specifythti + nosurfleak = options.nosurfleak + rejectsurfk = options.rejectsurfk + seepsurfk = options.seepsurfk + specifysurfk = options.specifysurfk + + if options.etsquare: + etsquare = options.smoothfact + if options.netflux: + netflux = [options.unitrech, options.unitdis] + + # dataset 1b + nuztop, iuzfopt, irunflg, ietflg, ipakcb, iuzfcb2, \ + ntrail2, nsets2, nuzgag, surfdep = _parse1(line) + + arrays = {'finf': {}, + # datasets 10, 12, 14, 16 are lists of util2d arrays + 'pet': {}, + 'extdp': {}, + 'extwc': {}} + + def load_util2d(name, dtype, per=None): + print(' loading {} array...'.format(name)) + if per is not None: + arrays[name][per] = \ + Util2d.load(f, model, (nrow, ncol), dtype, name, + ext_unit_dict) + else: + arrays[name] = Util2d.load(f, model, (nrow, ncol), dtype, name, + ext_unit_dict) + + # dataset 2 + load_util2d('iuzfbnd', np.int32) + + # dataset 3 + if irunflg > 0: + load_util2d('irunbnd', np.int32) + + # dataset 4 + if iuzfopt in [0, 1]: + load_util2d('vks', np.float32) + + # dataset 4b + if seepsurfk or specifysurfk: + load_util2d('surfk', np.float32) + + if iuzfopt > 0: + # dataset 5 + load_util2d('eps', np.float32) + + # dataset 6 + load_util2d('thts', np.float32) + + if specifythtr: + # dataset 6b (residual water content) + load_util2d('thtr', np.float32) + + if specifythti or np.all(~model.dis.steady.array): + # dataset 7 (initial water content; + # only read if not steady-state) + load_util2d('thti', np.float32) + + # dataset 8 + uzgag = {} + if nuzgag > 0: + for i in range(nuzgag): + iuzrow, iuzcol, iftunit, iuzopt = _parse8(f.readline()) + tmp = [iuzrow, iuzcol] if iftunit > 0 else [] + tmp.append(iftunit) + if iuzopt > 0: + tmp.append(iuzopt) + uzgag[iftunit] = tmp + + # dataset 9 + for per in range(nper): + print('stress period {}:'.format(per + 1)) + line = line_parse(f.readline()) + nuzf1 = pop_item(line, int) + + # dataset 10 + if nuzf1 >= 0: + load_util2d('finf', np.float32, per=per) + + if ietflg > 0: + # dataset 11 + line = line_parse(f.readline()) + nuzf2 = pop_item(line, int) + if nuzf2 >= 0: + # dataset 12 + load_util2d('pet', np.float32, per=per) + # 
dataset 13
+                line = line_parse(f.readline())
+                nuzf3 = pop_item(line, int)
+                if nuzf3 >= 0:
+                    # dataset 14
+                    load_util2d('extdp', np.float32, per=per)
+                # dataset 15
+                line = line_parse(f.readline())
+                nuzf4 = pop_item(line, int)
+                if nuzf4 >= 0:
+                    # dataset 16
+                    load_util2d('extwc', np.float32, per=per)
+
+        # close the file
+        f.close()
+
+        # determine specified unit number
+        unitnumber = None
+        filenames = [None for x in range(3 + nuzgag)]
+        if ext_unit_dict is not None:
+            unitnumber, filenames[0] = \
+                model.get_ext_dict_attr(ext_unit_dict,
+                                        filetype=ModflowUzf1.ftype())
+            if abs(ipakcb) > 0:
+                iu, filenames[1] = \
+                    model.get_ext_dict_attr(ext_unit_dict, unit=abs(ipakcb))
+                model.add_pop_key_list(ipakcb)
+            if abs(iuzfcb2) > 0:
+                iu, filenames[2] = \
+                    model.get_ext_dict_attr(ext_unit_dict, unit=abs(iuzfcb2))
+                model.add_pop_key_list(abs(iuzfcb2))
+
+            ipos = 3
+            if nuzgag > 0:
+                for key, value in uzgag.items():
+                    iu, filenames[ipos] = \
+                        model.get_ext_dict_attr(ext_unit_dict,
+                                                unit=abs(key))
+                    model.add_pop_key_list(abs(iu))
+                    ipos += 1
+
+        # create uzf object
+        return ModflowUzf1(model,
+                           nuztop=nuztop, iuzfopt=iuzfopt, irunflg=irunflg,
+                           ietflg=ietflg,
+                           ipakcb=ipakcb, iuzfcb2=iuzfcb2,
+                           ntrail2=ntrail2, nsets=nsets2,
+                           surfdep=surfdep, uzgag=uzgag,
+                           specifythtr=specifythtr, specifythti=specifythti,
+                           nosurfleak=nosurfleak, etsquare=etsquare,
+                           netflux=netflux, seepsurfk=seepsurfk,
+                           specifysurfk=specifysurfk,
+                           rejectsurfk=rejectsurfk,
+                           unitnumber=unitnumber,
+                           filenames=filenames, options=options, **arrays)
+
+    @staticmethod
+    def ftype():
+        return 'UZF'
+
+    @staticmethod
+    def defaultunit():
+        return 19
+
+
+def _parse1a(line):
+    line = line_parse(line)
+    line = [s.lower() if isinstance(s, str) else s for s in line]
+    specifythtr = 'specifythtr' in line
+    specifythti = 'specifythti' in line
+    nosurfleak = 'nosurfleak' in line
+    return specifythtr, specifythti, nosurfleak
+
+
+def _parse1(line):
+    ntrail2 = None
+    nsets2 = None
+    line = line_parse(line)
+    nuztop = pop_item(line, int)
+    iuzfopt = pop_item(line, int)
+    irunflg = pop_item(line, int)
+    ietflag = pop_item(line, int)
+    ipakcb = pop_item(line, int)
+    iuzfcb2 = pop_item(line, int)
+    if iuzfopt > 0:
+        ntrail2 = pop_item(line, int)
+        nsets2 = pop_item(line, int)
+    nuzgag = pop_item(line, int)
+    surfdep = pop_item(line, float)
+    return (nuztop, iuzfopt, irunflg, ietflag, ipakcb, iuzfcb2, ntrail2,
+            nsets2, nuzgag, surfdep)
+
+
+def _parse8(line):
+    iuzrow = None
+    iuzcol = None
+    iuzopt = 0
+    line = line_parse(line)
+    if len(line) > 1 and (int(line[0]) >= 0 or line[1].isdigit()):
+        iuzrow = pop_item(line, int) - 1
+        iuzcol = pop_item(line, int) - 1
+        iftunit = pop_item(line, int)
+        iuzopt = pop_item(line, int)
+    else:
+        iftunit = pop_item(line, int)
+    return iuzrow, iuzcol, iftunit, iuzopt
diff --git a/flopy/modflow/mfwel.py b/flopy/modflow/mfwel.py
index a1b44fc23a..84d94abe03 100644
--- a/flopy/modflow/mfwel.py
+++ b/flopy/modflow/mfwel.py
@@ -1,386 +1,386 @@
-"""
-mfwel module. Contains the ModflowWel class. Note that the user can access
-the ModflowWel class as `flopy.modflow.ModflowWel`.
-
-Additional information for this MODFLOW package can be found at the `Online
-MODFLOW Guide
-`_.
- -""" - -import sys -import numpy as np -from ..utils import MfList -from ..pakbase import Package -from ..utils.recarray_utils import create_empty_recarray -from ..utils.optionblock import OptionBlock -from collections import OrderedDict -import warnings - - -class ModflowWel(Package): - """ - MODFLOW Well Package Class. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to which - this package will be added. - ipakcb : int - A flag that is used to determine if cell-by-cell budget data should be - saved. If ipakcb is non-zero cell-by-cell budget data will be saved. - (default is 0). - stress_period_data : list of boundaries, or recarray of boundaries, or - dictionary of boundaries - Each well is defined through definition of - layer (int), row (int), column (int), flux (float). - The simplest form is a dictionary with a lists of boundaries for each - stress period, where each list of boundaries itself is a list of - boundaries. Indices of the dictionary are the numbers of the stress - period. This gives the form of: - - stress_period_data = - {0: [ - [lay, row, col, flux], - [lay, row, col, flux], - [lay, row, col, flux] - ], - 1: [ - [lay, row, col, flux], - [lay, row, col, flux], - [lay, row, col, flux] - ], ... - kper: - [ - [lay, row, col, flux], - [lay, row, col, flux], - [lay, row, col, flux] - ] - } - - Note that if the number of lists is smaller than the number of stress - periods, then the last list of wells will apply until the end of the - simulation. Full details of all options to specify stress_period_data - can be found in the flopy3 boundaries Notebook in the basic - subdirectory of the examples directory - dtype : custom datatype of stress_period_data. - If None the default well datatype will be applied (default is None). - extension : string - Filename extension (default is 'wel') - options : list of strings - Package options (default is None). - unitnumber : int - File unit number (default is None). - filenames : str or list of str - Filenames to use for the package and the output files. If - filenames=None the package name will be created using the model name - and package extension and the cbc output name will be created using - the model name and .cbc extension (for example, modflowtest.cbc), - if ipakcbc is a number greater than zero. If a single string is passed - the package will be set to the string and cbc output names will be - created using the model name and .cbc extension, if ipakcbc is a - number greater than zero. To define the names for all package files - (input and output) the length of the list of strings should be 2. - Default is None. - - Attributes - ---------- - mxactw : int - Maximum number of wells for a stress period. This is calculated - automatically by FloPy based on the information in - stress_period_data. - - Methods - ------- - - See Also - -------- - - Notes - ----- - Parameters are not supported in FloPy. 
- - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> lrcq = {0:[[2, 3, 4, -100.]], 1:[[2, 3, 4, -100.]]} - >>> wel = flopy.modflow.ModflowWel(m, stress_period_data=lrcq) - - """ - _options = OrderedDict([('specify', {OptionBlock.dtype: np.bool_, - OptionBlock.nested: True, - OptionBlock.n_nested: 2, - OptionBlock.vars: OrderedDict( - [('phiramp', - OptionBlock.simple_float), - ('iunitramp', - OrderedDict( - [(OptionBlock.dtype, int), - (OptionBlock.nested, False), - (OptionBlock.optional, True) - ]))])}), - ('tabfiles', OptionBlock.simple_tabfile)]) - - def __init__(self, model, ipakcb=None, stress_period_data=None, dtype=None, - extension='wel', options=None, binary=False, - unitnumber=None, filenames=None): - """ - Package constructor. - - """ - # set default unit number of one is not specified - if unitnumber is None: - unitnumber = ModflowWel.defaultunit() - - # set filenames - if filenames is None: - filenames = [None, None] - elif isinstance(filenames, str): - filenames = [filenames, None] - elif isinstance(filenames, list): - if len(filenames) < 2: - filenames.append(None) - - # update external file information with cbc output, if necessary - if ipakcb is not None: - fname = filenames[1] - model.add_output_file(ipakcb, fname=fname, - package=ModflowWel.ftype()) - else: - ipakcb = 0 - - # Fill namefile items - name = [ModflowWel.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and - # unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - self.heading = '# {} package for '.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' - self.url = 'wel.htm' - - self.ipakcb = ipakcb - self.np = 0 - - if options is None: - options = [] - self.specify = False - self.phiramp = None - self.iunitramp = None - self.options = options - if isinstance(options, OptionBlock): - if not self.options.specify: - self.specify = self.options.specify - else: - self.specify = True - - self.phiramp = self.options.phiramp - self.iunitramp = self.options.iunitramp - # this is to grab the aux variables... 
- options = [] - - else: - for idx, opt in enumerate(options): - if 'specify' in opt: - t = opt.strip().split() - self.specify = True - self.phiramp = np.float(t[1]) - self.iunitramp = np.int(t[2]) - self.options.pop(idx) - break - - if dtype is not None: - self.dtype = dtype - else: - self.dtype = self.get_default_dtype( - structured=self.parent.structured) - - # determine if any aux variables in dtype - dt = self.get_default_dtype(structured=self.parent.structured) - if len(self.dtype.names) > len(dt.names): - for name in self.dtype.names[len(dt.names):]: - ladd = True - for option in options: - if name.lower() in option.lower(): - ladd = False - break - if ladd: - options.append('aux {} '.format(name)) - - if isinstance(self.options, OptionBlock): - if not self.options.auxillary: - self.options.auxillary = options - else: - self.options = options - - # initialize MfList - self.stress_period_data = MfList(self, stress_period_data, - binary=binary) - - self.parent.add_package(self) - - @property - def phiramp_unit(self): - err = "phiramp_unit will be replaced " \ - "with iunitramp for consistency" - warnings.warn(err, DeprecationWarning) - return self.iunitramp - - @phiramp_unit.setter - def phiramp_unit(self, phiramp_unit): - self.iunitramp = phiramp_unit - - def ncells(self): - # Returns the maximum number of cells that have a well - # (developed for MT3DMS SSM package) - return self.stress_period_data.mxact - - def write_file(self, f=None): - """ - Write the package file. - - Parameters: - f: (str) optional file name - - Returns - ------- - None - - """ - if f is not None: - if isinstance(f, str): - f_wel = open(f, "w") - else: - f_wel = f - else: - f_wel = open(self.fn_path, 'w') - - f_wel.write('%s\n' % self.heading) - - if isinstance(self.options, OptionBlock) and \ - self.parent.version == "mfnwt": - - self.options.update_from_package(self) - if self.options.block: - self.options.write_options(f_wel) - - line = ( - ' {0:9d} {1:9d} '.format(self.stress_period_data.mxact, - self.ipakcb)) - - if isinstance(self.options, OptionBlock): - if self.options.noprint: - line += "NOPRINT " - if self.options.auxillary: - line += " ".join([str(aux).upper() for aux in - self.options.auxillary]) - - else: - for opt in self.options: - line += ' ' + str(opt) - - line += '\n' - f_wel.write(line) - - if isinstance(self.options, OptionBlock) and \ - self.parent.version == 'mfnwt': - if not self.options.block: - if isinstance(self.options.specify, np.ndarray): - self.options.tabfiles = False - self.options.write_options(f_wel) - - else: - if self.specify and self.parent.version == 'mfnwt': - f_wel.write('SPECIFY {0:10.5g} {1:10d}\n'.format(self.phiramp, - self.iunitramp)) - - self.stress_period_data.write_transient(f_wel) - f_wel.close() - - def add_record(self, kper, index, values): - try: - self.stress_period_data.add_record(kper, index, values) - except Exception as e: - raise Exception("mfwel error adding record to list: " + str(e)) - - @staticmethod - def get_default_dtype(structured=True): - if structured: - dtype = np.dtype([("k", np.int), ("i", np.int), - ("j", np.int), ("flux", np.float32)]) - else: - dtype = np.dtype([("node", np.int), ("flux", np.float32)]) - return dtype - - @staticmethod - def get_empty(ncells=0, aux_names=None, structured=True): - # get an empty recarray that corresponds to dtype - dtype = ModflowWel.get_default_dtype(structured=structured) - if aux_names is not None: - dtype = Package.add_to_dtype(dtype, aux_names, np.float32) - return create_empty_recarray(ncells, dtype, 
default_value=-1.0E+10) - - @staticmethod - def get_sfac_columns(): - return ['flux'] - - @staticmethod - def load(f, model, nper=None, ext_unit_dict=None, check=True): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. - nper : int - The number of stress periods. If nper is None, then nper will be - obtained from the model object. (default is None). - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - - Returns - ------- - wel : ModflowWel object - ModflowWel object. - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> wel = flopy.modflow.ModflowWel.load('test.wel', m) - - """ - - if model.verbose: - sys.stdout.write('loading wel package file...\n') - - return Package.load(f, model, ModflowWel, nper=nper, check=check, - ext_unit_dict=ext_unit_dict) - - @staticmethod - def ftype(): - return 'WEL' - - @staticmethod - def defaultunit(): - return 20 +""" +mfwel module. Contains the ModflowWel class. Note that the user can access +the ModflowWel class as `flopy.modflow.ModflowWel`. + +Additional information for this MODFLOW package can be found at the `Online +MODFLOW Guide +`_. + +""" + +import sys +import numpy as np +from ..utils import MfList +from ..pakbase import Package +from ..utils.recarray_utils import create_empty_recarray +from ..utils.optionblock import OptionBlock +from collections import OrderedDict +import warnings + + +class ModflowWel(Package): + """ + MODFLOW Well Package Class. + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to which + this package will be added. + ipakcb : int + A flag that is used to determine if cell-by-cell budget data should be + saved. If ipakcb is non-zero cell-by-cell budget data will be saved. + (default is 0). + stress_period_data : list of boundaries, or recarray of boundaries, or + dictionary of boundaries + Each well is defined through definition of + layer (int), row (int), column (int), flux (float). + The simplest form is a dictionary with a lists of boundaries for each + stress period, where each list of boundaries itself is a list of + boundaries. Indices of the dictionary are the numbers of the stress + period. This gives the form of: + + stress_period_data = + {0: [ + [lay, row, col, flux], + [lay, row, col, flux], + [lay, row, col, flux] + ], + 1: [ + [lay, row, col, flux], + [lay, row, col, flux], + [lay, row, col, flux] + ], ... + kper: + [ + [lay, row, col, flux], + [lay, row, col, flux], + [lay, row, col, flux] + ] + } + + Note that if the number of lists is smaller than the number of stress + periods, then the last list of wells will apply until the end of the + simulation. Full details of all options to specify stress_period_data + can be found in the flopy3 boundaries Notebook in the basic + subdirectory of the examples directory + dtype : custom datatype of stress_period_data. + If None the default well datatype will be applied (default is None). + extension : string + Filename extension (default is 'wel') + options : list of strings + Package options (default is None). 
+ unitnumber : int + File unit number (default is None). + filenames : str or list of str + Filenames to use for the package and the output files. If + filenames=None the package name will be created using the model name + and package extension and the cbc output name will be created using + the model name and .cbc extension (for example, modflowtest.cbc), + if ipakcbc is a number greater than zero. If a single string is passed + the package will be set to the string and cbc output names will be + created using the model name and .cbc extension, if ipakcbc is a + number greater than zero. To define the names for all package files + (input and output) the length of the list of strings should be 2. + Default is None. + + Attributes + ---------- + mxactw : int + Maximum number of wells for a stress period. This is calculated + automatically by FloPy based on the information in + stress_period_data. + + Methods + ------- + + See Also + -------- + + Notes + ----- + Parameters are not supported in FloPy. + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> lrcq = {0:[[2, 3, 4, -100.]], 1:[[2, 3, 4, -100.]]} + >>> wel = flopy.modflow.ModflowWel(m, stress_period_data=lrcq) + + """ + _options = OrderedDict([('specify', {OptionBlock.dtype: np.bool_, + OptionBlock.nested: True, + OptionBlock.n_nested: 2, + OptionBlock.vars: OrderedDict( + [('phiramp', + OptionBlock.simple_float), + ('iunitramp', + OrderedDict( + [(OptionBlock.dtype, int), + (OptionBlock.nested, False), + (OptionBlock.optional, True) + ]))])}), + ('tabfiles', OptionBlock.simple_tabfile)]) + + def __init__(self, model, ipakcb=None, stress_period_data=None, dtype=None, + extension='wel', options=None, binary=False, + unitnumber=None, filenames=None): + """ + Package constructor. + + """ + # set default unit number of one is not specified + if unitnumber is None: + unitnumber = ModflowWel.defaultunit() + + # set filenames + if filenames is None: + filenames = [None, None] + elif isinstance(filenames, str): + filenames = [filenames, None] + elif isinstance(filenames, list): + if len(filenames) < 2: + filenames.append(None) + + # update external file information with cbc output, if necessary + if ipakcb is not None: + fname = filenames[1] + model.add_output_file(ipakcb, fname=fname, + package=ModflowWel.ftype()) + else: + ipakcb = 0 + + # Fill namefile items + name = [ModflowWel.ftype()] + units = [unitnumber] + extra = [''] + + # set package name + fname = [filenames[0]] + + # Call ancestor's init to set self.parent, extension, name and + # unit number + Package.__init__(self, model, extension=extension, name=name, + unit_number=units, extra=extra, filenames=fname) + + self.heading = '# {} package for '.format(self.name[0]) + \ + ' {}, '.format(model.version_types[model.version]) + \ + 'generated by Flopy.' + self.url = 'wel.htm' + + self.ipakcb = ipakcb + self.np = 0 + + if options is None: + options = [] + self.specify = False + self.phiramp = None + self.iunitramp = None + self.options = options + if isinstance(options, OptionBlock): + if not self.options.specify: + self.specify = self.options.specify + else: + self.specify = True + + self.phiramp = self.options.phiramp + self.iunitramp = self.options.iunitramp + # this is to grab the aux variables... 
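+            # reset to an empty list so the aux variable names detected
+            # below are collected and stored back on self.options.auxillary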
+            options = []
+
+        else:
+            for idx, opt in enumerate(options):
+                if 'specify' in opt:
+                    t = opt.strip().split()
+                    self.specify = True
+                    self.phiramp = float(t[1])
+                    self.iunitramp = int(t[2])
+                    self.options.pop(idx)
+                    break
+
+        if dtype is not None:
+            self.dtype = dtype
+        else:
+            self.dtype = self.get_default_dtype(
+                structured=self.parent.structured)
+
+        # determine if any aux variables in dtype
+        dt = self.get_default_dtype(structured=self.parent.structured)
+        if len(self.dtype.names) > len(dt.names):
+            for name in self.dtype.names[len(dt.names):]:
+                ladd = True
+                for option in options:
+                    if name.lower() in option.lower():
+                        ladd = False
+                        break
+                if ladd:
+                    options.append('aux {} '.format(name))
+
+        if isinstance(self.options, OptionBlock):
+            if not self.options.auxillary:
+                self.options.auxillary = options
+        else:
+            self.options = options
+
+        # initialize MfList
+        self.stress_period_data = MfList(self, stress_period_data,
+                                         binary=binary)
+
+        self.parent.add_package(self)
+
+    @property
+    def phiramp_unit(self):
+        err = "phiramp_unit will be replaced " \
+              "with iunitramp for consistency"
+        warnings.warn(err, DeprecationWarning)
+        return self.iunitramp
+
+    @phiramp_unit.setter
+    def phiramp_unit(self, phiramp_unit):
+        self.iunitramp = phiramp_unit
+
+    def ncells(self):
+        # Returns the maximum number of cells that have a well
+        # (developed for MT3DMS SSM package)
+        return self.stress_period_data.mxact
+
+    def write_file(self, f=None):
+        """
+        Write the package file.
+
+        Parameters
+        ----------
+        f : str or file handle, optional
+            File name or open file object to write to; if None (default),
+            the package's fn_path is used.
+
+        Returns
+        -------
+        None
+
+        """
+        if f is not None:
+            if isinstance(f, str):
+                f_wel = open(f, "w")
+            else:
+                f_wel = f
+        else:
+            f_wel = open(self.fn_path, 'w')
+
+        f_wel.write('%s\n' % self.heading)
+
+        if isinstance(self.options, OptionBlock) and \
+                self.parent.version == "mfnwt":
+            self.options.update_from_package(self)
+            if self.options.block:
+                self.options.write_options(f_wel)
+
+        line = (
+            ' {0:9d} {1:9d} '.format(self.stress_period_data.mxact,
+                                     self.ipakcb))
+
+        if isinstance(self.options, OptionBlock):
+            if self.options.noprint:
+                line += "NOPRINT "
+            if self.options.auxillary:
+                line += " ".join([str(aux).upper() for aux in
+                                  self.options.auxillary])
+
+        else:
+            for opt in self.options:
+                line += ' ' + str(opt)
+
+        line += '\n'
+        f_wel.write(line)
+
+        if isinstance(self.options, OptionBlock) and \
+                self.parent.version == 'mfnwt':
+            if not self.options.block:
+                if isinstance(self.options.specify, np.ndarray):
+                    self.options.tabfiles = False
+                self.options.write_options(f_wel)
+
+        else:
+            if self.specify and self.parent.version == 'mfnwt':
+                f_wel.write('SPECIFY {0:10.5g} {1:10d}\n'.format(
+                    self.phiramp, self.iunitramp))
+
+        self.stress_period_data.write_transient(f_wel)
+        f_wel.close()
+
+    def add_record(self, kper, index, values):
+        try:
+            self.stress_period_data.add_record(kper, index, values)
+        except Exception as e:
+            raise Exception("mfwel error adding record to list: " + str(e))
+
+    @staticmethod
+    def get_default_dtype(structured=True):
+        if structured:
+            dtype = np.dtype([("k", int), ("i", int),
+                              ("j", int), ("flux", np.float32)])
+        else:
+            dtype = np.dtype([("node", int), ("flux", np.float32)])
+        return dtype
+
+    @staticmethod
+    def get_empty(ncells=0, aux_names=None, structured=True):
+        # get an empty recarray that corresponds to dtype
+        dtype = ModflowWel.get_default_dtype(structured=structured)
+        if aux_names is not None:
+            dtype = Package.add_to_dtype(dtype, aux_names, np.float32)
+        return create_empty_recarray(ncells, dtype,
default_value=-1.0E+10) + + @staticmethod + def get_sfac_columns(): + return ['flux'] + + @staticmethod + def load(f, model, nper=None, ext_unit_dict=None, check=True): + """ + Load an existing package. + + Parameters + ---------- + f : filename or file handle + File to load. + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to + which this package will be added. + nper : int + The number of stress periods. If nper is None, then nper will be + obtained from the model object. (default is None). + ext_unit_dict : dictionary, optional + If the arrays in the file are specified using EXTERNAL, + or older style array control records, then `f` should be a file + handle. In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. + + Returns + ------- + wel : ModflowWel object + ModflowWel object. + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> wel = flopy.modflow.ModflowWel.load('test.wel', m) + + """ + + if model.verbose: + sys.stdout.write('loading wel package file...\n') + + return Package.load(f, model, ModflowWel, nper=nper, check=check, + ext_unit_dict=ext_unit_dict) + + @staticmethod + def ftype(): + return 'WEL' + + @staticmethod + def defaultunit(): + return 20 diff --git a/flopy/modflow/mfzon.py b/flopy/modflow/mfzon.py index 36af874ad5..397e504a6b 100644 --- a/flopy/modflow/mfzon.py +++ b/flopy/modflow/mfzon.py @@ -1,220 +1,220 @@ -""" -mfzon module. Contains the ModflowZone class. Note that the user can access -the ModflowZone class as `flopy.modflow.ModflowZone`. - -Additional information for this MODFLOW package can be found at the `Online -MODFLOW Guide -`_. - -""" -import sys -import collections -import numpy as np -from ..pakbase import Package -from ..utils import Util2d - - -class ModflowZon(Package): - """ - MODFLOW Zone Package Class. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to which - this package will be added. - zone_dict : dict - Dictionary with zone data for the model. zone_dict is typically - instantiated using load method. - extension : string - Filename extension (default is 'zon') - unitnumber : int - File unit number (default is None). - filenames : str or list of str - Filenames to use for the package. If filenames=None the package name - will be created using the model name and package extension. If a - single string is passed the package will be set to the string. - Default is None. - - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - Parameters are supported in Flopy only when reading in existing models. - Parameter values are converted to native values in Flopy and the - connection to "parameters" is thus nonexistent. - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> zonedict = flopy.modflow.ModflowZon(m, zone_dict=zone_dict) - - """ - - def __init__(self, model, zone_dict=None, - extension='zon', unitnumber=None, filenames=None): - """ - Package constructor. 
- - """ - # set default unit number of one is not specified - if unitnumber is None: - unitnumber = ModflowZon.defaultunit() - - # set filenames - if filenames is None: - filenames = [None] - elif isinstance(filenames, str): - filenames = [filenames] - - # Fill namefile items - name = [ModflowZon.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and - # unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - self.heading = '# {} package for '.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' - self.url = 'zone.htm' - - self.nzn = 0 - if zone_dict is not None: - self.nzn = len(zone_dict) - self.zone_dict = zone_dict - self.parent.add_package(self) - - def write_file(self): - """ - Write the package file. - - Returns - ------- - None - - Notes - ----- - Not implemented because parameters are only supported on load - - """ - return - - @staticmethod - def load(f, model, nrow=None, ncol=None, ext_unit_dict=None): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. - nrow : int - number of rows. If not specified it will be retrieved from - the model object. (default is None). - ncol : int - number of columns. If not specified it will be retrieved from - the model object. (default is None). - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - - Returns - ------- - zone : ModflowZone dict - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> zon = flopy.modflow.ModflowZon.load('test.zon', m) - - """ - - if model.verbose: - sys.stdout.write('loading zone package file...\n') - - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - - # dataset 0 -- header - while True: - line = f.readline() - if line[0] != '#': - break - # dataset 1 - t = line.strip().split() - nzn = int(t[0]) - - # get nlay,nrow,ncol if not passed - if nrow is None and ncol is None: - nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper() - - # read zone data - zone_dict = collections.OrderedDict() - for n in range(nzn): - line = f.readline() - t = line.strip().split() - if len(t[0]) > 10: - zonnam = t[0][0:10].lower() - else: - zonnam = t[0].lower() - if model.verbose: - sys.stdout.write( - ' reading data for "{:<10s}" zone\n'.format(zonnam)) - # load data - t = Util2d.load(f, model, (nrow, ncol), np.int32, zonnam, - ext_unit_dict) - # add unit number to list of external files in ext_unit_dict - # to remove. 
- if t.locat is not None: - model.add_pop_key_list(t.locat) - zone_dict[zonnam] = t - - if openfile: - f.close() - - # set package unit number - unitnumber = None - filenames = [None] - if ext_unit_dict is not None: - unitnumber, filenames[0] = \ - model.get_ext_dict_attr(ext_unit_dict, - filetype=ModflowZon.ftype()) - - zon = ModflowZon(model, zone_dict=zone_dict, unitnumber=unitnumber, - filenames=filenames) - return zon - - @staticmethod - def ftype(): - return 'ZONE' - - @staticmethod - def defaultunit(): - return 1001 +""" +mfzon module. Contains the ModflowZone class. Note that the user can access +the ModflowZone class as `flopy.modflow.ModflowZone`. + +Additional information for this MODFLOW package can be found at the `Online +MODFLOW Guide +`_. + +""" +import sys +import collections +import numpy as np +from ..pakbase import Package +from ..utils import Util2d + + +class ModflowZon(Package): + """ + MODFLOW Zone Package Class. + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to which + this package will be added. + zone_dict : dict + Dictionary with zone data for the model. zone_dict is typically + instantiated using load method. + extension : string + Filename extension (default is 'zon') + unitnumber : int + File unit number (default is None). + filenames : str or list of str + Filenames to use for the package. If filenames=None the package name + will be created using the model name and package extension. If a + single string is passed the package will be set to the string. + Default is None. + + + Attributes + ---------- + + Methods + ------- + + See Also + -------- + + Notes + ----- + Parameters are supported in Flopy only when reading in existing models. + Parameter values are converted to native values in Flopy and the + connection to "parameters" is thus nonexistent. + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> zonedict = flopy.modflow.ModflowZon(m, zone_dict=zone_dict) + + """ + + def __init__(self, model, zone_dict=None, + extension='zon', unitnumber=None, filenames=None): + """ + Package constructor. + + """ + # set default unit number of one is not specified + if unitnumber is None: + unitnumber = ModflowZon.defaultunit() + + # set filenames + if filenames is None: + filenames = [None] + elif isinstance(filenames, str): + filenames = [filenames] + + # Fill namefile items + name = [ModflowZon.ftype()] + units = [unitnumber] + extra = [''] + + # set package name + fname = [filenames[0]] + + # Call ancestor's init to set self.parent, extension, name and + # unit number + Package.__init__(self, model, extension=extension, name=name, + unit_number=units, extra=extra, filenames=fname) + + self.heading = '# {} package for '.format(self.name[0]) + \ + ' {}, '.format(model.version_types[model.version]) + \ + 'generated by Flopy.' + self.url = 'zone.htm' + + self.nzn = 0 + if zone_dict is not None: + self.nzn = len(zone_dict) + self.zone_dict = zone_dict + self.parent.add_package(self) + + def write_file(self): + """ + Write the package file. + + Returns + ------- + None + + Notes + ----- + Not implemented because parameters are only supported on load + + """ + return + + @staticmethod + def load(f, model, nrow=None, ncol=None, ext_unit_dict=None): + """ + Load an existing package. + + Parameters + ---------- + f : filename or file handle + File to load. 
+ model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to + which this package will be added. + nrow : int + number of rows. If not specified it will be retrieved from + the model object. (default is None). + ncol : int + number of columns. If not specified it will be retrieved from + the model object. (default is None). + ext_unit_dict : dictionary, optional + If the arrays in the file are specified using EXTERNAL, + or older style array control records, then `f` should be a file + handle. In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. + + Returns + ------- + zone : ModflowZone dict + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow() + >>> zon = flopy.modflow.ModflowZon.load('test.zon', m) + + """ + + if model.verbose: + sys.stdout.write('loading zone package file...\n') + + openfile = not hasattr(f, 'read') + if openfile: + filename = f + f = open(filename, 'r') + + # dataset 0 -- header + while True: + line = f.readline() + if line[0] != '#': + break + # dataset 1 + t = line.strip().split() + nzn = int(t[0]) + + # get nlay,nrow,ncol if not passed + if nrow is None and ncol is None: + nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper() + + # read zone data + zone_dict = collections.OrderedDict() + for n in range(nzn): + line = f.readline() + t = line.strip().split() + if len(t[0]) > 10: + zonnam = t[0][0:10].lower() + else: + zonnam = t[0].lower() + if model.verbose: + sys.stdout.write( + ' reading data for "{:<10s}" zone\n'.format(zonnam)) + # load data + t = Util2d.load(f, model, (nrow, ncol), np.int32, zonnam, + ext_unit_dict) + # add unit number to list of external files in ext_unit_dict + # to remove. + if t.locat is not None: + model.add_pop_key_list(t.locat) + zone_dict[zonnam] = t + + if openfile: + f.close() + + # set package unit number + unitnumber = None + filenames = [None] + if ext_unit_dict is not None: + unitnumber, filenames[0] = \ + model.get_ext_dict_attr(ext_unit_dict, + filetype=ModflowZon.ftype()) + + zon = ModflowZon(model, zone_dict=zone_dict, unitnumber=unitnumber, + filenames=filenames) + return zon + + @staticmethod + def ftype(): + return 'ZONE' + + @staticmethod + def defaultunit(): + return 1001 diff --git a/flopy/modflowlgr/__init__.py b/flopy/modflowlgr/__init__.py index 8efcb69bd7..6442d112ca 100644 --- a/flopy/modflowlgr/__init__.py +++ b/flopy/modflowlgr/__init__.py @@ -1,2 +1,2 @@ -from .mflgr import ModflowLgr, LgrChild - +from .mflgr import ModflowLgr, LgrChild + diff --git a/flopy/modflowlgr/mflgr.py b/flopy/modflowlgr/mflgr.py index fbbe4564b0..1750cfc719 100644 --- a/flopy/modflowlgr/mflgr.py +++ b/flopy/modflowlgr/mflgr.py @@ -1,603 +1,603 @@ -""" -mf module. Contains the ModflowGlobal, ModflowList, and Modflow classes. 
- - -""" - -import os -import sys - -from ..mbase import BaseModel -from ..modflow import Modflow - - -class LgrChild(): - def __init__(self, ishflg=1, ibflg=59, iucbhsv=0, iucbfsv=0, - mxlgriter=20, ioutlgr=1, relaxh=0.4, relaxf=0.4, - hcloselgr=5e-3, fcloselgr=5e-2, - nplbeg=0, nprbeg=0, npcbeg=0, - nplend=0, nprend=1, npcend=1, - ncpp=2, ncppl=1): - self.ishflg = ishflg - self.ibflg = ibflg - self.iucbhsv = iucbhsv - self.iucbfsv = iucbfsv - self.mxlgriter = mxlgriter - self.ioutlgr = ioutlgr - self.relaxh = relaxh - self.relaxf = relaxf - self.hcloselgr = hcloselgr - self.fcloselgr = fcloselgr - self.nplbeg = nplbeg - self.nprbeg = nprbeg - self.npcbeg = npcbeg - self.nplend = nplend - self.nprend = nprend - self.npcend = npcend - self.ncpp = ncpp - if isinstance(ncppl, int): - nlaychild = nplend - nplbeg + 1 - self.ncppl = nlaychild * [ncppl] - else: - self.ncppl = ncppl - - -class ModflowLgr(BaseModel): - """ - MODFLOW-LGR Model Class. - - Parameters - ---------- - modelname : string, optional - Name of model. This string will be used to name the MODFLOW input - that are created with write_model. (the default is 'modflowtest') - namefile_ext : string, optional - Extension for the namefile (the default is 'nam') - version : string, optional - Version of MODFLOW to use (the default is 'mf2005'). - exe_name : string, optional - The name of the executable to use (the default is - 'mf2005'). - listunit : integer, optional - Unit number for the list file (the default is 2). - model_ws : string, optional - model workspace. Directory name to create model data sets. - (default is the present working directory). - external_path : string - Location for external files (default is None). - verbose : boolean, optional - Print additional information to the screen (default is False). - load : boolean, optional - (default is True). 
- - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - - Examples - -------- - - >>> import flopy - >>> lgr = flopy.modflowlgr.ModflowLgr(parent=parent, children=children, - >>> children_data=children_data) - - """ - - def __init__(self, modelname='modflowlgrtest', namefile_ext='lgr', - version='mflgr', exe_name='mflgr.exe', - iupbhsv=0, iupbfsv=0, - parent=None, children=None, children_data=None, model_ws='.', - external_path=None, - verbose=False, **kwargs): - BaseModel.__init__(self, modelname, namefile_ext, exe_name, model_ws, - structured=True, verbose=verbose, **kwargs) - self.version_types = {'mflgr': 'MODFLOW-LGR'} - - self.set_version(version) - - # external option stuff - self.array_free_format = True - self.array_format = 'modflow' - - self.iupbhsv = iupbhsv - self.iupbfsv = iupbfsv - - self.parent = parent - if children is not None: - if not isinstance(children, list): - children = [children] - self.children_models = children - if children_data is not None: - if not isinstance(children_data, list): - children_data = [children_data] - self.children_data = children_data - - # set the number of grids - self.children = 0 - if children is not None: - self.children += len(children) - - self.load_fail = False - # the starting external data unit number - self._next_ext_unit = 2000 - - # convert iupbhsv, iupbhsv, iucbhsv, and iucbfsv units from - # external_files to output_files - ibhsv = self.iupbhsv - ibfsv = self.iupbfsv - if ibhsv > 0: - self.parent.add_output_file(ibhsv, binflag=False) - if ibfsv > 0: - self.parent.add_output_file(ibfsv, binflag=False) - for child, child_data in zip(self.children_models, self.children_data): - ibhsv = child_data.iucbhsv - ibfsv = child_data.iucbfsv - if ibhsv > 0: - child.add_output_file(ibhsv, binflag=False) - if ibfsv > 0: - child.add_output_file(ibfsv, binflag=False) - - if external_path is not None: - if os.path.exists(os.path.join(model_ws, external_path)): - print("Note: external_path " + str(external_path) + - " already exists") - else: - os.makedirs(os.path.join(model_ws, external_path)) - self.external_path = external_path - - return - - def __repr__(self): - return 'MODFLOW-LGR model with {} grids'.format(self.ngrids) - - @property - def ngrids(self): - try: - return 1 + self.children - except: - return None - - def write_input(self, SelPackList=False, check=False): - """ - Write the input. 
Overrides BaseModels's write_input - - Parameters - ---------- - SelPackList : False or list of packages - - """ - if check: - # run check prior to writing input - pass - - if self.verbose: - print('\nWriting packages:') - - # write lgr file - self.write_name_file() - - # write MODFLOW files for parent model - self.parent.write_input(SelPackList=SelPackList, check=check) - - # write MODFLOW files for the children models - for child in self.children_models: - child.write_input(SelPackList=SelPackList, check=check) - - def _padline(self, line, comment=None, line_len=79): - if len(line) < line_len: - fmt = '{:' + '{}'.format(line_len) + 's}' - line = fmt.format(line) - if comment is not None: - line += ' # {}\n'.format(comment) - return line - - def _get_path(self, bpth, pth, fpth=''): - lpth = os.path.abspath(bpth) - mpth = os.path.abspath(pth) - rpth = os.path.relpath(mpth, lpth) - if rpth == '.': - rpth = fpth - else: - rpth = os.path.join(rpth, fpth) - msg = 'namefiles must be in the same directory as ' + \ - 'the lgr control file\n' - msg += 'Control file path: {}\n'.format(lpth) - msg += 'Namefile path: {}\n'.format(mpth) - msg += 'Relative path: {}\n'.format(rpth) - raise ValueError(msg) - return rpth - - def get_namefiles(self): - """ - Get the namefiles (with path) of the parent and children models - - Returns - ------- - namefiles : list - - - Examples - -------- - - >>> import flopy - >>> lgr = flopy.modflowlgr.ModflowLgr.load(f) - >>> namefiles = lgr.get_namefiles() - - """ - pth = os.path.join(self.parent._model_ws, self.parent.namefile) - namefiles = [pth] - for child in self.children_models: - pth = os.path.join(child._model_ws, child.namefile) - namefiles.append(pth) - return namefiles - - def write_name_file(self): - """ - Write the modflow-lgr control file. 
- """ - fn_path = os.path.join(self.model_ws, self.namefile) - f = open(fn_path, 'w') - f.write('{}\n'.format(self.heading)) - - # dataset 1 - line = self._padline('LGR', comment='data set 1') - f.write(line) - - # dataset 2 - line = '{}'.format(self.ngrids) - line = self._padline(line, comment='data set 2 - ngridsS') - f.write(line) - - # dataset 3 - pth = self._get_path(self._model_ws, self.parent._model_ws, - fpth=self.parent.namefile) - line = self._padline(pth, comment='data set 3 - parent namefile') - f.write(line) - - # dataset 4 - line = self._padline('PARENTONLY', comment='data set 4 - gridstatus') - f.write(line) - - # dataset 5 - line = '{} {}'.format(self.iupbhsv, self.iupbfsv) - line = self._padline(line, comment='data set 5 - iupbhsv, iupbfsv') - f.write(line) - - # dataset 6 to 15 for each child - for idx, (child, child_data) in enumerate(zip(self.children_models, - self.children_data)): - # dataset 6 - pth = self._get_path(self._model_ws, child._model_ws, - fpth=child.namefile) - comment = 'data set 6 - child {} namefile'.format(idx + 1) - line = self._padline(pth, comment=comment) - f.write(line) - - # dataset 7 - comment = 'data set 7 - child {} gridstatus'.format(idx + 1) - line = self._padline('CHILDONLY', - comment=comment) - f.write(line) - - # dataset 8 - line = '{} {} {} {}'.format(child_data.ishflg, child_data.ibflg, - child_data.iucbhsv, child_data.iucbfsv) - comment = 'data set 8 - child {} '.format(idx + 1) + \ - 'ishflg, ibflg, iucbhsv, iucbfsv' - line = self._padline(line, comment=comment) - f.write(line) - - # dataset 9 - line = '{} {}'.format(child_data.mxlgriter, child_data.ioutlgr) - comment = 'data set 9 - child {} '.format(idx + 1) + \ - 'mxlgriter, ioutlgr' - line = self._padline(line, comment=comment) - f.write(line) - - # dataset 10 - line = '{} {}'.format(child_data.relaxh, child_data.relaxf) - comment = 'data set 10 - child {} '.format(idx + 1) + \ - 'relaxh, relaxf' - line = self._padline(line, comment=comment) - f.write(line) - - # dataset 11 - line = '{} {}'.format(child_data.hcloselgr, child_data.fcloselgr) - comment = 'data set 11 - child {} '.format(idx + 1) + \ - 'hcloselgr, fcloselgr' - line = self._padline(line, comment=comment) - f.write(line) - - # dataset 12 - line = '{} {} {}'.format(child_data.nplbeg + 1, - child_data.nprbeg + 1, - child_data.npcbeg + 1) - comment = 'data set 12 - child {} '.format(idx + 1) + \ - 'nplbeg, nprbeg, npcbeg' - line = self._padline(line, comment=comment) - f.write(line) - - # dataset 13 - line = '{} {} {}'.format(child_data.nplend + 1, - child_data.nprend + 1, - child_data.npcend + 1) - comment = 'data set 13 - child {} '.format(idx + 1) + \ - 'nplend, nprend, npcend' - line = self._padline(line, comment=comment) - f.write(line) - - # dataset 14 - line = '{}'.format(child_data.ncpp) - comment = 'data set 14 - child {} '.format(idx + 1) + \ - 'ncpp' - line = self._padline(line, comment=comment) - f.write(line) - - # dataset 15 - line = '' - for ndx in child_data.ncppl: - line += '{} '.format(ndx) - comment = 'data set 15 - child {} '.format(idx + 1) + \ - 'ncppl' - line = self._padline(line, comment=comment) - f.write(line) - - # close the lgr control file - f.close() - - def change_model_ws(self, new_pth=None, reset_external=False): - - """ - Change the model work space. - - Parameters - ---------- - new_pth : str - Location of new model workspace. If this path does not exist, - it will be created. (default is None, which will be assigned to - the present working directory). 
-
-    def change_model_ws(self, new_pth=None, reset_external=False):
-        """
-        Change the model work space.
-
-        Parameters
-        ----------
-        new_pth : str
-            Location of new model workspace. If this path does not exist,
-            it will be created. (default is None, which will be assigned to
-            the present working directory).
-
-        Returns
-        -------
-        None
-
-        """
-        if new_pth is None:
-            new_pth = os.getcwd()
-        if not os.path.exists(new_pth):
-            try:
-                sys.stdout.write(
-                    '\ncreating model workspace...\n   {}\n'.format(new_pth))
-                os.makedirs(new_pth)
-            except:
-                line = '\n{} not valid, workspace-folder '.format(new_pth) + \
-                       'was changed to {}\n'.format(os.getcwd())
-                print(line)
-                new_pth = os.getcwd()
-        # --reset the model workspace
-        old_pth = self._model_ws
-        self._model_ws = new_pth
-        line = '\nchanging model workspace...\n   {}\n'.format(new_pth)
-        sys.stdout.write(line)
-
-        # reset model_ws for the parent
-        lpth = os.path.abspath(old_pth)
-        mpth = os.path.abspath(self.parent._model_ws)
-        rpth = os.path.relpath(mpth, lpth)
-        if rpth == '.':
-            npth = new_pth
-        else:
-            npth = os.path.join(new_pth, rpth)
-        self.parent.change_model_ws(new_pth=npth,
-                                    reset_external=reset_external)
-        # reset model_ws for the children
-        for child in self.children_models:
-            lpth = os.path.abspath(old_pth)
-            mpth = os.path.abspath(child._model_ws)
-            rpth = os.path.relpath(mpth, lpth)
-            if rpth == '.':
-                npth = new_pth
-            else:
-                npth = os.path.join(new_pth, rpth)
-            child.change_model_ws(new_pth=npth,
-                                  reset_external=reset_external)
-
-    @staticmethod
-    def load(f, version='mflgr', exe_name='mflgr.exe', verbose=False,
-             model_ws='.', load_only=None, forgive=False, check=True):
-        """
-        Load an existing model.
-
-        Parameters
-        ----------
-        f : filename or file handle
-            MODFLOW-LGR name file to load.
-
-        model_ws : model workspace path
-
-        load_only : (optional) filetype(s) to load (e.g. ["bas6", "lpf"])
-
-        forgive : flag to raise exception(s) on package load failure
-            - good for debugging
-
-        check : boolean
-            Check model input for common errors. (default True)
-
-        Returns
-        -------
-        lgr : ModflowLgr object
-
-        Examples
-        --------
-
-        >>> import flopy
-        >>> lgr = flopy.modflowlgr.ModflowLgr.load(f)
-
-        """
-        # test if name file is passed with extension (i.e., is a valid file)
-        if os.path.isfile(os.path.join(model_ws, f)):
-            modelname = f.rpartition('.')[0]
-        else:
-            modelname = f
-
-        openfile = not hasattr(f, 'read')
-        if openfile:
-            filename = os.path.join(model_ws, f)
-            f = open(filename, 'r')
-
-        # dataset 0 -- header
-        header = ''
-        while True:
-            line = f.readline()
-            if line[0] != '#':
-                break
-            header += line.strip()
-
-        # dataset 1
-        ds1 = line.split()[0].lower()
-        msg = 'LGR must be entered as the first item in dataset 1\n'
-        msg += '  {}\n'.format(header)
-        assert ds1 == 'lgr', msg
-
-        # dataset 2
-        line = f.readline()
-        t = line.split()
-        ngrids = int(t[0])
-        nchildren = ngrids - 1
-
-        # dataset 3
-        line = f.readline()
-        t = line.split()
-        namefile = t[0]
-        pws = os.path.join(model_ws, os.path.dirname(namefile))
-        pn = os.path.basename(namefile)
-
-        # dataset 4
-        line = f.readline()
-        t = line.split()
-        gridstatus = t[0].lower()
-        msg = "GRIDSTATUS for the parent must be 'PARENTONLY'"
-        assert gridstatus == 'parentonly', msg
-
-        # dataset 5
-        line = f.readline()
-        t = line.split()
-        try:
-            iupbhsv, iupbfsv = int(t[0]), int(t[1])
-        except:
-            msg = 'could not read dataset 5 - IUPBHSV and IUPBFSV.'
-            raise ValueError(msg)
-
-        # non-zero values for IUPBHSV and IUPBFSV in dataset 5 are not
-        # supported
-        if iupbhsv + iupbfsv > 0:
-            msg = 'nonzero values for IUPBHSV ({}) '.format(iupbhsv) + \
-                  'and IUPBFSV ({}) '.format(iupbfsv) + \
-                  'are not supported.'
-            raise ValueError(msg)
-
-        # load the parent model
-        parent = Modflow.load(pn, verbose=verbose, model_ws=pws,
-                              load_only=load_only, forgive=forgive,
-                              check=check)
-
-        children_data = []
-        children = []
-        for child in range(nchildren):
-            # dataset 6
-            line = f.readline()
-            t = line.split()
-            namefile = t[0]
-            cws = os.path.join(model_ws, os.path.dirname(namefile))
-            cn = os.path.basename(namefile)
-
-            # dataset 7
-            line = f.readline()
-            t = line.split()
-            gridstatus = t[0].lower()
-            msg = "GRIDSTATUS for each child must be 'CHILDONLY'"
-            assert gridstatus == 'childonly', msg
-
-            # dataset 8
-            line = f.readline()
-            t = line.split()
-            ishflg, ibflg, iucbhsv, iucbfsv = int(t[0]), int(t[1]), int(
-                t[2]), int(t[3])
-
-            # dataset 9
-            line = f.readline()
-            t = line.split()
-            mxlgriter, ioutlgr = int(t[0]), int(t[1])
-
-            # dataset 10
-            line = f.readline()
-            t = line.split()
-            relaxh, relaxf = float(t[0]), float(t[1])
-
-            # dataset 11
-            line = f.readline()
-            t = line.split()
-            hcloselgr, fcloselgr = float(t[0]), float(t[1])
-
-            # dataset 12
-            line = f.readline()
-            t = line.split()
-            nplbeg, nprbeg, npcbeg = int(t[0]) - 1, int(t[1]) - 1, int(
-                t[2]) - 1
-
-            # dataset 13
-            line = f.readline()
-            t = line.split()
-            nplend, nprend, npcend = int(t[0]) - 1, int(t[1]) - 1, int(
-                t[2]) - 1
-
-            # dataset 14
-            line = f.readline()
-            t = line.split()
-            ncpp = int(t[0])
-
-            # dataset 15
-            line = f.readline()
-            t = line.split()
-            ncppl = []
-            for idx in range(nplend + 1 - nplbeg):
-                ncppl.append(int(t[idx]))
-
-            # build child data object
-
-            children_data.append(LgrChild(ishflg=ishflg, ibflg=ibflg,
-                                          iucbhsv=iucbhsv, iucbfsv=iucbfsv,
-                                          mxlgriter=mxlgriter, ioutlgr=ioutlgr,
-                                          relaxh=relaxh, relaxf=relaxf,
-                                          hcloselgr=hcloselgr,
-                                          fcloselgr=fcloselgr,
-                                          nplbeg=nplbeg, nprbeg=nprbeg,
-                                          npcbeg=npcbeg,
-                                          nplend=nplend, nprend=nprend,
-                                          npcend=npcend,
-                                          ncpp=ncpp, ncppl=ncppl))
-            # load child model
-            children.append(Modflow.load(cn, verbose=verbose, model_ws=cws,
-                                         load_only=load_only, forgive=forgive,
-                                         check=check))
-
-        if openfile:
-            f.close()
-
-        lgr = ModflowLgr(version=version, exe_name=exe_name,
-                         modelname=modelname, model_ws=model_ws,
-                         verbose=verbose,
-                         iupbhsv=iupbhsv, iupbfsv=iupbfsv,
-                         parent=parent,
-                         children=children, children_data=children_data)
-
-        # return model object
-        return lgr
+"""
+mflgr module. Contains the LgrChild and ModflowLgr classes.
+
+
+"""
+
+import os
+import sys
+
+from ..mbase import BaseModel
+from ..modflow import Modflow
+
+
+class LgrChild():
+    def __init__(self, ishflg=1, ibflg=59, iucbhsv=0, iucbfsv=0,
+                 mxlgriter=20, ioutlgr=1, relaxh=0.4, relaxf=0.4,
+                 hcloselgr=5e-3, fcloselgr=5e-2,
+                 nplbeg=0, nprbeg=0, npcbeg=0,
+                 nplend=0, nprend=1, npcend=1,
+                 ncpp=2, ncppl=1):
+        self.ishflg = ishflg
+        self.ibflg = ibflg
+        self.iucbhsv = iucbhsv
+        self.iucbfsv = iucbfsv
+        self.mxlgriter = mxlgriter
+        self.ioutlgr = ioutlgr
+        self.relaxh = relaxh
+        self.relaxf = relaxf
+        self.hcloselgr = hcloselgr
+        self.fcloselgr = fcloselgr
+        self.nplbeg = nplbeg
+        self.nprbeg = nprbeg
+        self.npcbeg = npcbeg
+        self.nplend = nplend
+        self.nprend = nprend
+        self.npcend = npcend
+        self.ncpp = ncpp
+        if isinstance(ncppl, int):
+            nlaychild = nplend - nplbeg + 1
+            self.ncppl = nlaychild * [ncppl]
+        else:
+            self.ncppl = ncppl
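A small usage sketch of the ncppl broadcasting above (values chosen for
illustration): an integer refinement count is expanded to one entry per
child-model layer.

    from flopy.modflowlgr.mflgr import LgrChild

    # child spans parent layers 0 and 1, so the scalar ncppl=3 becomes [3, 3]
    child_data = LgrChild(nplbeg=0, nplend=1, ncpp=3, ncppl=3)
    assert child_data.ncppl == [3, 3]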
+
+
+class ModflowLgr(BaseModel):
+    """
+    MODFLOW-LGR Model Class.
+
+    Parameters
+    ----------
+    modelname : string, optional
+        Name of model. This string will be used to name the MODFLOW-LGR
+        input files that are created with write_input.
+        (the default is 'modflowlgrtest')
+    namefile_ext : string, optional
+        Extension for the namefile (the default is 'lgr')
+    version : string, optional
+        Version of MODFLOW-LGR to use (the default is 'mflgr').
+    exe_name : string, optional
+        The name of the executable to use (the default is
+        'mflgr.exe').
+    listunit : integer, optional
+        Unit number for the list file (the default is 2).
+    model_ws : string, optional
+        model workspace. Directory name to create model data sets.
+        (default is the present working directory).
+    external_path : string
+        Location for external files (default is None).
+    verbose : boolean, optional
+        Print additional information to the screen (default is False).
+    load : boolean, optional
+        (default is True).
+
+    Attributes
+    ----------
+
+    Methods
+    -------
+
+    See Also
+    --------
+
+    Notes
+    -----
+
+    Examples
+    --------
+
+    >>> import flopy
+    >>> lgr = flopy.modflowlgr.ModflowLgr(parent=parent, children=children,
+    >>>                                   children_data=children_data)
+
+    """
+
+    def __init__(self, modelname='modflowlgrtest', namefile_ext='lgr',
+                 version='mflgr', exe_name='mflgr.exe',
+                 iupbhsv=0, iupbfsv=0,
+                 parent=None, children=None, children_data=None, model_ws='.',
+                 external_path=None,
+                 verbose=False, **kwargs):
+        BaseModel.__init__(self, modelname, namefile_ext, exe_name, model_ws,
+                           structured=True, verbose=verbose, **kwargs)
+        self.version_types = {'mflgr': 'MODFLOW-LGR'}
+
+        self.set_version(version)
+
+        # external option stuff
+        self.array_free_format = True
+        self.array_format = 'modflow'
+
+        self.iupbhsv = iupbhsv
+        self.iupbfsv = iupbfsv
+
+        self.parent = parent
+        if children is not None:
+            if not isinstance(children, list):
+                children = [children]
+        self.children_models = children
+        if children_data is not None:
+            if not isinstance(children_data, list):
+                children_data = [children_data]
+        self.children_data = children_data
+
+        # set the number of grids
+        self.children = 0
+        if children is not None:
+            self.children += len(children)
+
+        self.load_fail = False
+        # the starting external data unit number
+        self._next_ext_unit = 2000
+
+        # convert iupbhsv, iupbfsv, iucbhsv, and iucbfsv units from
+        # external_files to output_files
+        ibhsv = self.iupbhsv
+        ibfsv = self.iupbfsv
+        if ibhsv > 0:
+            self.parent.add_output_file(ibhsv, binflag=False)
+        if ibfsv > 0:
+            self.parent.add_output_file(ibfsv, binflag=False)
+        for child, child_data in zip(self.children_models,
+                                     self.children_data):
+            ibhsv = child_data.iucbhsv
+            ibfsv = child_data.iucbfsv
+            if ibhsv > 0:
+                child.add_output_file(ibhsv, binflag=False)
+            if ibfsv > 0:
+                child.add_output_file(ibfsv, binflag=False)
+
+        if external_path is not None:
+            if os.path.exists(os.path.join(model_ws, external_path)):
+                print("Note: external_path " + str(external_path) +
+                      " already exists")
+            else:
+                os.makedirs(os.path.join(model_ws, external_path))
+        self.external_path = external_path
+
+        return
+
+    def __repr__(self):
+        return 'MODFLOW-LGR model with {} grids'.format(self.ngrids)
+
+    @property
+    def ngrids(self):
+        try:
+            return 1 + self.children
+        except:
+            return None
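Assembling a model with this constructor might look as follows. This is a
hedged sketch: the parent and child Modflow objects are bare placeholders
that a real application would populate with grids and packages before
writing input.

    import flopy

    parent = flopy.modflow.Modflow(modelname='parent', model_ws='lgr_ws')
    child = flopy.modflow.Modflow(modelname='child', model_ws='lgr_ws')
    child_data = flopy.modflowlgr.mflgr.LgrChild(nplbeg=0, nplend=0,
                                                 nprbeg=1, nprend=3,
                                                 npcbeg=1, npcend=3,
                                                 ncpp=3, ncppl=1)
    # a single child (and its LgrChild) need not be wrapped in a list;
    # the constructor listifies scalars
    lgr = flopy.modflowlgr.ModflowLgr(modelname='lgrtest', model_ws='lgr_ws',
                                      parent=parent, children=child,
                                      children_data=child_data)
    print(lgr.ngrids)  # 2 -> the parent grid plus one child grid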
+
+    def write_input(self, SelPackList=False, check=False):
+        """
+        Write the input.
+
+        Overrides BaseModel's write_input
+
+        Parameters
+        ----------
+        SelPackList : False or list of packages
+
+        """
+        if check:
+            # run check prior to writing input
+            pass
+
+        if self.verbose:
+            print('\nWriting packages:')
+
+        # write lgr file
+        self.write_name_file()
+
+        # write MODFLOW files for parent model
+        self.parent.write_input(SelPackList=SelPackList, check=check)
+
+        # write MODFLOW files for the children models
+        for child in self.children_models:
+            child.write_input(SelPackList=SelPackList, check=check)
+
+    def _padline(self, line, comment=None, line_len=79):
+        if len(line) < line_len:
+            fmt = '{:' + '{}'.format(line_len) + 's}'
+            line = fmt.format(line)
+        if comment is not None:
+            line += '  # {}\n'.format(comment)
+        return line
+
+    def _get_path(self, bpth, pth, fpth=''):
+        lpth = os.path.abspath(bpth)
+        mpth = os.path.abspath(pth)
+        rpth = os.path.relpath(mpth, lpth)
+        if rpth == '.':
+            rpth = fpth
+        else:
+            rpth = os.path.join(rpth, fpth)
+            msg = 'namefiles must be in the same directory as ' + \
+                  'the lgr control file\n'
+            msg += 'Control file path: {}\n'.format(lpth)
+            msg += 'Namefile path: {}\n'.format(mpth)
+            msg += 'Relative path: {}\n'.format(rpth)
+            raise ValueError(msg)
+        return rpth
+
+    def get_namefiles(self):
+        """
+        Get the namefiles (with path) of the parent and children models
+
+        Returns
+        -------
+        namefiles : list
+
+
+        Examples
+        --------
+
+        >>> import flopy
+        >>> lgr = flopy.modflowlgr.ModflowLgr.load(f)
+        >>> namefiles = lgr.get_namefiles()
+
+        """
+        pth = os.path.join(self.parent._model_ws, self.parent.namefile)
+        namefiles = [pth]
+        for child in self.children_models:
+            pth = os.path.join(child._model_ws, child.namefile)
+            namefiles.append(pth)
+        return namefiles
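To make the fixed-width formatting concrete, here is a stand-alone sketch of
what _padline produces; the helper below simply mirrors the method above.

    def padline(line, comment=None, line_len=79):
        # pad the record to line_len columns, then append the comment
        if len(line) < line_len:
            line = '{:{width}s}'.format(line, width=line_len)
        if comment is not None:
            line += '  # {}\n'.format(comment)
        return line

    print(padline('LGR', comment='data set 1'))
    # 'LGR' padded to column 79, followed by '  # data set 1'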
+
+    def write_name_file(self):
+        """
+        Write the modflow-lgr control file.
+        """
+        fn_path = os.path.join(self.model_ws, self.namefile)
+        f = open(fn_path, 'w')
+        f.write('{}\n'.format(self.heading))
+
+        # dataset 1
+        line = self._padline('LGR', comment='data set 1')
+        f.write(line)
+
+        # dataset 2
+        line = '{}'.format(self.ngrids)
+        line = self._padline(line, comment='data set 2 - ngrids')
+        f.write(line)
+
+        # dataset 3
+        pth = self._get_path(self._model_ws, self.parent._model_ws,
+                             fpth=self.parent.namefile)
+        line = self._padline(pth, comment='data set 3 - parent namefile')
+        f.write(line)
+
+        # dataset 4
+        line = self._padline('PARENTONLY', comment='data set 4 - gridstatus')
+        f.write(line)
+
+        # dataset 5
+        line = '{} {}'.format(self.iupbhsv, self.iupbfsv)
+        line = self._padline(line, comment='data set 5 - iupbhsv, iupbfsv')
+        f.write(line)
+
+        # dataset 6 to 15 for each child
+        for idx, (child, child_data) in enumerate(zip(self.children_models,
+                                                      self.children_data)):
+            # dataset 6
+            pth = self._get_path(self._model_ws, child._model_ws,
+                                 fpth=child.namefile)
+            comment = 'data set 6 - child {} namefile'.format(idx + 1)
+            line = self._padline(pth, comment=comment)
+            f.write(line)
+
+            # dataset 7
+            comment = 'data set 7 - child {} gridstatus'.format(idx + 1)
+            line = self._padline('CHILDONLY',
+                                 comment=comment)
+            f.write(line)
+
+            # dataset 8
+            line = '{} {} {} {}'.format(child_data.ishflg, child_data.ibflg,
+                                        child_data.iucbhsv, child_data.iucbfsv)
+            comment = 'data set 8 - child {} '.format(idx + 1) + \
+                      'ishflg, ibflg, iucbhsv, iucbfsv'
+            line = self._padline(line, comment=comment)
+            f.write(line)
+
+            # dataset 9
+            line = '{} {}'.format(child_data.mxlgriter, child_data.ioutlgr)
+            comment = 'data set 9 - child {} '.format(idx + 1) + \
+                      'mxlgriter, ioutlgr'
+            line = self._padline(line, comment=comment)
+            f.write(line)
+
+            # dataset 10
+            line = '{} {}'.format(child_data.relaxh, child_data.relaxf)
+            comment = 'data set 10 - child {} '.format(idx + 1) + \
+                      'relaxh, relaxf'
+            line = self._padline(line, comment=comment)
+            f.write(line)
+
+            # dataset 11
+            line = '{} {}'.format(child_data.hcloselgr, child_data.fcloselgr)
+            comment = 'data set 11 - child {} '.format(idx + 1) + \
+                      'hcloselgr, fcloselgr'
+            line = self._padline(line, comment=comment)
+            f.write(line)
+
+            # dataset 12
+            line = '{} {} {}'.format(child_data.nplbeg + 1,
+                                     child_data.nprbeg + 1,
+                                     child_data.npcbeg + 1)
+            comment = 'data set 12 - child {} '.format(idx + 1) + \
+                      'nplbeg, nprbeg, npcbeg'
+            line = self._padline(line, comment=comment)
+            f.write(line)
+
+            # dataset 13
+            line = '{} {} {}'.format(child_data.nplend + 1,
+                                     child_data.nprend + 1,
+                                     child_data.npcend + 1)
+            comment = 'data set 13 - child {} '.format(idx + 1) + \
+                      'nplend, nprend, npcend'
+            line = self._padline(line, comment=comment)
+            f.write(line)
+
+            # dataset 14
+            line = '{}'.format(child_data.ncpp)
+            comment = 'data set 14 - child {} '.format(idx + 1) + \
+                      'ncpp'
+            line = self._padline(line, comment=comment)
+            f.write(line)
+
+            # dataset 15
+            line = ''
+            for ndx in child_data.ncppl:
+                line += '{} '.format(ndx)
+            comment = 'data set 15 - child {} '.format(idx + 1) + \
+                      'ncppl'
+            line = self._padline(line, comment=comment)
+            f.write(line)
+
+        # close the lgr control file
+        f.close()
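For orientation, the control file written above holds one record per data
set, preceded by a heading comment line, with the comments produced by
_padline aligned on the right. An illustrative file for a two-grid (one
child) model could look like this; the values are placeholders, not from a
real model:

    LGR                  # data set 1
    2                    # data set 2 - ngrids
    parent.nam           # data set 3 - parent namefile
    PARENTONLY           # data set 4 - gridstatus
    0 0                  # data set 5 - iupbhsv, iupbfsv
    child.nam            # data set 6 - child 1 namefile
    CHILDONLY            # data set 7 - child 1 gridstatus
    1 59 0 0             # data set 8 - child 1 ishflg, ibflg, iucbhsv, iucbfsv
    20 1                 # data set 9 - child 1 mxlgriter, ioutlgr
    0.4 0.4              # data set 10 - child 1 relaxh, relaxf
    0.005 0.05           # data set 11 - child 1 hcloselgr, fcloselgr
    1 2 2                # data set 12 - child 1 nplbeg, nprbeg, npcbeg
    1 4 4                # data set 13 - child 1 nplend, nprend, npcend
    3                    # data set 14 - child 1 ncpp
    3                    # data set 15 - child 1 ncppl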
+
+    def change_model_ws(self, new_pth=None, reset_external=False):
+        """
+        Change the model work space.
+
+        Parameters
+        ----------
+        new_pth : str
+            Location of new model workspace. If this path does not exist,
+            it will be created. (default is None, which will be assigned to
+            the present working directory).
+
+        Returns
+        -------
+        None
+
+        """
+        if new_pth is None:
+            new_pth = os.getcwd()
+        if not os.path.exists(new_pth):
+            try:
+                sys.stdout.write(
+                    '\ncreating model workspace...\n   {}\n'.format(new_pth))
+                os.makedirs(new_pth)
+            except:
+                line = '\n{} not valid, workspace-folder '.format(new_pth) + \
+                       'was changed to {}\n'.format(os.getcwd())
+                print(line)
+                new_pth = os.getcwd()
+        # --reset the model workspace
+        old_pth = self._model_ws
+        self._model_ws = new_pth
+        line = '\nchanging model workspace...\n   {}\n'.format(new_pth)
+        sys.stdout.write(line)
+
+        # reset model_ws for the parent
+        lpth = os.path.abspath(old_pth)
+        mpth = os.path.abspath(self.parent._model_ws)
+        rpth = os.path.relpath(mpth, lpth)
+        if rpth == '.':
+            npth = new_pth
+        else:
+            npth = os.path.join(new_pth, rpth)
+        self.parent.change_model_ws(new_pth=npth,
+                                    reset_external=reset_external)
+        # reset model_ws for the children
+        for child in self.children_models:
+            lpth = os.path.abspath(old_pth)
+            mpth = os.path.abspath(child._model_ws)
+            rpth = os.path.relpath(mpth, lpth)
+            if rpth == '.':
+                npth = new_pth
+            else:
+                npth = os.path.join(new_pth, rpth)
+            child.change_model_ws(new_pth=npth,
+                                  reset_external=reset_external)
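As a round-trip sketch tying change_model_ws and load (below) together; the
file and directory names here are placeholders for an existing model:

    import flopy

    # read an existing LGR control file plus its parent and child models
    lgr = flopy.modflowlgr.ModflowLgr.load('ex3.lgr', model_ws='old_ws')
    print(lgr.get_namefiles())     # name files of the parent and children
    lgr.change_model_ws('new_ws')  # relocates the parent and children too
    lgr.write_input()              # rewrites the control file and models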
+
+    @staticmethod
+    def load(f, version='mflgr', exe_name='mflgr.exe', verbose=False,
+             model_ws='.', load_only=None, forgive=False, check=True):
+        """
+        Load an existing model.
+
+        Parameters
+        ----------
+        f : filename or file handle
+            MODFLOW-LGR name file to load.
+
+        model_ws : model workspace path
+
+        load_only : (optional) filetype(s) to load (e.g. ["bas6", "lpf"])
+
+        forgive : flag to raise exception(s) on package load failure
+            - good for debugging
+
+        check : boolean
+            Check model input for common errors. (default True)
+
+        Returns
+        -------
+        lgr : ModflowLgr object
+
+        Examples
+        --------
+
+        >>> import flopy
+        >>> lgr = flopy.modflowlgr.ModflowLgr.load(f)
+
+        """
+        # test if name file is passed with extension (i.e., is a valid file)
+        if os.path.isfile(os.path.join(model_ws, f)):
+            modelname = f.rpartition('.')[0]
+        else:
+            modelname = f
+
+        openfile = not hasattr(f, 'read')
+        if openfile:
+            filename = os.path.join(model_ws, f)
+            f = open(filename, 'r')
+
+        # dataset 0 -- header
+        header = ''
+        while True:
+            line = f.readline()
+            if line[0] != '#':
+                break
+            header += line.strip()
+
+        # dataset 1
+        ds1 = line.split()[0].lower()
+        msg = 'LGR must be entered as the first item in dataset 1\n'
+        msg += '  {}\n'.format(header)
+        assert ds1 == 'lgr', msg
+
+        # dataset 2
+        line = f.readline()
+        t = line.split()
+        ngrids = int(t[0])
+        nchildren = ngrids - 1
+
+        # dataset 3
+        line = f.readline()
+        t = line.split()
+        namefile = t[0]
+        pws = os.path.join(model_ws, os.path.dirname(namefile))
+        pn = os.path.basename(namefile)
+
+        # dataset 4
+        line = f.readline()
+        t = line.split()
+        gridstatus = t[0].lower()
+        msg = "GRIDSTATUS for the parent must be 'PARENTONLY'"
+        assert gridstatus == 'parentonly', msg
+
+        # dataset 5
+        line = f.readline()
+        t = line.split()
+        try:
+            iupbhsv, iupbfsv = int(t[0]), int(t[1])
+        except:
+            msg = 'could not read dataset 5 - IUPBHSV and IUPBFSV.'
+            raise ValueError(msg)
+
+        # non-zero values for IUPBHSV and IUPBFSV in dataset 5 are not
+        # supported
+        if iupbhsv + iupbfsv > 0:
+            msg = 'nonzero values for IUPBHSV ({}) '.format(iupbhsv) + \
+                  'and IUPBFSV ({}) '.format(iupbfsv) + \
+                  'are not supported.'
+            raise ValueError(msg)
+
+        # load the parent model
+        parent = Modflow.load(pn, verbose=verbose, model_ws=pws,
+                              load_only=load_only, forgive=forgive,
+                              check=check)
+
+        children_data = []
+        children = []
+        for child in range(nchildren):
+            # dataset 6
+            line = f.readline()
+            t = line.split()
+            namefile = t[0]
+            cws = os.path.join(model_ws, os.path.dirname(namefile))
+            cn = os.path.basename(namefile)
+
+            # dataset 7
+            line = f.readline()
+            t = line.split()
+            gridstatus = t[0].lower()
+            msg = "GRIDSTATUS for each child must be 'CHILDONLY'"
+            assert gridstatus == 'childonly', msg
+
+            # dataset 8
+            line = f.readline()
+            t = line.split()
+            ishflg, ibflg, iucbhsv, iucbfsv = int(t[0]), int(t[1]), int(
+                t[2]), int(t[3])
+
+            # dataset 9
+            line = f.readline()
+            t = line.split()
+            mxlgriter, ioutlgr = int(t[0]), int(t[1])
+
+            # dataset 10
+            line = f.readline()
+            t = line.split()
+            relaxh, relaxf = float(t[0]), float(t[1])
+
+            # dataset 11
+            line = f.readline()
+            t = line.split()
+            hcloselgr, fcloselgr = float(t[0]), float(t[1])
+
+            # dataset 12
+            line = f.readline()
+            t = line.split()
+            nplbeg, nprbeg, npcbeg = int(t[0]) - 1, int(t[1]) - 1, int(
+                t[2]) - 1
+
+            # dataset 13
+            line = f.readline()
+            t = line.split()
+            nplend, nprend, npcend = int(t[0]) - 1, int(t[1]) - 1, int(
+                t[2]) - 1
+
+            # dataset 14
+            line = f.readline()
+            t = line.split()
+            ncpp = int(t[0])
+
+            # dataset 15
+            line = f.readline()
+            t = line.split()
+            ncppl = []
+            for idx in range(nplend + 1 - nplbeg):
+                ncppl.append(int(t[idx]))
+
+            # build child data object
+
+            children_data.append(LgrChild(ishflg=ishflg, ibflg=ibflg,
+                                          iucbhsv=iucbhsv, iucbfsv=iucbfsv,
+                                          mxlgriter=mxlgriter, ioutlgr=ioutlgr,
+                                          relaxh=relaxh, relaxf=relaxf,
+                                          hcloselgr=hcloselgr,
+                                          fcloselgr=fcloselgr,
+                                          nplbeg=nplbeg, nprbeg=nprbeg,
+                                          npcbeg=npcbeg,
+                                          nplend=nplend, nprend=nprend,
+                                          npcend=npcend,
+                                          ncpp=ncpp, ncppl=ncppl))
+            # load child model
+            children.append(Modflow.load(cn, verbose=verbose, model_ws=cws,
+                                         load_only=load_only, forgive=forgive,
+                                         check=check))
+
+        if openfile:
+            f.close()
+
+        lgr = ModflowLgr(version=version, exe_name=exe_name,
+                         modelname=modelname, model_ws=model_ws,
+                         verbose=verbose,
+                         iupbhsv=iupbhsv, iupbfsv=iupbfsv,
+                         parent=parent,
+                         children=children, children_data=children_data)
+
+        # return model object
+        return lgr
diff --git a/flopy/modpath/__init__.py b/flopy/modpath/__init__.py
index f284d41dfd..2fc6016bfb 100644
--- a/flopy/modpath/__init__.py
+++ b/flopy/modpath/__init__.py
@@ -1,11 +1,11 @@
-from .mp import Modpath
-from .mpbas import ModpathBas
-from .mpsim import ModpathSim
-from .mp7 import Modpath7
-from .mp7bas import Modpath7Bas
-from .mp7sim import Modpath7Sim
-from .mp7particlegroup import ParticleGroup, ParticleGroupLRCTemplate, \
-    ParticleGroupNodeTemplate
-from .mp7particledata import ParticleData, FaceDataType, CellDataType, \
-    LRCParticleData, NodeParticleData
-
+from .mp import Modpath
+from .mpbas import ModpathBas
+from .mpsim import ModpathSim
+from .mp7 import Modpath7
+from .mp7bas import Modpath7Bas
+from .mp7sim import Modpath7Sim
+from .mp7particlegroup import ParticleGroup, ParticleGroupLRCTemplate, \
+    ParticleGroupNodeTemplate
+from .mp7particledata import ParticleData, FaceDataType, CellDataType, \
+    LRCParticleData, NodeParticleData
+
diff --git a/flopy/modpath/mp.py b/flopy/modpath/mp.py
index 4da95a14b7..d972ee5edb 100644
--- a/flopy/modpath/mp.py
+++ b/flopy/modpath/mp.py
@@ -1,389 +1,389 @@
-import numpy as np
-from ..mbase import BaseModel
-from ..pakbase import Package
-from .mpsim
import ModpathSim -from .mpbas import ModpathBas -import os - - -class ModpathList(Package): - """ - List package class - """ - - def __init__(self, model, extension='list', listunit=7): - """ - Package constructor. - - """ - # Call ancestor's init to set self.parent, extension, name and - # unit number - Package.__init__(self, model, extension, 'LIST', listunit) - # self.parent.add_package(self) This package is not added to the base - # model so that it is not included in get_name_file_entries() - return - - def write_file(self): - # Not implemented for list class - return - - -class Modpath(BaseModel): - """ - Modpath base class - - """ - - def __init__(self, modelname='modpathtest', simfile_ext='mpsim', - namefile_ext='mpnam', - version='modpath', exe_name='mp6.exe', modflowmodel=None, - dis_file=None, dis_unit=87, head_file=None, budget_file=None, - model_ws=None, external_path=None, verbose=False, - load=True, listunit=7): - """ - Model constructor. - - """ - BaseModel.__init__(self, modelname, simfile_ext, exe_name, - model_ws=model_ws, verbose=verbose) - - self.version_types = {'modpath': 'MODPATH'} - self.set_version(version) - - self.__mf = modflowmodel - self.lst = ModpathList(self, listunit=listunit) - self.mpnamefile = '{}.{}'.format(self.name, namefile_ext) - self.mpbas_file = '{}.mpbas'.format(modelname) - if self.__mf is not None: - # ensure that user-specified files are used - iu = self.__mf.oc.iuhead - head_file = self.__mf.get_output(unit=iu) - p = self.__mf.get_package('LPF') - if p is None: - p = self.__mf.get_package('BCF6') - if p is None: - p = self.__mf.get_package('UPW') - if p is None: - msg = 'LPF, BCF6, or UPW packages must be included in the ' + \ - 'passed MODFLOW model' - raise Exception(msg) - iu = p.ipakcb - budget_file = self.__mf.get_output(unit=iu) - dis_file = self.__mf.dis.file_name[0] \ - if dis_file is None else dis_file - dis_unit = self.__mf.dis.unit_number[0] - self.head_file = head_file - self.budget_file = budget_file - self.dis_file = dis_file - self.dis_unit = dis_unit - # make sure the valid files are available - if self.head_file is None: - msg = 'the head file in the MODFLOW model or passed ' + \ - 'to __init__ cannot be None' - raise ValueError(msg) - if self.budget_file is None: - msg = 'the budget file in the MODFLOW model or passed ' + \ - 'to __init__ cannot be None' - raise ValueError(msg) - if self.dis_file is None: - msg = 'the dis file in the MODFLOW model or passed ' + \ - 'to __init__ cannot be None' - raise ValueError(msg) - - # set the rest of the attributes - self.__sim = None - self.array_free_format = False - self.array_format = 'modflow' - self.external_path = external_path - self.external = False - self.external_fnames = [] - self.external_units = [] - self.external_binflag = [] - self.load = load - self.__next_ext_unit = 500 - if external_path is not None: - assert os.path.exists( - external_path), 'external_path does not exist' - self.external = True - return - - def __repr__(self): - return 'Modpath model' - - # function to encapsulate next_ext_unit attribute - def next_ext_unit(self): - self.__next_ext_unit += 1 - return self.__next_ext_unit - - def getsim(self): - if (self.__sim == None): - for p in (self.packagelist): - if isinstance(p, ModpathSim): - self.__sim = p - return self.__sim - - def getmf(self): - return self.__mf - - def write_name_file(self): - """ - Write the name file - - Returns - ------- - None - - """ - fn_path = os.path.join(self.model_ws, self.mpnamefile) - f_nam = open(fn_path, 'w') - 
f_nam.write('%s\n' % (self.heading)) - if self.mpbas_file is not None: - f_nam.write('%s %3i %s\n' % ('MPBAS', 86, self.mpbas_file)) - if self.dis_file is not None: - f_nam.write('%s %3i %s\n' % ('DIS', self.dis_unit, self.dis_file)) - if self.head_file is not None: - f_nam.write('%s %3i %s\n' % ('HEAD', 88, self.head_file)) - if self.budget_file is not None: - f_nam.write('%s %3i %s\n' % ('BUDGET', 89, self.budget_file)) - for u, f in zip(self.external_units, self.external_fnames): - f_nam.write('DATA {0:3d} '.format(u) + f + '\n') - f_nam.close() - - sim = property(getsim) # Property has no setter, so read-only - mf = property(getmf) # Property has no setter, so read-only - - def create_mpsim(self, simtype='pathline', trackdir='forward', - packages='WEL', start_time=0, default_ifaces=None, - ParticleColumnCount=4, ParticleRowCount=4, - MinRow=0, MinColumn=0, MaxRow=None, MaxColumn=None, - ): - """ - Create a MODPATH simulation file using available MODFLOW boundary - package data. - - Parameters - ---------- - simtype : str - Keyword defining the MODPATH simulation type. Available simtype's - are 'endpoint', 'pathline', and 'timeseries'. - (default is 'PATHLINE') - trackdir : str - Keyword that defines the MODPATH particle tracking direction. - Available trackdir's are 'backward' and 'forward'. - (default is 'forward') - packages : str or list of strings - Keyword defining the modflow packages used to create initial - particle locations. Supported packages are 'WEL', 'MNW2' and 'RCH'. - (default is 'WEL'). - start_time : float or tuple - Sets the value of MODPATH reference time relative to MODFLOW time. - float : value of MODFLOW simulation time at which to start the particle tracking simulation. - Sets the value of MODPATH ReferenceTimeOption to 1. - tuple : (period, step, time fraction) MODFLOW stress period, time step and fraction - between 0 and 1 at which to start the particle tracking simulation. - Sets the value of MODPATH ReferenceTimeOption to 2. - default_ifaces : list - List of cell faces (1-6; see MODPATH6 manual, fig. 7) on which to start particles. - (default is None, meaning ifaces will vary depending on packages argument above) - ParticleRowCount : int - Rows of particles to start on each cell index face (iface). - ParticleColumnCount : int - Columns of particles to start on each cell index face (iface). - - Returns - ------- - mpsim : ModpathSim object - - """ - if isinstance(packages, str): - packages = [packages] - pak_list = self.__mf.get_package_list() - - # not sure if this is the best way to handle this - ReferenceTimeOption = 1 - ref_time = 0 - ref_time_per_stp = (0, 0, 1.) - if isinstance(start_time, tuple): - ReferenceTimeOption = 2 # 1: specify value for ref. time, 2: specify kper, kstp, rel. time pos - ref_time_per_stp = start_time - else: - ref_time = start_time - - # set iface particle grids - ptrow = ParticleRowCount - ptcol = ParticleColumnCount - side_faces = [[1, ptrow, ptcol], [2, ptrow, ptcol], - [3, ptrow, ptcol], [4, ptrow, ptcol]] - top_face = [5, ptrow, ptcol] - botm_face = [6, ptrow, ptcol] - if default_ifaces is not None: - default_ifaces = [[ifc, ptrow, ptcol] for ifc in default_ifaces] - - Grid = 1 - GridCellRegionOption = 1 - PlacementOption = 1 - ReleaseStartTime = 0. 
- ReleaseOption = 1 - CHeadOption = 1 - nper = self.__mf.dis.nper - nlay, nrow, ncol = self.__mf.dis.nlay, \ - self.__mf.dis.nrow, \ - self.__mf.dis.ncol - arr = np.zeros((nlay, nrow, ncol), dtype=np.int) - group_name = [] - group_region = [] - group_placement = [] - ifaces = [] - face_ct = [] - strt_file = None - for package in packages: - - if package.upper() == 'WEL': - ParticleGenerationOption = 1 - if 'WEL' not in pak_list: - raise Exception( - 'Error: no well package in the passed model') - for kper in range(nper): - mflist = self.__mf.wel.stress_period_data[kper] - idx = (mflist['k'], mflist['i'], mflist['j']) - arr[idx] = 1 - ngrp = arr.sum() - icnt = 0 - for k in range(nlay): - for i in range(nrow): - for j in range(ncol): - if arr[k, i, j] < 1: - continue - group_name.append('wc{}'.format(icnt)) - group_placement.append([Grid, GridCellRegionOption, - PlacementOption, - ReleaseStartTime, - ReleaseOption, - CHeadOption]) - group_region.append([k, i, j, k, i, j]) - if default_ifaces is None: - ifaces.append( - side_faces + [top_face, botm_face]) - face_ct.append(6) - else: - ifaces.append(default_ifaces) - face_ct.append(len(default_ifaces)) - icnt += 1 - # this is kind of a band aid pending refactoring of mpsim class - elif 'MNW' in package.upper(): - ParticleGenerationOption = 1 - if 'MNW2' not in pak_list: - raise Exception( - 'Error: no MNW2 package in the passed model') - node_data = self.__mf.mnw2.get_allnode_data() - node_data.sort(order=['wellid', 'k']) - wellids = np.unique(node_data.wellid) - - def append_node(ifaces_well, wellid, node_number, k, i, j): - """add a single MNW node""" - group_region.append([k, i, j, k, i, j]) - if default_ifaces is None: - ifaces.append(ifaces_well) - face_ct.append(len(ifaces_well)) - else: - ifaces.append(default_ifaces) - face_ct.append(len(default_ifaces)) - group_name.append('{}{}'.format(wellid, node_number)) - group_placement.append([Grid, GridCellRegionOption, - PlacementOption, - ReleaseStartTime, - ReleaseOption, - CHeadOption]) - - for wellid in wellids: - nd = node_data[node_data.wellid == wellid] - k, i, j = nd.k[0], nd.i[0], nd.j[0] - if len(nd) == 1: - append_node(side_faces + [top_face, botm_face], - wellid, 0, k, i, j) - else: - append_node(side_faces + [top_face], - wellid, 0, k, i, j) - for n in range(len(nd))[1:]: - k, i, j = nd.k[n], nd.i[n], nd.j[n] - if n == len(nd) - 1: - append_node(side_faces + [botm_face], - wellid, n, k, i, j) - else: - append_node(side_faces, - wellid, n, k, i, j) - elif package.upper() == 'RCH': - ParticleGenerationOption = 1 - # for j in range(nrow): - # for i in range(ncol): - # group_name.append('rch') - group_name.append('rch') - group_placement.append([Grid, GridCellRegionOption, - PlacementOption, - ReleaseStartTime, - ReleaseOption, CHeadOption]) - group_region.append([0, 0, 0, 0, nrow - 1, ncol - 1]) - if default_ifaces is None: - face_ct.append(1) - ifaces.append([[6, 1, 1]]) - else: - ifaces.append(default_ifaces) - face_ct.append(len(default_ifaces)) - - - else: - model_ws = '' - if self.__mf is not None: - model_ws = self.__mf.model_ws - if os.path.exists(os.path.join(model_ws, package)): - print( - "detected a particle starting locations file in packages") - assert len( - packages) == 1, "if a particle starting locations file is passed" + \ - ", other packages cannot be specified" - ParticleGenerationOption = 2 - strt_file = package - else: - raise Exception( - "package '{0}' not supported".format(package)) - - SimulationType = 1 - if simtype.lower() == 'endpoint': - SimulationType 
= 1 - elif simtype.lower() == 'pathline': - SimulationType = 2 - elif simtype.lower() == 'timeseries': - SimulationType = 3 - if trackdir.lower() == 'forward': - TrackingDirection = 1 - elif trackdir.lower() == 'backward': - TrackingDirection = 2 - WeakSinkOption = 2 - WeakSourceOption = 1 - - StopOption = 2 - - if SimulationType == 1: - TimePointOption = 1 - else: - TimePointOption = 3 - BudgetOutputOption = 1 - ZoneArrayOption = 1 - RetardationOption = 1 - AdvectiveObservationsOption = 1 - - mpoptions = [SimulationType, TrackingDirection, WeakSinkOption, - WeakSourceOption, ReferenceTimeOption, StopOption, - ParticleGenerationOption, TimePointOption, - BudgetOutputOption, ZoneArrayOption, RetardationOption, - AdvectiveObservationsOption] - - return ModpathSim(self, - ref_time=ref_time, - ref_time_per_stp=ref_time_per_stp, - option_flags=mpoptions, - group_placement=group_placement, - group_name=group_name, - group_region=group_region, - face_ct=face_ct, ifaces=ifaces, - strt_file=strt_file) +import numpy as np +from ..mbase import BaseModel +from ..pakbase import Package +from .mpsim import ModpathSim +from .mpbas import ModpathBas +import os + + +class ModpathList(Package): + """ + List package class + """ + + def __init__(self, model, extension='list', listunit=7): + """ + Package constructor. + + """ + # Call ancestor's init to set self.parent, extension, name and + # unit number + Package.__init__(self, model, extension, 'LIST', listunit) + # self.parent.add_package(self) This package is not added to the base + # model so that it is not included in get_name_file_entries() + return + + def write_file(self): + # Not implemented for list class + return + + +class Modpath(BaseModel): + """ + Modpath base class + + """ + + def __init__(self, modelname='modpathtest', simfile_ext='mpsim', + namefile_ext='mpnam', + version='modpath', exe_name='mp6.exe', modflowmodel=None, + dis_file=None, dis_unit=87, head_file=None, budget_file=None, + model_ws=None, external_path=None, verbose=False, + load=True, listunit=7): + """ + Model constructor. 
+ + """ + BaseModel.__init__(self, modelname, simfile_ext, exe_name, + model_ws=model_ws, verbose=verbose) + + self.version_types = {'modpath': 'MODPATH'} + self.set_version(version) + + self.__mf = modflowmodel + self.lst = ModpathList(self, listunit=listunit) + self.mpnamefile = '{}.{}'.format(self.name, namefile_ext) + self.mpbas_file = '{}.mpbas'.format(modelname) + if self.__mf is not None: + # ensure that user-specified files are used + iu = self.__mf.oc.iuhead + head_file = self.__mf.get_output(unit=iu) + p = self.__mf.get_package('LPF') + if p is None: + p = self.__mf.get_package('BCF6') + if p is None: + p = self.__mf.get_package('UPW') + if p is None: + msg = 'LPF, BCF6, or UPW packages must be included in the ' + \ + 'passed MODFLOW model' + raise Exception(msg) + iu = p.ipakcb + budget_file = self.__mf.get_output(unit=iu) + dis_file = self.__mf.dis.file_name[0] \ + if dis_file is None else dis_file + dis_unit = self.__mf.dis.unit_number[0] + self.head_file = head_file + self.budget_file = budget_file + self.dis_file = dis_file + self.dis_unit = dis_unit + # make sure the valid files are available + if self.head_file is None: + msg = 'the head file in the MODFLOW model or passed ' + \ + 'to __init__ cannot be None' + raise ValueError(msg) + if self.budget_file is None: + msg = 'the budget file in the MODFLOW model or passed ' + \ + 'to __init__ cannot be None' + raise ValueError(msg) + if self.dis_file is None: + msg = 'the dis file in the MODFLOW model or passed ' + \ + 'to __init__ cannot be None' + raise ValueError(msg) + + # set the rest of the attributes + self.__sim = None + self.array_free_format = False + self.array_format = 'modflow' + self.external_path = external_path + self.external = False + self.external_fnames = [] + self.external_units = [] + self.external_binflag = [] + self.load = load + self.__next_ext_unit = 500 + if external_path is not None: + assert os.path.exists( + external_path), 'external_path does not exist' + self.external = True + return + + def __repr__(self): + return 'Modpath model' + + # function to encapsulate next_ext_unit attribute + def next_ext_unit(self): + self.__next_ext_unit += 1 + return self.__next_ext_unit + + def getsim(self): + if (self.__sim == None): + for p in (self.packagelist): + if isinstance(p, ModpathSim): + self.__sim = p + return self.__sim + + def getmf(self): + return self.__mf + + def write_name_file(self): + """ + Write the name file + + Returns + ------- + None + + """ + fn_path = os.path.join(self.model_ws, self.mpnamefile) + f_nam = open(fn_path, 'w') + f_nam.write('%s\n' % (self.heading)) + if self.mpbas_file is not None: + f_nam.write('%s %3i %s\n' % ('MPBAS', 86, self.mpbas_file)) + if self.dis_file is not None: + f_nam.write('%s %3i %s\n' % ('DIS', self.dis_unit, self.dis_file)) + if self.head_file is not None: + f_nam.write('%s %3i %s\n' % ('HEAD', 88, self.head_file)) + if self.budget_file is not None: + f_nam.write('%s %3i %s\n' % ('BUDGET', 89, self.budget_file)) + for u, f in zip(self.external_units, self.external_fnames): + f_nam.write('DATA {0:3d} '.format(u) + f + '\n') + f_nam.close() + + sim = property(getsim) # Property has no setter, so read-only + mf = property(getmf) # Property has no setter, so read-only + + def create_mpsim(self, simtype='pathline', trackdir='forward', + packages='WEL', start_time=0, default_ifaces=None, + ParticleColumnCount=4, ParticleRowCount=4, + MinRow=0, MinColumn=0, MaxRow=None, MaxColumn=None, + ): + """ + Create a MODPATH simulation file using available MODFLOW boundary 
+ package data. + + Parameters + ---------- + simtype : str + Keyword defining the MODPATH simulation type. Available simtype's + are 'endpoint', 'pathline', and 'timeseries'. + (default is 'PATHLINE') + trackdir : str + Keyword that defines the MODPATH particle tracking direction. + Available trackdir's are 'backward' and 'forward'. + (default is 'forward') + packages : str or list of strings + Keyword defining the modflow packages used to create initial + particle locations. Supported packages are 'WEL', 'MNW2' and 'RCH'. + (default is 'WEL'). + start_time : float or tuple + Sets the value of MODPATH reference time relative to MODFLOW time. + float : value of MODFLOW simulation time at which to start the particle tracking simulation. + Sets the value of MODPATH ReferenceTimeOption to 1. + tuple : (period, step, time fraction) MODFLOW stress period, time step and fraction + between 0 and 1 at which to start the particle tracking simulation. + Sets the value of MODPATH ReferenceTimeOption to 2. + default_ifaces : list + List of cell faces (1-6; see MODPATH6 manual, fig. 7) on which to start particles. + (default is None, meaning ifaces will vary depending on packages argument above) + ParticleRowCount : int + Rows of particles to start on each cell index face (iface). + ParticleColumnCount : int + Columns of particles to start on each cell index face (iface). + + Returns + ------- + mpsim : ModpathSim object + + """ + if isinstance(packages, str): + packages = [packages] + pak_list = self.__mf.get_package_list() + + # not sure if this is the best way to handle this + ReferenceTimeOption = 1 + ref_time = 0 + ref_time_per_stp = (0, 0, 1.) + if isinstance(start_time, tuple): + ReferenceTimeOption = 2 # 1: specify value for ref. time, 2: specify kper, kstp, rel. time pos + ref_time_per_stp = start_time + else: + ref_time = start_time + + # set iface particle grids + ptrow = ParticleRowCount + ptcol = ParticleColumnCount + side_faces = [[1, ptrow, ptcol], [2, ptrow, ptcol], + [3, ptrow, ptcol], [4, ptrow, ptcol]] + top_face = [5, ptrow, ptcol] + botm_face = [6, ptrow, ptcol] + if default_ifaces is not None: + default_ifaces = [[ifc, ptrow, ptcol] for ifc in default_ifaces] + + Grid = 1 + GridCellRegionOption = 1 + PlacementOption = 1 + ReleaseStartTime = 0. 
+ ReleaseOption = 1 + CHeadOption = 1 + nper = self.__mf.dis.nper + nlay, nrow, ncol = self.__mf.dis.nlay, \ + self.__mf.dis.nrow, \ + self.__mf.dis.ncol + arr = np.zeros((nlay, nrow, ncol), dtype=np.int) + group_name = [] + group_region = [] + group_placement = [] + ifaces = [] + face_ct = [] + strt_file = None + for package in packages: + + if package.upper() == 'WEL': + ParticleGenerationOption = 1 + if 'WEL' not in pak_list: + raise Exception( + 'Error: no well package in the passed model') + for kper in range(nper): + mflist = self.__mf.wel.stress_period_data[kper] + idx = (mflist['k'], mflist['i'], mflist['j']) + arr[idx] = 1 + ngrp = arr.sum() + icnt = 0 + for k in range(nlay): + for i in range(nrow): + for j in range(ncol): + if arr[k, i, j] < 1: + continue + group_name.append('wc{}'.format(icnt)) + group_placement.append([Grid, GridCellRegionOption, + PlacementOption, + ReleaseStartTime, + ReleaseOption, + CHeadOption]) + group_region.append([k, i, j, k, i, j]) + if default_ifaces is None: + ifaces.append( + side_faces + [top_face, botm_face]) + face_ct.append(6) + else: + ifaces.append(default_ifaces) + face_ct.append(len(default_ifaces)) + icnt += 1 + # this is kind of a band aid pending refactoring of mpsim class + elif 'MNW' in package.upper(): + ParticleGenerationOption = 1 + if 'MNW2' not in pak_list: + raise Exception( + 'Error: no MNW2 package in the passed model') + node_data = self.__mf.mnw2.get_allnode_data() + node_data.sort(order=['wellid', 'k']) + wellids = np.unique(node_data.wellid) + + def append_node(ifaces_well, wellid, node_number, k, i, j): + """add a single MNW node""" + group_region.append([k, i, j, k, i, j]) + if default_ifaces is None: + ifaces.append(ifaces_well) + face_ct.append(len(ifaces_well)) + else: + ifaces.append(default_ifaces) + face_ct.append(len(default_ifaces)) + group_name.append('{}{}'.format(wellid, node_number)) + group_placement.append([Grid, GridCellRegionOption, + PlacementOption, + ReleaseStartTime, + ReleaseOption, + CHeadOption]) + + for wellid in wellids: + nd = node_data[node_data.wellid == wellid] + k, i, j = nd.k[0], nd.i[0], nd.j[0] + if len(nd) == 1: + append_node(side_faces + [top_face, botm_face], + wellid, 0, k, i, j) + else: + append_node(side_faces + [top_face], + wellid, 0, k, i, j) + for n in range(len(nd))[1:]: + k, i, j = nd.k[n], nd.i[n], nd.j[n] + if n == len(nd) - 1: + append_node(side_faces + [botm_face], + wellid, n, k, i, j) + else: + append_node(side_faces, + wellid, n, k, i, j) + elif package.upper() == 'RCH': + ParticleGenerationOption = 1 + # for j in range(nrow): + # for i in range(ncol): + # group_name.append('rch') + group_name.append('rch') + group_placement.append([Grid, GridCellRegionOption, + PlacementOption, + ReleaseStartTime, + ReleaseOption, CHeadOption]) + group_region.append([0, 0, 0, 0, nrow - 1, ncol - 1]) + if default_ifaces is None: + face_ct.append(1) + ifaces.append([[6, 1, 1]]) + else: + ifaces.append(default_ifaces) + face_ct.append(len(default_ifaces)) + + + else: + model_ws = '' + if self.__mf is not None: + model_ws = self.__mf.model_ws + if os.path.exists(os.path.join(model_ws, package)): + print( + "detected a particle starting locations file in packages") + assert len( + packages) == 1, "if a particle starting locations file is passed" + \ + ", other packages cannot be specified" + ParticleGenerationOption = 2 + strt_file = package + else: + raise Exception( + "package '{0}' not supported".format(package)) + + SimulationType = 1 + if simtype.lower() == 'endpoint': + SimulationType 
= 1 + elif simtype.lower() == 'pathline': + SimulationType = 2 + elif simtype.lower() == 'timeseries': + SimulationType = 3 + if trackdir.lower() == 'forward': + TrackingDirection = 1 + elif trackdir.lower() == 'backward': + TrackingDirection = 2 + WeakSinkOption = 2 + WeakSourceOption = 1 + + StopOption = 2 + + if SimulationType == 1: + TimePointOption = 1 + else: + TimePointOption = 3 + BudgetOutputOption = 1 + ZoneArrayOption = 1 + RetardationOption = 1 + AdvectiveObservationsOption = 1 + + mpoptions = [SimulationType, TrackingDirection, WeakSinkOption, + WeakSourceOption, ReferenceTimeOption, StopOption, + ParticleGenerationOption, TimePointOption, + BudgetOutputOption, ZoneArrayOption, RetardationOption, + AdvectiveObservationsOption] + + return ModpathSim(self, + ref_time=ref_time, + ref_time_per_stp=ref_time_per_stp, + option_flags=mpoptions, + group_placement=group_placement, + group_name=group_name, + group_region=group_region, + face_ct=face_ct, ifaces=ifaces, + strt_file=strt_file) diff --git a/flopy/modpath/mp7.py b/flopy/modpath/mp7.py index 33b7287aa5..e85070f596 100644 --- a/flopy/modpath/mp7.py +++ b/flopy/modpath/mp7.py @@ -1,463 +1,463 @@ -""" -mp7 module. Contains the Modpath7List and Modpath7 classes. - - -""" - -import numpy as np -from ..mbase import BaseModel -from ..modflow import Modflow -from ..mf6 import MFModel -from ..pakbase import Package -from .mp7bas import Modpath7Bas -from .mp7sim import Modpath7Sim -from .mp7particledata import CellDataType, NodeParticleData -from .mp7particlegroup import ParticleGroupNodeTemplate -import os - - -class Modpath7List(Package): - """ - List package class - - """ - - def __init__(self, model, extension='list', unitnumber=None): - """ - Package constructor. - - """ - if unitnumber is None: - unitnumber = model.next_unit() - - # Call ancestor's init to set self.parent, extension, name and - # unit number - Package.__init__(self, model, extension, 'LIST', unitnumber) - # self.parent.add_package(self) This package is not added to the base - # model so that it is not included in get_name_file_entries() - return - - def write_file(self): - # Not implemented for list class - return - - -class Modpath7(BaseModel): - """ - Modpath 7 base class - - Parameters - ---------- - modelname : str - Basename for MODPATH 7 input and output files (default is - 'modpath7test'). - simfile_ext : str - Filename extension of the MODPATH 7 simulation file - (default is 'mpsim'). - namefile_ext : str - Filename extension of the MODPATH 7 namefile - (default is 'mpnam'). - version : str - String that defines the MODPATH version. Valid versions are - 'modpath7' (default is 'modpath7'). - exe_name : str - The name of the executable to use (the default is - 'mp7'). - flowmodel : flopy.modflow.Modflow or flopy.mf6.MFModel object - MODFLOW model - headfilename : str - Filename of the MODFLOW output head file. If headfilename is - not provided then it will be set from the flowmodel (default - is None). - budgetfilename : str - Filename of the MODFLOW output cell-by-cell budget file. - If budgetfilename is not provided then it will be set - from the flowmodel (default is None). - model_ws : str - model workspace. Directory name to create model data sets. - (default is the current working directory). - verbose : bool - Print additional information to the screen (default is False). 
- - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow.load('mf2005.nam') - >>> mp = flopy.modpath.Modpath7('mf2005_mp', flowmodel=m) - - """ - - def __init__(self, modelname='modpath7test', simfile_ext='mpsim', - namefile_ext='mpnam', version='modpath7', exe_name='mp7.exe', - flowmodel=None, headfilename=None, budgetfilename=None, - model_ws=None, verbose=False): - - """ - Model constructor. - - """ - - BaseModel.__init__(self, modelname, simfile_ext, exe_name, - model_ws=model_ws, verbose=verbose) - - self.version_types = {'modpath7': 'MODPATH 7'} - self.set_version(version) - - self.lst = Modpath7List(self) - - self.mpnamefile = '{}.{}'.format(self.name, namefile_ext) - self.mpbas_file = '{}.mpbas'.format(modelname) - - if not isinstance(flowmodel, (Modflow, MFModel)): - msg = 'Modpath7: flow model is not an instance of ' + \ - 'flopy.modflow.Modflow or flopy.mf6.MFModel. ' + \ - 'Passed object of type {}'.format(type(flowmodel)) - raise TypeError(msg) - - # if a MFModel instance ensure flowmodel is a MODFLOW 6 GWF model - if isinstance(flowmodel, MFModel): - if flowmodel.model_type != 'gwf' and \ - flowmodel.model_type != 'gwf6': - msg = 'Modpath7: flow model type must be gwf. ' + \ - 'Passed model_type is {}.'.format(flowmodel.model_type) - raise TypeError(msg) - - # set flowmodel and flow_version attributes - self.flowmodel = flowmodel - self.flow_version = self.flowmodel.version - - if self.flow_version == 'mf6': - # get discretization package - ibound = None - dis = self.flowmodel.get_package('DIS') - if dis is None: - msg = 'DIS, DISV, or DISU packages must be ' + \ - 'included in the passed MODFLOW 6 model' - raise Exception(msg) - else: - if dis.package_name.lower() == 'dis': - nlay, nrow, ncol = dis.nlay.array, dis.nrow.array, \ - dis.ncol.array - shape = (nlay, nrow, ncol) - elif dis.package_name.lower() == 'disv': - nlay, ncpl = dis.nlay.array, dis.ncpl.array - shape = (nlay, ncpl) - elif dis.package_name.lower() == 'disu': - nodes = dis.nodes.array - shape = tuple(nodes, ) - else: - msg = 'DIS, DISV, or DISU packages must be ' + \ - 'included in the passed MODFLOW 6 model' - raise TypeError(msg) - - # terminate (for now) if mf6 model does not use dis or disv - if len(shape) < 2: - msg = 'DIS and DISV are currently the only supported ' + \ - 'MODFLOW 6 discretization packages that can be ' + \ - 'used with MODPATH 7' - raise TypeError(msg) - - # set ib - ib = dis.idomain.array - # set all ib to active if ib is not defined - if ib is None: - ib = np.ones(shape, np.int32) - - # set dis and grbdis file name - dis_file = None - grbdis_file = dis.filename + '.grb' - grbtag = 'GRB{}'.format(dis.package_name.upper()) - - tdis = self.flowmodel.simulation.get_package('TDIS') - if tdis is None: - msg = 'TDIS package must be ' + \ - 'included in the passed MODFLOW 6 model' - raise Exception(msg) - tdis_file = tdis.filename - - # get stress period data - nper = tdis.nper.array - perlen = [] - nstp = [] - v = tdis.perioddata.array - for pl, ns, tsmult in v: - perlen.append(pl) - nstp.append(ns) - perlen = np.array(perlen, dtype=np.float32) - nstp = np.array(nstp, dtype=np.int32) - - # get oc file - oc = self.flowmodel.get_package('OC') - if oc is not None: - # set head file name - if headfilename is None: - headfilename = oc.head_filerecord.array['headfile'][0] - - # set budget file name - if budgetfilename is None: - budgetfilename = \ - oc.budget_filerecord.array['budgetfile'][0] - else: - shape = None - # extract data from DIS or DISU files and set shape - dis = 
self.flowmodel.get_package('DIS') - if dis is None: - dis = self.flowmodel.get_package('DISU') - elif dis is not None and shape is None: - nlay, nrow, ncol = dis.nlay, dis.nrow, dis.ncol - shape = (nlay, nrow, ncol) - if dis is None: - msg = 'DIS, or DISU packages must be ' + \ - 'included in the passed MODFLOW model' - raise Exception(msg) - elif dis is not None and shape is None: - nlay, nodes = dis.nlay, dis.nodes - shape = (nodes,) - - # terminate (for now) if mf6 model does not use dis - if len(shape) != 3: - msg = 'DIS currently the only supported MODFLOW ' + \ - 'discretization package that can be used with ' + \ - 'MODPATH 7' - raise Exception(msg) - - # get stress period data - nper = dis.nper - perlen = dis.perlen.array - nstp = dis.nstp.array - - # set dis_file - dis_file = dis.file_name[0] - - # set grbdis_file - grbdis_file = None - grbtag = None - - # set tdis_file - tdis_file = None - - # set head file name - if headfilename is None: - iu = self.flowmodel.oc.iuhead - headfilename = self.flowmodel.get_output(unit=iu) - - # get discretization package - p = self.flowmodel.get_package('LPF') - if p is None: - p = self.flowmodel.get_package('BCF6') - if p is None: - p = self.flowmodel.get_package('UPW') - if p is None: - msg = 'LPF, BCF6, or UPW packages must be ' + \ - 'included in the passed MODFLOW model' - raise Exception(msg) - - # set budget file name - if budgetfilename is None: - iu = p.ipakcb - budgetfilename = self.flowmodel.get_output(unit=iu) - - # set hnoflo and ibound from BAS6 package - bas = self.flowmodel.get_package('BAS6') - ib = bas.ibound.array - # reset to constant values if possible - ibound = [] - for k in range(shape[0]): - i = ib[k].flatten() - if np.all(i == i[0]): - kval = i[0] - else: - kval = ib[k] - ibound.append(kval) - - # set dis_file and tdis_file - self.shape = shape - self.dis_file = dis_file - self.grbdis_file = grbdis_file - self.grbtag = grbtag - self.tdis_file = tdis_file - - # set temporal data - self.nper = nper - self.time_end = perlen.sum() - self.perlen = perlen - self.nstp = nstp - - # set output file names - self.headfilename = headfilename - self.budgetfilename = budgetfilename - - # make sure the valid files are available - if self.headfilename is None: - msg = 'the head file in the MODFLOW model or passed ' + \ - 'to __init__ cannot be None' - raise ValueError(msg) - if self.budgetfilename is None: - msg = 'the budget file in the MODFLOW model or passed ' + \ - 'to __init__ cannot be None' - raise ValueError(msg) - if self.dis_file is None and self.grbdis_file is None: - msg = 'the dis file in the MODFLOW model or passed ' + \ - 'to __init__ cannot be None' - raise ValueError(msg) - - # set ib and ibound - self.ib = ib - self.ibound = ibound - - # set file attributes - self.array_free_format = True - self.array_format = 'modflow' - self.external = False - - return - - def __repr__(self): - return 'MODPATH 7 model' - - @property - def laytyp(self): - if self.flowmodel.version == "mf6": - icelltype = self.flowmodel.npf.icelltype.array - laytyp = [icelltype[k].max() for k in - range(self.flowmodel.modelgrid.nlay)] - else: - p = self.flowmodel.get_package('BCF6') - if p is None: - laytyp = self.flowmodel.laytyp - else: - laytyp = p.laycon.array - return np.array(laytyp, dtype=np.int32) - - @property - def hdry(self): - if self.flowmodel.version == "mf6": - return None - else: - return self.flowmodel.hdry - - @property - def hnoflo(self): - if self.flowmodel.version == "mf6": - return None - else: - return self.flowmodel.hnoflo - - 
def write_name_file(self): - """ - Write the name file - - Returns - ------- - None - - """ - fpth = os.path.join(self.model_ws, self.mpnamefile) - f = open(fpth, 'w') - f.write('{}\n'.format(self.heading)) - if self.mpbas_file is not None: - f.write('{:10s} {}\n'.format('MPBAS', self.mpbas_file)) - if self.dis_file is not None: - f.write('{:10s} {}\n'.format('DIS', self.dis_file)) - if self.grbdis_file is not None: - f.write('{:10s} {}\n'.format(self.grbtag, self.grbdis_file)) - if self.tdis_file is not None: - f.write('{:10s} {}\n'.format('TDIS', self.tdis_file)) - if self.headfilename is not None: - f.write('{:10s} {}\n'.format('HEAD', self.headfilename)) - if self.budgetfilename is not None: - f.write('{:10s} {}\n'.format('BUDGET', self.budgetfilename)) - f.close() - - @staticmethod - def create_mp7(modelname='modpath7test', trackdir='forward', - flowmodel=None, exe_name='mp7', model_ws='.', - verbose=False, columncelldivisions=2, - rowcelldivisions=2, layercelldivisions=2, - nodes=None): - """ - Create a default MODPATH 7 model using a passed flowmodel with - 8 particles in user-specified node locations or every active model - cell. - - Parameters - ---------- - modelname : str - Basename for MODPATH 7 input and output files (default is - 'modpath7test'). - trackdir : str - Keyword that defines the MODPATH particle tracking direction. - Available trackdir's are 'backward' and 'forward'. - (default is 'forward') - flowmodel : flopy.modflow.Modflow or flopy.mf6.MFModel object - MODFLOW model - exe_name : str - The name of the executable to use (the default is 'mp7'). - model_ws : str - model workspace. Directory name to create model data sets. - (default is the current working directory). - verbose : bool - Print additional information to the screen (default is False). - columncelldivisions : int - Number of particles in a cell in the column (x-coordinate) - direction (default is 2). - rowcelldivisions : int - Number of particles in a cell in the row (y-coordinate) - direction (default is 2). - layercelldivisions : int - Number of particles in a cell in the layer (z-coordinate) - direction (default is 2). - nodes : int, list of ints, tuple of ints, or np.ndarray - Nodes (zero-based) with particles. If (default is node 0). 
- - Returns - ------- - mp : Modpath7 object - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow.load('mf2005.nam') - >>> mp = flopy.modpath.Modpath7.create_mp7(flowmodel=m) - - """ - # create MODPATH 7 model instance - mp = Modpath7(modelname=modelname, flowmodel=flowmodel, - exe_name=exe_name, model_ws=model_ws, verbose=verbose) - - # set default iface for recharge and et - if mp.flow_version == 'mf6': - defaultiface = {'RCH': 6, 'EVT': 6} - else: - defaultiface = {'RECHARGE': 6, 'ET': 6} - - # create MODPATH 7 basic file and add to the MODPATH 7 - # model instance (mp) - Modpath7Bas(mp, defaultiface=defaultiface) - - # create particles - if nodes is None: - nodes = [] - node = 0 - for ib in mp.ib.flatten(): - if ib > 0: - nodes.append(node) - node += 1 - sd = CellDataType(columncelldivisions=columncelldivisions, - rowcelldivisions=rowcelldivisions, - layercelldivisions=layercelldivisions) - p = NodeParticleData(subdivisiondata=sd, nodes=nodes) - pg = ParticleGroupNodeTemplate(particledata=p) - - # create MODPATH 7 simulation file and add to the MODPATH 7 - # model instance (mp) - Modpath7Sim(mp, simulationtype='combined', - trackingdirection=trackdir, - weaksinkoption='pass_through', - weaksourceoption='pass_through', - referencetime=0., - stoptimeoption='extend', - particlegroups=pg) - return mp +""" +mp7 module. Contains the Modpath7List and Modpath7 classes. + + +""" + +import numpy as np +from ..mbase import BaseModel +from ..modflow import Modflow +from ..mf6 import MFModel +from ..pakbase import Package +from .mp7bas import Modpath7Bas +from .mp7sim import Modpath7Sim +from .mp7particledata import CellDataType, NodeParticleData +from .mp7particlegroup import ParticleGroupNodeTemplate +import os + + +class Modpath7List(Package): + """ + List package class + + """ + + def __init__(self, model, extension='list', unitnumber=None): + """ + Package constructor. + + """ + if unitnumber is None: + unitnumber = model.next_unit() + + # Call ancestor's init to set self.parent, extension, name and + # unit number + Package.__init__(self, model, extension, 'LIST', unitnumber) + # self.parent.add_package(self) This package is not added to the base + # model so that it is not included in get_name_file_entries() + return + + def write_file(self): + # Not implemented for list class + return + + +class Modpath7(BaseModel): + """ + Modpath 7 base class + + Parameters + ---------- + modelname : str + Basename for MODPATH 7 input and output files (default is + 'modpath7test'). + simfile_ext : str + Filename extension of the MODPATH 7 simulation file + (default is 'mpsim'). + namefile_ext : str + Filename extension of the MODPATH 7 namefile + (default is 'mpnam'). + version : str + String that defines the MODPATH version. Valid versions are + 'modpath7' (default is 'modpath7'). + exe_name : str + The name of the executable to use (the default is + 'mp7'). + flowmodel : flopy.modflow.Modflow or flopy.mf6.MFModel object + MODFLOW model + headfilename : str + Filename of the MODFLOW output head file. If headfilename is + not provided then it will be set from the flowmodel (default + is None). + budgetfilename : str + Filename of the MODFLOW output cell-by-cell budget file. + If budgetfilename is not provided then it will be set + from the flowmodel (default is None). + model_ws : str + model workspace. Directory name to create model data sets. + (default is the current working directory). + verbose : bool + Print additional information to the screen (default is False). 
+
+    Examples
+    --------
+
+    >>> import flopy
+    >>> m = flopy.modflow.Modflow.load('mf2005.nam')
+    >>> mp = flopy.modpath.Modpath7('mf2005_mp', flowmodel=m)
+
+    """
+
+    def __init__(self, modelname='modpath7test', simfile_ext='mpsim',
+                 namefile_ext='mpnam', version='modpath7', exe_name='mp7.exe',
+                 flowmodel=None, headfilename=None, budgetfilename=None,
+                 model_ws=None, verbose=False):
+
+        """
+        Model constructor.
+
+        """
+
+        BaseModel.__init__(self, modelname, simfile_ext, exe_name,
+                           model_ws=model_ws, verbose=verbose)
+
+        self.version_types = {'modpath7': 'MODPATH 7'}
+        self.set_version(version)
+
+        self.lst = Modpath7List(self)
+
+        self.mpnamefile = '{}.{}'.format(self.name, namefile_ext)
+        self.mpbas_file = '{}.mpbas'.format(modelname)
+
+        if not isinstance(flowmodel, (Modflow, MFModel)):
+            msg = 'Modpath7: flow model is not an instance of ' + \
+                  'flopy.modflow.Modflow or flopy.mf6.MFModel. ' + \
+                  'Passed object of type {}'.format(type(flowmodel))
+            raise TypeError(msg)
+
+        # if an MFModel instance, ensure flowmodel is a MODFLOW 6 GWF model
+        if isinstance(flowmodel, MFModel):
+            if flowmodel.model_type != 'gwf' and \
+                    flowmodel.model_type != 'gwf6':
+                msg = 'Modpath7: flow model type must be gwf. ' + \
+                      'Passed model_type is {}.'.format(flowmodel.model_type)
+                raise TypeError(msg)
+
+        # set flowmodel and flow_version attributes
+        self.flowmodel = flowmodel
+        self.flow_version = self.flowmodel.version
+
+        if self.flow_version == 'mf6':
+            # get discretization package
+            ibound = None
+            dis = self.flowmodel.get_package('DIS')
+            if dis is None:
+                msg = 'DIS, DISV, or DISU packages must be ' + \
+                      'included in the passed MODFLOW 6 model'
+                raise Exception(msg)
+            else:
+                if dis.package_name.lower() == 'dis':
+                    nlay, nrow, ncol = dis.nlay.array, dis.nrow.array, \
+                                       dis.ncol.array
+                    shape = (nlay, nrow, ncol)
+                elif dis.package_name.lower() == 'disv':
+                    nlay, ncpl = dis.nlay.array, dis.ncpl.array
+                    shape = (nlay, ncpl)
+                elif dis.package_name.lower() == 'disu':
+                    nodes = dis.nodes.array
+                    shape = (nodes,)
+                else:
+                    msg = 'DIS, DISV, or DISU packages must be ' + \
+                          'included in the passed MODFLOW 6 model'
+                    raise TypeError(msg)
+
+            # terminate (for now) if the mf6 model does not use DIS or DISV
+            if len(shape) < 2:
+                msg = 'DIS and DISV are currently the only supported ' + \
+                      'MODFLOW 6 discretization packages that can be ' + \
+                      'used with MODPATH 7'
+                raise TypeError(msg)
+
+            # set ib
+            ib = dis.idomain.array
+            # set all ib to active if ib is not defined
+            if ib is None:
+                ib = np.ones(shape, np.int32)
+
+            # set dis and grbdis file names
+            dis_file = None
+            grbdis_file = dis.filename + '.grb'
+            grbtag = 'GRB{}'.format(dis.package_name.upper())
+
+            tdis = self.flowmodel.simulation.get_package('TDIS')
+            if tdis is None:
+                msg = 'TDIS package must be ' + \
+                      'included in the passed MODFLOW 6 model'
+                raise Exception(msg)
+            tdis_file = tdis.filename
+
+            # get stress period data
+            nper = tdis.nper.array
+            perlen = []
+            nstp = []
+            v = tdis.perioddata.array
+            for pl, ns, tsmult in v:
+                perlen.append(pl)
+                nstp.append(ns)
+            perlen = np.array(perlen, dtype=np.float32)
+            nstp = np.array(nstp, dtype=np.int32)
+
+            # get oc file
+            oc = self.flowmodel.get_package('OC')
+            if oc is not None:
+                # set head file name
+                if headfilename is None:
+                    headfilename = oc.head_filerecord.array['headfile'][0]
+
+                # set budget file name
+                if budgetfilename is None:
+                    budgetfilename = \
+                        oc.budget_filerecord.array['budgetfile'][0]
+        else:
+            shape = None
+            # extract data from the DIS or DISU file and set shape
+            dis = self.flowmodel.get_package('DIS')
+            if dis is None:
+                dis = self.flowmodel.get_package('DISU')
+                if dis is None:
+                    msg = 'DIS or DISU packages must be ' + \
+                          'included in the passed MODFLOW model'
+                    raise Exception(msg)
+                nlay, nodes = dis.nlay, dis.nodes
+                shape = (nodes,)
+            else:
+                nlay, nrow, ncol = dis.nlay, dis.nrow, dis.ncol
+                shape = (nlay, nrow, ncol)
+
+            # terminate (for now) if the model does not use a
+            # structured (DIS) grid
+            if len(shape) != 3:
+                msg = 'DIS is currently the only supported MODFLOW ' + \
+                      'discretization package that can be used with ' + \
+                      'MODPATH 7'
+                raise Exception(msg)
+
+            # get stress period data
+            nper = dis.nper
+            perlen = dis.perlen.array
+            nstp = dis.nstp.array
+
+            # set dis_file
+            dis_file = dis.file_name[0]
+
+            # set grbdis_file
+            grbdis_file = None
+            grbtag = None
+
+            # set tdis_file
+            tdis_file = None
+
+            # set head file name
+            if headfilename is None:
+                iu = self.flowmodel.oc.iuhead
+                headfilename = self.flowmodel.get_output(unit=iu)
+
+            # get the flow package
+            p = self.flowmodel.get_package('LPF')
+            if p is None:
+                p = self.flowmodel.get_package('BCF6')
+            if p is None:
+                p = self.flowmodel.get_package('UPW')
+            if p is None:
+                msg = 'LPF, BCF6, or UPW packages must be ' + \
+                      'included in the passed MODFLOW model'
+                raise Exception(msg)
+
+            # set budget file name
+            if budgetfilename is None:
+                iu = p.ipakcb
+                budgetfilename = self.flowmodel.get_output(unit=iu)
+
+            # set hnoflo and ibound from the BAS6 package
+            bas = self.flowmodel.get_package('BAS6')
+            ib = bas.ibound.array
+            # reset layers to constant values where possible
+            ibound = []
+            for k in range(shape[0]):
+                i = ib[k].flatten()
+                if np.all(i == i[0]):
+                    kval = i[0]
+                else:
+                    kval = ib[k]
+                ibound.append(kval)
+
+        # set dis_file and tdis_file
+        self.shape = shape
+        self.dis_file = dis_file
+        self.grbdis_file = grbdis_file
+        self.grbtag = grbtag
+        self.tdis_file = tdis_file
+
+        # set temporal data
+        self.nper = nper
+        self.time_end = perlen.sum()
+        self.perlen = perlen
+        self.nstp = nstp
+
+        # set output file names
+        self.headfilename = headfilename
+        self.budgetfilename = budgetfilename
+
+        # make sure the required files are available
+        if self.headfilename is None:
+            msg = 'the head file in the MODFLOW model or passed ' + \
+                  'to __init__ cannot be None'
+            raise ValueError(msg)
+        if self.budgetfilename is None:
+            msg = 'the budget file in the MODFLOW model or passed ' + \
+                  'to __init__ cannot be None'
+            raise ValueError(msg)
+        if self.dis_file is None and self.grbdis_file is None:
+            msg = 'the dis file in the MODFLOW model or passed ' + \
+                  'to __init__ cannot be None'
+            raise ValueError(msg)
+
+        # set ib and ibound
+        self.ib = ib
+        self.ibound = ibound
+
+        # set file attributes
+        self.array_free_format = True
+        self.array_format = 'modflow'
+        self.external = False
+
+        return
+
+    def __repr__(self):
+        return 'MODPATH 7 model'
+
+    @property
+    def laytyp(self):
+        if self.flowmodel.version == "mf6":
+            icelltype = self.flowmodel.npf.icelltype.array
+            laytyp = [icelltype[k].max() for k in
+                      range(self.flowmodel.modelgrid.nlay)]
+        else:
+            p = self.flowmodel.get_package('BCF6')
+            if p is None:
+                laytyp = self.flowmodel.laytyp
+            else:
+                laytyp = p.laycon.array
+        return np.array(laytyp, dtype=np.int32)
+
+    @property
+    def hdry(self):
+        if self.flowmodel.version == "mf6":
+            return None
+        else:
+            return self.flowmodel.hdry
+
+    @property
+    def hnoflo(self):
+        if self.flowmodel.version == "mf6":
+            return None
+        else:
+            return self.flowmodel.hnoflo
+
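The three properties above adapt MODPATH 7 input to the underlying flow model: for MODFLOW 6 models laytyp is derived from the NPF icelltype array, and hdry/hnoflo do not exist, so they return None. A hedged usage sketch (assuming mp is a Modpath7 instance built as in the docstring examples):

# hdry and hnoflo exist only for classic MODFLOW flow models, so
# consumers such as Modpath7Bas.write_file guard on the flow version
# before writing them; laytyp is always an int32 array with one value
# per layer.
if mp.flow_version != 'mf6':
    print('hnoflo={}, hdry={}'.format(mp.hnoflo, mp.hdry))
print('layer types: {}'.format(mp.laytyp))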
+    def write_name_file(self):
+        """
+        Write the name file
+
+        Returns
+        -------
+        None
+
+        """
+        fpth = os.path.join(self.model_ws, self.mpnamefile)
+        f = open(fpth, 'w')
+        f.write('{}\n'.format(self.heading))
+        if self.mpbas_file is not None:
+            f.write('{:10s} {}\n'.format('MPBAS', self.mpbas_file))
+        if self.dis_file is not None:
+            f.write('{:10s} {}\n'.format('DIS', self.dis_file))
+        if self.grbdis_file is not None:
+            f.write('{:10s} {}\n'.format(self.grbtag, self.grbdis_file))
+        if self.tdis_file is not None:
+            f.write('{:10s} {}\n'.format('TDIS', self.tdis_file))
+        if self.headfilename is not None:
+            f.write('{:10s} {}\n'.format('HEAD', self.headfilename))
+        if self.budgetfilename is not None:
+            f.write('{:10s} {}\n'.format('BUDGET', self.budgetfilename))
+        f.close()
+
+    @staticmethod
+    def create_mp7(modelname='modpath7test', trackdir='forward',
+                   flowmodel=None, exe_name='mp7', model_ws='.',
+                   verbose=False, columncelldivisions=2,
+                   rowcelldivisions=2, layercelldivisions=2,
+                   nodes=None):
+        """
+        Create a default MODPATH 7 model using a passed flowmodel with
+        8 particles per cell (2 x 2 x 2 subdivisions) in user-specified
+        node locations or in every active model cell.
+
+        Parameters
+        ----------
+        modelname : str
+            Basename for MODPATH 7 input and output files (default is
+            'modpath7test').
+        trackdir : str
+            Keyword that defines the MODPATH particle tracking direction.
+            Valid trackdir values are 'backward' and 'forward'.
+            (default is 'forward')
+        flowmodel : flopy.modflow.Modflow or flopy.mf6.MFModel object
+            MODFLOW model
+        exe_name : str
+            The name of the executable to use (the default is 'mp7').
+        model_ws : str
+            model workspace. Directory name to create model data sets.
+            (default is the current working directory).
+        verbose : bool
+            Print additional information to the screen (default is False).
+        columncelldivisions : int
+            Number of particles in a cell in the column (x-coordinate)
+            direction (default is 2).
+        rowcelldivisions : int
+            Number of particles in a cell in the row (y-coordinate)
+            direction (default is 2).
+        layercelldivisions : int
+            Number of particles in a cell in the layer (z-coordinate)
+            direction (default is 2).
+        nodes : int, list of ints, tuple of ints, or np.ndarray
+            Nodes (zero-based) with particles. If nodes is None,
+            particles are placed in every active model cell
+            (default is None).
+ + Returns + ------- + mp : Modpath7 object + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow.load('mf2005.nam') + >>> mp = flopy.modpath.Modpath7.create_mp7(flowmodel=m) + + """ + # create MODPATH 7 model instance + mp = Modpath7(modelname=modelname, flowmodel=flowmodel, + exe_name=exe_name, model_ws=model_ws, verbose=verbose) + + # set default iface for recharge and et + if mp.flow_version == 'mf6': + defaultiface = {'RCH': 6, 'EVT': 6} + else: + defaultiface = {'RECHARGE': 6, 'ET': 6} + + # create MODPATH 7 basic file and add to the MODPATH 7 + # model instance (mp) + Modpath7Bas(mp, defaultiface=defaultiface) + + # create particles + if nodes is None: + nodes = [] + node = 0 + for ib in mp.ib.flatten(): + if ib > 0: + nodes.append(node) + node += 1 + sd = CellDataType(columncelldivisions=columncelldivisions, + rowcelldivisions=rowcelldivisions, + layercelldivisions=layercelldivisions) + p = NodeParticleData(subdivisiondata=sd, nodes=nodes) + pg = ParticleGroupNodeTemplate(particledata=p) + + # create MODPATH 7 simulation file and add to the MODPATH 7 + # model instance (mp) + Modpath7Sim(mp, simulationtype='combined', + trackingdirection=trackdir, + weaksinkoption='pass_through', + weaksourceoption='pass_through', + referencetime=0., + stoptimeoption='extend', + particlegroups=pg) + return mp diff --git a/flopy/modpath/mp7bas.py b/flopy/modpath/mp7bas.py index 4420da9659..6581cc3239 100644 --- a/flopy/modpath/mp7bas.py +++ b/flopy/modpath/mp7bas.py @@ -1,139 +1,139 @@ -""" -mp7bas module. Contains the Modpath7Bas class. - -""" -import numpy as np -from ..pakbase import Package -from ..utils import Util2d, Util3d - - -class Modpath7Bas(Package): - """ - MODPATH 7 Basic Package Class. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modpath.Modpath7`) to which - this package will be added. - porosity : float or array of floats (nlay, nrow, ncol) - The porosity array (the default is 0.30). - defaultiface : dict - Dictionary with keys that are the text string used by MODFLOW in - the budget output file to label flow rates for a stress package - and the values are the cell face (iface) on which to assign flows - (the default is None). - extension : str, optional - File extension (default is 'mpbas'). - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow.load('mf2005.nam') - >>> mp = flopy.modpath.Modpath7('mf2005_mp', flowmodel=m) - >>> mpbas = flopy.modpath.Modpath7Bas(mp) - - """ - - def __init__(self, model, porosity=0.30, defaultiface=None, - extension='mpbas'): - """ - Package constructor. - - """ - - unitnumber = model.next_unit() - - Package.__init__(self, model, extension, 'MPBAS', unitnumber) - - shape = model.shape - if len(shape) == 3: - shape3d = shape - elif len(shape) == 2: - shape3d = (shape[0], 1, shape[1]) - else: - shape3d = (1, 1, shape[0]) - - self.heading = '# {} package for'.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' 
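For reference, a minimal sketch of building the basic package directly with an explicit default-iface mapping, mirroring the dictionaries used by create_mp7 above (the porosity value is illustrative):

>>> import flopy
>>> m = flopy.modflow.Modflow.load('mf2005.nam')
>>> mp = flopy.modpath.Modpath7('mf2005_mp', flowmodel=m)
>>> mpbas = flopy.modpath.Modpath7Bas(mp, porosity=0.25,
...                                   defaultiface={'RECHARGE': 6, 'ET': 6})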
- - if model.flowmodel.version == 'mf6': - self.laytyp = Util2d(self.parent, (shape[0],), np.int32, - model.laytyp, name='bas - laytype', - locat=self.unit_number[0]) - else: - self.laytyp = Util2d(self.parent, (shape[0],), np.int32, - model.laytyp, name='bas - laytype', - locat=self.unit_number[0]) - if model.flowmodel.version != 'mf6': - self.ibound = Util3d(model, shape3d, np.int32, model.ibound, - name='IBOUND', locat=self.unit_number[0]) - - self.porosity = Util3d(model, shape3d, np.float32, porosity, - name='POROSITY', locat=self.unit_number[0]) - - # validate and set defaultiface - if defaultiface is None: - defaultifacecount = 0 - else: - if not isinstance(defaultiface, dict): - msg = 'defaultiface must be a dictionary with package ' + \ - 'name keys and values between 0 and 6' - raise ValueError(msg) - defaultifacecount = len(defaultiface.keys()) - for key, value in defaultiface.items(): - # check iface value - if value < 0 or value > 6: - msg = 'defaultiface for package {}'.format(key) + \ - 'must be between 0 and 1 ' + \ - '({} specified)'.format(value) - raise ValueError(msg) - - self.defaultifacecount = defaultifacecount - self.defaultiface = defaultiface - - self.parent.add_package(self) - - def write_file(self, check=False): - """ - Write the package file - - Parameters - ---------- - check : boolean - Check package data for common errors. (default False) - - Returns - ------- - None - - """ - # Open file for writing - f = open(self.fn_path, 'w') - f.write('# {}\n'.format(self.heading)) - if self.parent.flowmodel.version != 'mf6': - f.write('{:g} {:g}\n'.format(self.parent.hnoflo, - self.parent.hdry)) - - # default IFACE - f.write('{:<20d}{}\n'.format(self.defaultifacecount, - '# DEFAULTIFACECOUNT')) - if self.defaultifacecount > 0: - for key, value in self.defaultiface.items(): - f.write('{:20s}{}\n'.format(key, '# PACKAGE LABEL')) - f.write('{:<20d}{}\n'.format(value, '# DEFAULT IFACE VALUE')) - - # laytyp - if self.parent.flow_version != 'mf6': - f.write(self.laytyp.string) - - # ibound - if self.parent.flow_version != 'mf6': - f.write(self.ibound.get_file_entry()) - - # porosity - f.write(self.porosity.get_file_entry()) - - f.close() +""" +mp7bas module. Contains the Modpath7Bas class. + +""" +import numpy as np +from ..pakbase import Package +from ..utils import Util2d, Util3d + + +class Modpath7Bas(Package): + """ + MODPATH 7 Basic Package Class. + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.modpath.Modpath7`) to which + this package will be added. + porosity : float or array of floats (nlay, nrow, ncol) + The porosity array (the default is 0.30). + defaultiface : dict + Dictionary with keys that are the text string used by MODFLOW in + the budget output file to label flow rates for a stress package + and the values are the cell face (iface) on which to assign flows + (the default is None). + extension : str, optional + File extension (default is 'mpbas'). + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow.load('mf2005.nam') + >>> mp = flopy.modpath.Modpath7('mf2005_mp', flowmodel=m) + >>> mpbas = flopy.modpath.Modpath7Bas(mp) + + """ + + def __init__(self, model, porosity=0.30, defaultiface=None, + extension='mpbas'): + """ + Package constructor. 
+ + """ + + unitnumber = model.next_unit() + + Package.__init__(self, model, extension, 'MPBAS', unitnumber) + + shape = model.shape + if len(shape) == 3: + shape3d = shape + elif len(shape) == 2: + shape3d = (shape[0], 1, shape[1]) + else: + shape3d = (1, 1, shape[0]) + + self.heading = '# {} package for'.format(self.name[0]) + \ + ' {}, '.format(model.version_types[model.version]) + \ + 'generated by Flopy.' + + if model.flowmodel.version == 'mf6': + self.laytyp = Util2d(self.parent, (shape[0],), np.int32, + model.laytyp, name='bas - laytype', + locat=self.unit_number[0]) + else: + self.laytyp = Util2d(self.parent, (shape[0],), np.int32, + model.laytyp, name='bas - laytype', + locat=self.unit_number[0]) + if model.flowmodel.version != 'mf6': + self.ibound = Util3d(model, shape3d, np.int32, model.ibound, + name='IBOUND', locat=self.unit_number[0]) + + self.porosity = Util3d(model, shape3d, np.float32, porosity, + name='POROSITY', locat=self.unit_number[0]) + + # validate and set defaultiface + if defaultiface is None: + defaultifacecount = 0 + else: + if not isinstance(defaultiface, dict): + msg = 'defaultiface must be a dictionary with package ' + \ + 'name keys and values between 0 and 6' + raise ValueError(msg) + defaultifacecount = len(defaultiface.keys()) + for key, value in defaultiface.items(): + # check iface value + if value < 0 or value > 6: + msg = 'defaultiface for package {}'.format(key) + \ + 'must be between 0 and 1 ' + \ + '({} specified)'.format(value) + raise ValueError(msg) + + self.defaultifacecount = defaultifacecount + self.defaultiface = defaultiface + + self.parent.add_package(self) + + def write_file(self, check=False): + """ + Write the package file + + Parameters + ---------- + check : boolean + Check package data for common errors. (default False) + + Returns + ------- + None + + """ + # Open file for writing + f = open(self.fn_path, 'w') + f.write('# {}\n'.format(self.heading)) + if self.parent.flowmodel.version != 'mf6': + f.write('{:g} {:g}\n'.format(self.parent.hnoflo, + self.parent.hdry)) + + # default IFACE + f.write('{:<20d}{}\n'.format(self.defaultifacecount, + '# DEFAULTIFACECOUNT')) + if self.defaultifacecount > 0: + for key, value in self.defaultiface.items(): + f.write('{:20s}{}\n'.format(key, '# PACKAGE LABEL')) + f.write('{:<20d}{}\n'.format(value, '# DEFAULT IFACE VALUE')) + + # laytyp + if self.parent.flow_version != 'mf6': + f.write(self.laytyp.string) + + # ibound + if self.parent.flow_version != 'mf6': + f.write(self.ibound.get_file_entry()) + + # porosity + f.write(self.porosity.get_file_entry()) + + f.close() diff --git a/flopy/modpath/mp7sim.py b/flopy/modpath/mp7sim.py index 3114a4b039..14862f44af 100644 --- a/flopy/modpath/mp7sim.py +++ b/flopy/modpath/mp7sim.py @@ -1,630 +1,630 @@ -""" -mpsim module. Contains the ModpathSim class. Note that the user can access -the ModpathSim class as `flopy.modpath.ModpathSim`. - -Additional information for this MODFLOW/MODPATH package can be found at the -`Online MODFLOW Guide -`_. - -""" -from enum import Enum -import numpy as np -from ..pakbase import Package -from ..utils import Util2d, Util3d -from .mp7particlegroup import ParticleGroup, ParticleGroupLRCTemplate, \ - ParticleGroupNodeTemplate - - -def sim_enum_error(v, s, e): - """ - Standard enumeration error format error - Parameters - ---------- - v : str - Enumeration value - - s : str - User-defined value - - e : Enum class - Enumeration class - - Returns - ------- - - """ - msg = 'Invalid {} ({})'.format(v, s) + \ - '. 
Valid types are ' - for i, c in enumerate(e): - if i > 0: - msg += ', ' - msg += '"{}"'.format(c.name) - raise ValueError(msg) - - -class simType(Enum): - """ - Enumeration of different simulation types - """ - endpoint = 1 - pathline = 2 - timeseries = 3 - combined = 4 - - -class trackDir(Enum): - """ - Enumeration of different tracking directions - """ - forward = 1 - backward = 2 - - -class weakOpt(Enum): - """ - Enumeration of different weak sink and source options - """ - pass_through = 1 - stop_at = 2 - - -class budgetOpt(Enum): - """ - Enumeration of different budget output options - """ - no = 0 - summary = 1 - record_summary = 2 - - -class stopOpt(Enum): - """ - Enumeration of different stop time options - """ - total = 1 - extend = 2 - specified = 3 - - -class onoffOpt(Enum): - """ - Enumeration of on-off options - """ - off = 1 - on = 2 - - -class Modpath7Sim(Package): - """ - MODPATH Simulation File Package Class. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modpath.Modpath7`) to - which this package will be added. - mpnamefilename : str - Filename of the MODPATH 7 name file. If mpnamefilename is not - defined it will be generated from the model name - (default is None). - listingfilename : str - Filename of the MODPATH 7 listing file. If listingfilename is not - defined it will be generated from the model name - (default is None). - endpointfilename : str - Filename of the MODPATH 7 endpoint file. If endpointfilename is - not defined it will be generated from the model name - (default is None). - pathlinefilename : str - Filename of the MODPATH 7 pathline file. If pathlinefilename is - not defined it will be generated from the model name - (default is None). - timeseriesfilename : str - Filename of the MODPATH 7 timeseries file. If timeseriesfilename - is not defined it will be generated from the model name - (default is None). - tracefilename : str - Filename of the MODPATH 7 tracefile file. If tracefilename is not - defined it will be generated from the model name - (default is None). - simulationtype : str - MODPATH 7 simulation type. Valid simulation types are 'endpoint', - 'pathline', 'timeseries', or 'combined' (default is 'pathline'). - trackingdirection : str - MODPATH 7 tracking direction. Valid tracking directions are - 'forward' or 'backward' (default os 'forward'). - weaksinkoption : str - MODPATH 7 weak sink option. Valid weak sink options are - 'pass_through' or 'stop_at' (default value is 'stop_at'). - weaksourceoption : str - MODPATH 7 weak source option. Valid weak source options are - 'pass_through' or 'stop_at' (default value is 'stop_at'). - budgetoutputoption : str - MODPATH 7 budget output option. Valid budget output options are - 'no' - individual cell water balance errors are not computed - and budget record headers are not printed, 'summary' - a summary - of individual cell water balance errors for each time step is - printed in the listing file without record headers, or - 'record_summary' - a summary of individual cell water balance - errors for each time step is printed in the listing file with - record headers (default is 'summary'). - traceparticledata : list or tuple - List or tuple with two ints that define the particle group and - particle id (zero-based) of the specified particle that is - followed in detail. If traceparticledata is None, trace mode is - off (default is None). 
- budgetcellnumbers : int, list of ints, tuple of ints, or np.ndarray - Cell numbers (zero-based) for which detailed water budgets are - computed. If budgetcellnumbers is None, detailed water budgets are - not calculated (default is None). - referencetime : float, list, or tuple - Specified reference time if a float or a list/tuple with a single - float value is provided (reference time option 1). Otherwise a - list or tuple with a zero-based stress period (int) and time - step (int) and a float defining the relative time position in the - time step is provided (reference time option 2). If referencetime - is None, reference time is set to 0 (default is None). - stoptimeoption : str - String indicating how a particle tracking simulation is - terminated based on time. If stop time option is 'total', particles - will be stopped at the end of the final time step if 'forward' - tracking is simulated or at the beginning of the first time step - if backward tracking. If stop time option is 'extend', initial or - final steady-state time steps will be extended and all particles - will be tracked until they reach a termination location. If stop - time option is 'specified', particles will be tracked until they - reach a termination location or the specified stop time is reached - (default is 'extend'). - stoptime : float - User-specified value of tracking time at which to stop a particle - tracking simulation. Stop time is only used if the stop time option - is 'specified'. If stoptime is None amd the stop time option is - 'specified' particles will be terminated at the end of the last - time step if 'forward' tracking or the beginning of the first time - step if 'backward' tracking (default is None). - timepointdata : list or tuple - List or tuple with 2 items that is only used if simulationtype is - 'timeseries' or 'combined'. If the second item is a float then the - timepoint data corresponds to time point option 1 and the first - entry is the number of time points (timepointcount) and the second - entry is the time point interval. If the second item is a list, - tuple, or np.ndarray then the timepoint data corresponds to time - point option 2 and the number of time points entries - (timepointcount) in the second item and the second item is an - list, tuple, or array of user-defined time points. If Timepointdata - is None, time point option 1 is specified and the total simulation - time is split into 100 intervals (default is None). - zonedataoption : str - If zonedataoption is 'off', zone array data are not read and a zone - value of 1 is applied to all cells. If zonedataoption is 'on', - zone array data are read (default is 'off'). - stopzone : int - A zero-based specified integer zone value that indicates an - automatic stopping location for particles and is only used if - zonedataoption is 'on'. A value of -1 indicates no automatic stop - zone is used. Stopzone values less than -1 are not allowed. If - stopzone is None, stopzone is set to -1 (default is None). - zones : float or array of floats (nlay, nrow, ncol) - Array of zero-based positive integer zones that are only used if - zonedataoption is 'on' (default is 0). - retardationfactoroption : str - If retardationfactoroption is 'off', retardation array data are not - read and a retardation factor of 1 is applied to all cells. If - retardationfactoroption is 'on', retardation factor array data are - read (default is 'off'). 
- retardation : float or array of floats (nlay, nrow, ncol) - Array of retardation factors that are only used if - retardationfactoroption is 'on' (default is 1). - particlegroups : ParticleGroup or list of ParticleGroups - ParticleGroup or list of ParticlesGroups that contain data for - individual particle groups. If None is specified, a - particle in the center of node 0 will be created (default is None). - extension : string - Filename extension (default is 'mpsim') - - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow.load('mf2005.nam') - >>> mp = flopy.modpath.Modpath7('mf2005_mp', flowmodel=m) - >>> mpsim = flopy.modpath.Modpath7Sim(mp) - - """ - - def __init__(self, model, mpnamefilename=None, listingfilename=None, - endpointfilename=None, pathlinefilename=None, - timeseriesfilename=None, tracefilename=None, - simulationtype='pathline', trackingdirection='forward', - weaksinkoption='stop_at', weaksourceoption='stop_at', - budgetoutputoption='no', - traceparticledata=None, - budgetcellnumbers=None, referencetime=None, - stoptimeoption='extend', stoptime=None, - timepointdata=None, - zonedataoption='off', stopzone=None, zones=0, - retardationfactoroption='off', retardation=1., - particlegroups=None, - extension='mpsim'): - """ - Package constructor. - - """ - - unitnumber = model.next_unit() - - # Call ancestor's init to set self.parent, extension, name and unit number - Package.__init__(self, model, extension, 'MPSIM', unitnumber) - - self.heading = '# {} package for'.format(self.name[0]) + \ - ' {}, '.format(model.version_types[model.version]) + \ - 'generated by Flopy.' - - # set file names - if mpnamefilename is None: - mpnamefilename = '{}.{}'.format(model.name, 'mpnam') - self.mp_name_file = mpnamefilename - if listingfilename is None: - listingfilename = '{}.{}'.format(model.name, 'mplst') - self.listingfilename = listingfilename - if endpointfilename is None: - endpointfilename = '{}.{}'.format(model.name, 'mpend') - self.endpointfilename = endpointfilename - if pathlinefilename is None: - pathlinefilename = '{}.{}'.format(model.name, 'mppth') - self.pathlinefilename = pathlinefilename - if timeseriesfilename is None: - timeseriesfilename = '{}.{}'.format(model.name, 'timeseries') - self.timeseriesfilename = timeseriesfilename - if tracefilename is None: - tracefilename = '{}.{}'.format(model.name, 'trace') - self.tracefilename = tracefilename - - try: - self.simulationtype = simType[simulationtype.lower()].value - except: - sim_enum_error('simulationtype', simulationtype, simType) - try: - self.trackingdirection = trackDir[trackingdirection.lower()].value - except: - sim_enum_error('trackingdirection', trackingdirection, - trackDir) - try: - self.weaksinkoption = weakOpt[weaksinkoption.lower()].value - except: - sim_enum_error('weaksinkoption', weaksinkoption, - weakOpt) - try: - self.weaksourceoption = weakOpt[weaksourceoption.lower()].value - except: - sim_enum_error('weaksourceoption', weaksourceoption, - weakOpt) - try: - self.budgetoutputoption = \ - budgetOpt[budgetoutputoption.lower()].value - except: - sim_enum_error('budgetoutputoption', budgetoutputoption, - budgetOpt) - # tracemode - if traceparticledata is None: - tracemode = 0 - traceparticlegroup = None - traceparticleid = None - else: - tracemode = 1 - if isinstance(traceparticledata, (list, tuple)): - if len(traceparticledata) != 2: - msg = 'traceparticledata must be a list or tuple ' + \ - 'with 2 items (a integer and an integer). 
' + \ - 'Passed item {}.'.format(traceparticledata) - raise ValueError(msg) - try: - traceparticlegroup = int(traceparticledata[0]) - except: - msg = 'traceparticledata[0] ' + \ - '({}) '.format(traceparticledata[0]) + \ - 'cannot be converted to a integer.' - raise ValueError(msg) - try: - traceparticleid = int(traceparticledata[1]) - except: - msg = 'traceparticledata[1] ' + \ - '({}) '.format(traceparticledata[0]) + \ - 'cannot be converted to a integer.' - raise ValueError(msg) - else: - msg = 'traceparticledata must be a list or ' + \ - 'tuple with 2 items (a integer and an integer).' - raise ValueError(msg) - - # set tracemode, traceparticlegroup, and traceparticleid - self.tracemode = tracemode - self.traceparticlegroup = traceparticlegroup - self.traceparticleid = traceparticleid - - if budgetcellnumbers is None: - BudgetCellCount = 0 - else: - if isinstance(budgetcellnumbers, int): - budgetcellnumbers = [budgetcellnumbers] - budgetcellnumbers = np.array(budgetcellnumbers, dtype=np.int32) - # validate budget cell numbers - ncells = np.prod(np.array(self.parent.shape)) - msg = '' - for cell in budgetcellnumbers: - if cell < 0 or cell >= ncells: - if msg == '': - msg = 'Specified cell number(s) exceed the ' + \ - 'number of cells in the model ' + \ - '(Valid cells = 0-{}). '.format(ncells - 1) + \ - 'Invalid cells are: ' - else: - msg += ', ' - msg += '{}'.format(cell) - if msg != '': - raise ValueError(msg) - # create Util2d object - BudgetCellCount = budgetcellnumbers.shape[0] - self.budgetcellnumbers = Util2d(self.parent, (BudgetCellCount,), - np.int32, budgetcellnumbers, - name='budgetcellnumbers', - locat=self.unit_number[0]) - self.BudgetCellCount = BudgetCellCount - - if referencetime is None: - referencetime = 0. - if isinstance(referencetime, float): - referencetime = [referencetime] - elif isinstance(referencetime, np.ndarray): - referencetime = referencetime.tolist() - if len(referencetime) == 1: - referencetimeOption = 1 - # validate referencetime data - t = referencetime[0] - if t < 0. or t > self.parent.time_end: - msg = 'referencetime must be between 0. and ' + \ - '{} '.format(self.parent.time_end) + \ - '(specified value = {}).'.format(t) - raise ValueError(msg) - elif len(referencetime) == 3: - referencetimeOption = 2 - # validate referencetime data - # StressPeriod - iper = referencetime[0] - if iper < 0 or iper >= self.parent.nper: - msg = 'StressPeriod must be between 0 and ' + \ - '{} '.format(self.parent.nper - 1) + \ - '(specified value = {}).'.format(iper) - raise ValueError(msg) - - # TimeStep - istp = referencetime[1] - maxstp = self.parent.nstp[iper] + 1 - if istp < 0 or istp >= maxstp: - msg = 'TimeStep for StressPeriod {} '.format(iper) + \ - 'must be between 0 and ' + \ - '{} '.format(maxstp - 1) + \ - '(specified value = {}).'.format(istp) - raise ValueError(msg) - - # TimeFraction - tf = referencetime[2] - if tf < 0. or tf > 1.: - msg = 'TimeFraction value must be between 0 and 1 ' + \ - '(specified value={}).'.format(tf) - raise ValueError(msg) - else: - msg = 'referencetime must be a float (referencetime) or ' + \ - 'a list with one item [referencetime] or three items ' + \ - '[StressPeriod, TimeStep, TimeFraction]. ' + \ - '{}'.format(len(referencetime)) + \ - ' items were passed as referencetime [' - for i, v in enumerate(referencetime): - if i > 0: - msg += ', ' - msg += '{}'.format(v) - msg += '].' 
- raise ValueError(msg) - self.referencetimeOption = referencetimeOption - self.referencetime = referencetime - - # stoptimeoption - try: - self.stoptimeoption = \ - stopOpt[stoptimeoption.lower()].value - except: - sim_enum_error('stoptimeoption', stoptimeoption, - stopOpt) - # stoptime - if self.stoptimeoption == 3: - if stoptime is None: - if self.trackingdirection == 1: - stoptime = self.parent.time_end - else: - stoptime = 0. - self.stoptime = stoptime - - # timepointdata - if timepointdata is not None: - if not isinstance(timepointdata, (list, tuple)): - msg = 'timepointdata must be a list or tuple' - raise ValueError(msg) - else: - if len(timepointdata) != 2: - msg = 'timepointdata must be a have 2 entries ' + \ - '({} provided)'.format(len(timepointdata)) - raise ValueError(msg) - else: - if isinstance(timepointdata[1], (list, tuple)): - timepointdata[1] = np.array(timepointdata[1]) - elif isinstance(timepointdata[1], float): - timepointdata[1] = np.array([timepointdata[1]]) - if timepointdata[1].shape[0] == timepointdata[0]: - timepointoption = 2 - elif timepointdata[1].shape[0] > 1: - msg = 'The number of TimePoint data ' + \ - '({}) '.format(timepointdata[1].shape[0]) + \ - 'is not equal to TimePointCount ' + \ - '({}).'.format(timepointdata[0]) - raise ValueError(msg) - else: - timepointoption = 1 - else: - timepointoption = 1 - timepointdata = [100, self.parent.time_end / 100.] - timepointdata[1] = np.array([timepointdata[1]]) - self.timepointoption = timepointoption - self.timepointdata = timepointdata - - # zonedataoption - try: - self.zonedataoption = onoffOpt[zonedataoption.lower()].value - except: - sim_enum_error('zonedataoption', zonedataoption, onoffOpt) - if self.zonedataoption == 2: - if stopzone is None: - stopzone = -1 - if stopzone < -1: - msg = 'Specified stopzone value ({}) '.format(stopzone) + \ - 'must be greater than 0.' - raise ValueError(msg) - self.stopzone = stopzone - if zones is None: - msg = "zones must be specified if zonedataoption='on'." - raise ValueError(msg) - self.zones = Util3d(model, self.parent.shape, np.int32, - zones, name='zones', locat=self.unit_number[0]) - - # retardationfactoroption - try: - self.retardationfactoroption = \ - onoffOpt[retardationfactoroption.lower()].value - except: - sim_enum_error('retardationfactoroption', - retardationfactoroption, onoffOpt) - if self.retardationfactoroption == 2: - if retardation is None: - msg = "retardation must be specified if " + \ - "retardationfactoroption='on'." - raise ValueError(msg) - self.retardation = Util3d(model, self.parent.shape, np.float32, - retardation, name='retardation', - locat=self.unit_number[0]) - # particle group data - if particlegroups is None: - particlegroups = [ParticleGroup()] - elif isinstance(particlegroups, - (ParticleGroup, - ParticleGroupLRCTemplate, - ParticleGroupNodeTemplate)): - particlegroups = [particlegroups] - self.particlegroups = particlegroups - - self.parent.add_package(self) - - def write_file(self, check=False): - """ - Write the package file - - Parameters - ---------- - check : boolean - Check package data for common errors. 
(default False) - - Returns - ------- - None - - """ - - f = open(self.fn_path, 'w') - # item 0 - f.write('{}\n'.format(self.heading)) - # item 1 - f.write('{}\n'.format(self.mp_name_file)) - # item 2 - f.write('{}\n'.format(self.listingfilename)) - # item 3 - f.write('{} {} {} {} {} {}\n'.format(self.simulationtype, - self.trackingdirection, - self.weaksinkoption, - self.weaksourceoption, - self.budgetoutputoption, - self.tracemode)) - # item 4 - f.write('{}\n'.format(self.endpointfilename)) - # item 5 - if self.simulationtype == 2 or self.simulationtype == 4: - f.write('{}\n'.format(self.pathlinefilename)) - # item 6 - if self.simulationtype == 3 or self.simulationtype == 4: - f.write('{}\n'.format(self.timeseriesfilename)) - # item 7 and 8 - if self.tracemode == 1: - f.write('{}\n'.format(self.tracefilename)) - f.write('{} {}\n'.format(self.traceparticlegroup + 1, - self.traceparticleid + 1)) - # item 9 - f.write('{}\n'.format(self.BudgetCellCount)) - # item 10 - if self.BudgetCellCount > 0: - v = Util2d(self.parent, (self.BudgetCellCount,), - np.int32, self.budgetcellnumbers.array + 1, - name='temp', - locat=self.unit_number[0]) - f.write(v.string) - - # item 11 - f.write('{}\n'.format(self.referencetimeOption)) - if self.referencetimeOption == 1: - # item 12 - f.write('{:g}\n'.format(self.referencetime[0])) - elif self.referencetimeOption == 2: - # item 13 - f.write('{:d} {:d} {:g}\n'.format(self.referencetime[0] + 1, - self.referencetime[1] + 1, - self.referencetime[2])) - # item 14 - f.write('{}\n'.format(self.stoptimeoption)) - if self.stoptimeoption == 3: - # item 15 - f.write('{:g}\n'.format(self.stoptime + 1)) - - # item 16 - if self.simulationtype == 3 or self.simulationtype == 4: - f.write('{}\n'.format(self.timepointoption)) - if self.timepointoption == 1: - # item 17 - f.write('{} {}\n'.format(self.timepointdata[0], - self.timepointdata[1][0])) - elif self.timepointoption == 2: - # item 18 - f.write('{}\n'.format(self.timepointdata[0])) - # item 19 - tp = self.timepointdata[1] - v = Util2d(self.parent, (tp.shape[0],), - np.float32, tp, - name='temp', - locat=self.unit_number[0]) - f.write(v.string) - - # item 20 - f.write('{}\n'.format(self.zonedataoption)) - if self.zonedataoption == 2: - # item 21 - f.write('{}\n'.format(self.stopzone)) - # item 22 - f.write(self.zones.get_file_entry()) - - # item 23 - f.write('{}\n'.format(self.retardationfactoroption)) - if self.retardationfactoroption == 2: - # item 24 - f.write(self.retardation.get_file_entry()) - - # item 25 - f.write('{}\n'.format(len(self.particlegroups))) - for pg in self.particlegroups: - pg.write(f, ws=self.parent.model_ws) - - f.close() +""" +mpsim module. Contains the ModpathSim class. Note that the user can access +the ModpathSim class as `flopy.modpath.ModpathSim`. + +Additional information for this MODFLOW/MODPATH package can be found at the +`Online MODFLOW Guide +`_. + +""" +from enum import Enum +import numpy as np +from ..pakbase import Package +from ..utils import Util2d, Util3d +from .mp7particlegroup import ParticleGroup, ParticleGroupLRCTemplate, \ + ParticleGroupNodeTemplate + + +def sim_enum_error(v, s, e): + """ + Standard enumeration error format error + Parameters + ---------- + v : str + Enumeration value + + s : str + User-defined value + + e : Enum class + Enumeration class + + Returns + ------- + + """ + msg = 'Invalid {} ({})'.format(v, s) + \ + '. 
Valid types are ' + for i, c in enumerate(e): + if i > 0: + msg += ', ' + msg += '"{}"'.format(c.name) + raise ValueError(msg) + + +class simType(Enum): + """ + Enumeration of different simulation types + """ + endpoint = 1 + pathline = 2 + timeseries = 3 + combined = 4 + + +class trackDir(Enum): + """ + Enumeration of different tracking directions + """ + forward = 1 + backward = 2 + + +class weakOpt(Enum): + """ + Enumeration of different weak sink and source options + """ + pass_through = 1 + stop_at = 2 + + +class budgetOpt(Enum): + """ + Enumeration of different budget output options + """ + no = 0 + summary = 1 + record_summary = 2 + + +class stopOpt(Enum): + """ + Enumeration of different stop time options + """ + total = 1 + extend = 2 + specified = 3 + + +class onoffOpt(Enum): + """ + Enumeration of on-off options + """ + off = 1 + on = 2 + + +class Modpath7Sim(Package): + """ + MODPATH Simulation File Package Class. + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.modpath.Modpath7`) to + which this package will be added. + mpnamefilename : str + Filename of the MODPATH 7 name file. If mpnamefilename is not + defined it will be generated from the model name + (default is None). + listingfilename : str + Filename of the MODPATH 7 listing file. If listingfilename is not + defined it will be generated from the model name + (default is None). + endpointfilename : str + Filename of the MODPATH 7 endpoint file. If endpointfilename is + not defined it will be generated from the model name + (default is None). + pathlinefilename : str + Filename of the MODPATH 7 pathline file. If pathlinefilename is + not defined it will be generated from the model name + (default is None). + timeseriesfilename : str + Filename of the MODPATH 7 timeseries file. If timeseriesfilename + is not defined it will be generated from the model name + (default is None). + tracefilename : str + Filename of the MODPATH 7 tracefile file. If tracefilename is not + defined it will be generated from the model name + (default is None). + simulationtype : str + MODPATH 7 simulation type. Valid simulation types are 'endpoint', + 'pathline', 'timeseries', or 'combined' (default is 'pathline'). + trackingdirection : str + MODPATH 7 tracking direction. Valid tracking directions are + 'forward' or 'backward' (default os 'forward'). + weaksinkoption : str + MODPATH 7 weak sink option. Valid weak sink options are + 'pass_through' or 'stop_at' (default value is 'stop_at'). + weaksourceoption : str + MODPATH 7 weak source option. Valid weak source options are + 'pass_through' or 'stop_at' (default value is 'stop_at'). + budgetoutputoption : str + MODPATH 7 budget output option. Valid budget output options are + 'no' - individual cell water balance errors are not computed + and budget record headers are not printed, 'summary' - a summary + of individual cell water balance errors for each time step is + printed in the listing file without record headers, or + 'record_summary' - a summary of individual cell water balance + errors for each time step is printed in the listing file with + record headers (default is 'summary'). + traceparticledata : list or tuple + List or tuple with two ints that define the particle group and + particle id (zero-based) of the specified particle that is + followed in detail. If traceparticledata is None, trace mode is + off (default is None). 
+    budgetcellnumbers : int, list of ints, tuple of ints, or np.ndarray
+        Cell numbers (zero-based) for which detailed water budgets are
+        computed. If budgetcellnumbers is None, detailed water budgets are
+        not calculated (default is None).
+    referencetime : float, list, or tuple
+        Specified reference time if a float or a list/tuple with a single
+        float value is provided (reference time option 1). Otherwise a
+        list or tuple with a zero-based stress period (int) and time
+        step (int) and a float defining the relative time position in the
+        time step is provided (reference time option 2). If referencetime
+        is None, reference time is set to 0 (default is None).
+    stoptimeoption : str
+        String indicating how a particle tracking simulation is
+        terminated based on time. If stop time option is 'total', particles
+        will be stopped at the end of the final time step if 'forward'
+        tracking is simulated or at the beginning of the first time step
+        if backward tracking. If stop time option is 'extend', initial or
+        final steady-state time steps will be extended and all particles
+        will be tracked until they reach a termination location. If stop
+        time option is 'specified', particles will be tracked until they
+        reach a termination location or the specified stop time is reached
+        (default is 'extend').
+    stoptime : float
+        User-specified value of tracking time at which to stop a particle
+        tracking simulation. Stop time is only used if the stop time option
+        is 'specified'. If stoptime is None and the stop time option is
+        'specified', particles will be terminated at the end of the last
+        time step if 'forward' tracking or at the beginning of the first
+        time step if 'backward' tracking (default is None).
+    timepointdata : list or tuple
+        List or tuple with 2 items that is only used if simulationtype is
+        'timeseries' or 'combined'. If the second item is a float, the
+        timepoint data correspond to time point option 1: the first
+        entry is the number of time points (timepointcount) and the
+        second entry is the time point interval. If the second item is a
+        list, tuple, or np.ndarray, the timepoint data correspond to time
+        point option 2: the first entry is the number of time points
+        (timepointcount) and the second entry is a list, tuple, or array
+        of user-defined time points. If timepointdata is None, time point
+        option 1 is specified and the total simulation time is split
+        into 100 intervals (default is None).
+    zonedataoption : str
+        If zonedataoption is 'off', zone array data are not read and a zone
+        value of 1 is applied to all cells. If zonedataoption is 'on',
+        zone array data are read (default is 'off').
+    stopzone : int
+        A zero-based specified integer zone value that indicates an
+        automatic stopping location for particles and is only used if
+        zonedataoption is 'on'. A value of -1 indicates no automatic stop
+        zone is used. Stopzone values less than -1 are not allowed. If
+        stopzone is None, stopzone is set to -1 (default is None).
+    zones : int or array of ints (nlay, nrow, ncol)
+        Array of zero-based positive integer zones that are only used if
+        zonedataoption is 'on' (default is 0).
+    retardationfactoroption : str
+        If retardationfactoroption is 'off', retardation array data are not
+        read and a retardation factor of 1 is applied to all cells. If
+        retardationfactoroption is 'on', retardation factor array data are
+        read (default is 'off').
+ retardation : float or array of floats (nlay, nrow, ncol) + Array of retardation factors that are only used if + retardationfactoroption is 'on' (default is 1). + particlegroups : ParticleGroup or list of ParticleGroups + ParticleGroup or list of ParticlesGroups that contain data for + individual particle groups. If None is specified, a + particle in the center of node 0 will be created (default is None). + extension : string + Filename extension (default is 'mpsim') + + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow.load('mf2005.nam') + >>> mp = flopy.modpath.Modpath7('mf2005_mp', flowmodel=m) + >>> mpsim = flopy.modpath.Modpath7Sim(mp) + + """ + + def __init__(self, model, mpnamefilename=None, listingfilename=None, + endpointfilename=None, pathlinefilename=None, + timeseriesfilename=None, tracefilename=None, + simulationtype='pathline', trackingdirection='forward', + weaksinkoption='stop_at', weaksourceoption='stop_at', + budgetoutputoption='no', + traceparticledata=None, + budgetcellnumbers=None, referencetime=None, + stoptimeoption='extend', stoptime=None, + timepointdata=None, + zonedataoption='off', stopzone=None, zones=0, + retardationfactoroption='off', retardation=1., + particlegroups=None, + extension='mpsim'): + """ + Package constructor. + + """ + + unitnumber = model.next_unit() + + # Call ancestor's init to set self.parent, extension, name and unit number + Package.__init__(self, model, extension, 'MPSIM', unitnumber) + + self.heading = '# {} package for'.format(self.name[0]) + \ + ' {}, '.format(model.version_types[model.version]) + \ + 'generated by Flopy.' + + # set file names + if mpnamefilename is None: + mpnamefilename = '{}.{}'.format(model.name, 'mpnam') + self.mp_name_file = mpnamefilename + if listingfilename is None: + listingfilename = '{}.{}'.format(model.name, 'mplst') + self.listingfilename = listingfilename + if endpointfilename is None: + endpointfilename = '{}.{}'.format(model.name, 'mpend') + self.endpointfilename = endpointfilename + if pathlinefilename is None: + pathlinefilename = '{}.{}'.format(model.name, 'mppth') + self.pathlinefilename = pathlinefilename + if timeseriesfilename is None: + timeseriesfilename = '{}.{}'.format(model.name, 'timeseries') + self.timeseriesfilename = timeseriesfilename + if tracefilename is None: + tracefilename = '{}.{}'.format(model.name, 'trace') + self.tracefilename = tracefilename + + try: + self.simulationtype = simType[simulationtype.lower()].value + except: + sim_enum_error('simulationtype', simulationtype, simType) + try: + self.trackingdirection = trackDir[trackingdirection.lower()].value + except: + sim_enum_error('trackingdirection', trackingdirection, + trackDir) + try: + self.weaksinkoption = weakOpt[weaksinkoption.lower()].value + except: + sim_enum_error('weaksinkoption', weaksinkoption, + weakOpt) + try: + self.weaksourceoption = weakOpt[weaksourceoption.lower()].value + except: + sim_enum_error('weaksourceoption', weaksourceoption, + weakOpt) + try: + self.budgetoutputoption = \ + budgetOpt[budgetoutputoption.lower()].value + except: + sim_enum_error('budgetoutputoption', budgetoutputoption, + budgetOpt) + # tracemode + if traceparticledata is None: + tracemode = 0 + traceparticlegroup = None + traceparticleid = None + else: + tracemode = 1 + if isinstance(traceparticledata, (list, tuple)): + if len(traceparticledata) != 2: + msg = 'traceparticledata must be a list or tuple ' + \ + 'with 2 items (a integer and an integer). 
' + \ + 'Passed item {}.'.format(traceparticledata) + raise ValueError(msg) + try: + traceparticlegroup = int(traceparticledata[0]) + except: + msg = 'traceparticledata[0] ' + \ + '({}) '.format(traceparticledata[0]) + \ + 'cannot be converted to a integer.' + raise ValueError(msg) + try: + traceparticleid = int(traceparticledata[1]) + except: + msg = 'traceparticledata[1] ' + \ + '({}) '.format(traceparticledata[0]) + \ + 'cannot be converted to a integer.' + raise ValueError(msg) + else: + msg = 'traceparticledata must be a list or ' + \ + 'tuple with 2 items (a integer and an integer).' + raise ValueError(msg) + + # set tracemode, traceparticlegroup, and traceparticleid + self.tracemode = tracemode + self.traceparticlegroup = traceparticlegroup + self.traceparticleid = traceparticleid + + if budgetcellnumbers is None: + BudgetCellCount = 0 + else: + if isinstance(budgetcellnumbers, int): + budgetcellnumbers = [budgetcellnumbers] + budgetcellnumbers = np.array(budgetcellnumbers, dtype=np.int32) + # validate budget cell numbers + ncells = np.prod(np.array(self.parent.shape)) + msg = '' + for cell in budgetcellnumbers: + if cell < 0 or cell >= ncells: + if msg == '': + msg = 'Specified cell number(s) exceed the ' + \ + 'number of cells in the model ' + \ + '(Valid cells = 0-{}). '.format(ncells - 1) + \ + 'Invalid cells are: ' + else: + msg += ', ' + msg += '{}'.format(cell) + if msg != '': + raise ValueError(msg) + # create Util2d object + BudgetCellCount = budgetcellnumbers.shape[0] + self.budgetcellnumbers = Util2d(self.parent, (BudgetCellCount,), + np.int32, budgetcellnumbers, + name='budgetcellnumbers', + locat=self.unit_number[0]) + self.BudgetCellCount = BudgetCellCount + + if referencetime is None: + referencetime = 0. + if isinstance(referencetime, float): + referencetime = [referencetime] + elif isinstance(referencetime, np.ndarray): + referencetime = referencetime.tolist() + if len(referencetime) == 1: + referencetimeOption = 1 + # validate referencetime data + t = referencetime[0] + if t < 0. or t > self.parent.time_end: + msg = 'referencetime must be between 0. and ' + \ + '{} '.format(self.parent.time_end) + \ + '(specified value = {}).'.format(t) + raise ValueError(msg) + elif len(referencetime) == 3: + referencetimeOption = 2 + # validate referencetime data + # StressPeriod + iper = referencetime[0] + if iper < 0 or iper >= self.parent.nper: + msg = 'StressPeriod must be between 0 and ' + \ + '{} '.format(self.parent.nper - 1) + \ + '(specified value = {}).'.format(iper) + raise ValueError(msg) + + # TimeStep + istp = referencetime[1] + maxstp = self.parent.nstp[iper] + 1 + if istp < 0 or istp >= maxstp: + msg = 'TimeStep for StressPeriod {} '.format(iper) + \ + 'must be between 0 and ' + \ + '{} '.format(maxstp - 1) + \ + '(specified value = {}).'.format(istp) + raise ValueError(msg) + + # TimeFraction + tf = referencetime[2] + if tf < 0. or tf > 1.: + msg = 'TimeFraction value must be between 0 and 1 ' + \ + '(specified value={}).'.format(tf) + raise ValueError(msg) + else: + msg = 'referencetime must be a float (referencetime) or ' + \ + 'a list with one item [referencetime] or three items ' + \ + '[StressPeriod, TimeStep, TimeFraction]. ' + \ + '{}'.format(len(referencetime)) + \ + ' items were passed as referencetime [' + for i, v in enumerate(referencetime): + if i > 0: + msg += ', ' + msg += '{}'.format(v) + msg += '].' 
+
+        # stoptimeoption
+        try:
+            self.stoptimeoption = \
+                stopOpt[stoptimeoption.lower()].value
+        except (AttributeError, KeyError):
+            sim_enum_error('stoptimeoption', stoptimeoption,
+                           stopOpt)
+        # stoptime
+        if self.stoptimeoption == 3:
+            if stoptime is None:
+                if self.trackingdirection == 1:
+                    stoptime = self.parent.time_end
+                else:
+                    stoptime = 0.
+        self.stoptime = stoptime
+
+        # timepointdata
+        if timepointdata is not None:
+            if not isinstance(timepointdata, (list, tuple)):
+                msg = 'timepointdata must be a list or tuple'
+                raise ValueError(msg)
+            else:
+                if len(timepointdata) != 2:
+                    msg = 'timepointdata must have 2 entries ' + \
+                          '({} provided)'.format(len(timepointdata))
+                    raise ValueError(msg)
+                else:
+                    if isinstance(timepointdata[1], (list, tuple)):
+                        timepointdata[1] = np.array(timepointdata[1])
+                    elif isinstance(timepointdata[1], float):
+                        timepointdata[1] = np.array([timepointdata[1]])
+                    if timepointdata[1].shape[0] == timepointdata[0]:
+                        timepointoption = 2
+                    elif timepointdata[1].shape[0] > 1:
+                        msg = 'The number of TimePoint data ' + \
+                              '({}) '.format(timepointdata[1].shape[0]) + \
+                              'is not equal to TimePointCount ' + \
+                              '({}).'.format(timepointdata[0])
+                        raise ValueError(msg)
+                    else:
+                        timepointoption = 1
+        else:
+            timepointoption = 1
+            timepointdata = [100, self.parent.time_end / 100.]
+            timepointdata[1] = np.array([timepointdata[1]])
+        self.timepointoption = timepointoption
+        self.timepointdata = timepointdata
+
+        # zonedataoption
+        try:
+            self.zonedataoption = onoffOpt[zonedataoption.lower()].value
+        except (AttributeError, KeyError):
+            sim_enum_error('zonedataoption', zonedataoption, onoffOpt)
+        if self.zonedataoption == 2:
+            if stopzone is None:
+                stopzone = -1
+            if stopzone < -1:
+                msg = 'Specified stopzone value ({}) '.format(stopzone) + \
+                      'must be greater than or equal to -1.'
+                raise ValueError(msg)
+            self.stopzone = stopzone
+            if zones is None:
+                msg = "zones must be specified if zonedataoption='on'."
+                raise ValueError(msg)
+            self.zones = Util3d(model, self.parent.shape, np.int32,
+                                zones, name='zones',
+                                locat=self.unit_number[0])
+
+        # retardationfactoroption
+        try:
+            self.retardationfactoroption = \
+                onoffOpt[retardationfactoroption.lower()].value
+        except (AttributeError, KeyError):
+            sim_enum_error('retardationfactoroption',
+                           retardationfactoroption, onoffOpt)
+        if self.retardationfactoroption == 2:
+            if retardation is None:
+                msg = "retardation must be specified if " + \
+                      "retardationfactoroption='on'."
+                raise ValueError(msg)
+            self.retardation = Util3d(model, self.parent.shape, np.float32,
+                                      retardation, name='retardation',
+                                      locat=self.unit_number[0])
+        # particle group data
+        if particlegroups is None:
+            particlegroups = [ParticleGroup()]
+        elif isinstance(particlegroups,
+                        (ParticleGroup,
+                         ParticleGroupLRCTemplate,
+                         ParticleGroupNodeTemplate)):
+            particlegroups = [particlegroups]
+        self.particlegroups = particlegroups
+
+        self.parent.add_package(self)
+
+    def write_file(self, check=False):
+        """
+        Write the package file
+
+        Parameters
+        ----------
+        check : boolean
+            Check package data for common errors.
(default False) + + Returns + ------- + None + + """ + + f = open(self.fn_path, 'w') + # item 0 + f.write('{}\n'.format(self.heading)) + # item 1 + f.write('{}\n'.format(self.mp_name_file)) + # item 2 + f.write('{}\n'.format(self.listingfilename)) + # item 3 + f.write('{} {} {} {} {} {}\n'.format(self.simulationtype, + self.trackingdirection, + self.weaksinkoption, + self.weaksourceoption, + self.budgetoutputoption, + self.tracemode)) + # item 4 + f.write('{}\n'.format(self.endpointfilename)) + # item 5 + if self.simulationtype == 2 or self.simulationtype == 4: + f.write('{}\n'.format(self.pathlinefilename)) + # item 6 + if self.simulationtype == 3 or self.simulationtype == 4: + f.write('{}\n'.format(self.timeseriesfilename)) + # item 7 and 8 + if self.tracemode == 1: + f.write('{}\n'.format(self.tracefilename)) + f.write('{} {}\n'.format(self.traceparticlegroup + 1, + self.traceparticleid + 1)) + # item 9 + f.write('{}\n'.format(self.BudgetCellCount)) + # item 10 + if self.BudgetCellCount > 0: + v = Util2d(self.parent, (self.BudgetCellCount,), + np.int32, self.budgetcellnumbers.array + 1, + name='temp', + locat=self.unit_number[0]) + f.write(v.string) + + # item 11 + f.write('{}\n'.format(self.referencetimeOption)) + if self.referencetimeOption == 1: + # item 12 + f.write('{:g}\n'.format(self.referencetime[0])) + elif self.referencetimeOption == 2: + # item 13 + f.write('{:d} {:d} {:g}\n'.format(self.referencetime[0] + 1, + self.referencetime[1] + 1, + self.referencetime[2])) + # item 14 + f.write('{}\n'.format(self.stoptimeoption)) + if self.stoptimeoption == 3: + # item 15 + f.write('{:g}\n'.format(self.stoptime + 1)) + + # item 16 + if self.simulationtype == 3 or self.simulationtype == 4: + f.write('{}\n'.format(self.timepointoption)) + if self.timepointoption == 1: + # item 17 + f.write('{} {}\n'.format(self.timepointdata[0], + self.timepointdata[1][0])) + elif self.timepointoption == 2: + # item 18 + f.write('{}\n'.format(self.timepointdata[0])) + # item 19 + tp = self.timepointdata[1] + v = Util2d(self.parent, (tp.shape[0],), + np.float32, tp, + name='temp', + locat=self.unit_number[0]) + f.write(v.string) + + # item 20 + f.write('{}\n'.format(self.zonedataoption)) + if self.zonedataoption == 2: + # item 21 + f.write('{}\n'.format(self.stopzone)) + # item 22 + f.write(self.zones.get_file_entry()) + + # item 23 + f.write('{}\n'.format(self.retardationfactoroption)) + if self.retardationfactoroption == 2: + # item 24 + f.write(self.retardation.get_file_entry()) + + # item 25 + f.write('{}\n'.format(len(self.particlegroups))) + for pg in self.particlegroups: + pg.write(f, ws=self.parent.model_ws) + + f.close() diff --git a/flopy/modpath/mpbas.py b/flopy/modpath/mpbas.py index fd6549a8a6..f6960d0736 100644 --- a/flopy/modpath/mpbas.py +++ b/flopy/modpath/mpbas.py @@ -1,151 +1,151 @@ -""" -mpbas module. Contains the ModpathBas class. Note that the user can access -the ModpathBas class as `flopy.modflow.ModpathBas`. - -Additional information for this MODFLOW/MODPATH package can be found at the `Online -MODFLOW Guide -`_. - -""" -import numpy as np -from ..pakbase import Package -from ..utils import Util2d, Util3d - - -class ModpathBas(Package): - """ - MODPATH Basic Package Class. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modpath.mp.Modpath`) to which - this package will be added. - hnoflo : float - Head value assigned to inactive cells (default is -9999.). - hdry : float - Head value assigned to dry cells (default is -8888.). 
- def_face_ct : int - Number fo default iface codes to read (default is 0). - bud_label : str or list of strs - MODFLOW budget item to which a default iface is assigned. - def_iface : int or list of ints - Cell face (iface) on which to assign flows from MODFLOW budget file. - laytyp : int or list of ints - MODFLOW layer type (0 is convertible, 1 is confined). - ibound : array of ints, optional - The ibound array (the default is 1). - prsity : array of ints, optional - The porosity array (the default is 0.30). - prsityCB : array of ints, optional - The porosity array for confining beds (the default is 0.30). - extension : str, optional - File extension (default is 'mpbas'). - - Attributes - ---------- - heading : str - Text string written to top of package input file. - - Methods - ------- - - See Also - -------- - - Notes - ----- - - Examples - -------- - - >>> import flopy - >>> m = flopy.modpath.Modpath() - >>> mpbas = flopy.modpath.ModpathBas(m) - - """ - - def __init__(self, model, hnoflo=-9999., hdry=-8888., - def_face_ct=0, bud_label=None, def_iface=None, - laytyp=0, ibound=1, prsity=0.30, prsityCB=0.30, - extension='mpbas', unitnumber=86): - """ - Package constructor. - - """ - Package.__init__(self, model, extension, 'MPBAS', unitnumber) - nrow, ncol, nlay, nper = self.parent.mf.nrow_ncol_nlay_nper - self.parent.mf.get_name_file_entries() - self.heading1 = '# MPBAS for Modpath, generated by Flopy.' - self.heading2 = '#' - self.hnoflo = hnoflo - self.hdry = hdry - self.def_face_ct = def_face_ct - self.bud_label = bud_label - self.def_iface = def_iface - self.laytyp = laytyp - self.ibound = Util3d(model, (nlay, nrow, ncol), np.int32, ibound, - name='ibound', locat=self.unit_number[0]) - - self.prsity = prsity - self.prsityCB = prsityCB - self.prsity = Util3d(model, (nlay, nrow, ncol), np.float32, \ - prsity, name='prsity', locat=self.unit_number[0]) - self.prsityCB = Util3d(model, (nlay, nrow, ncol), np.float32, \ - prsityCB, name='prsityCB', - locat=self.unit_number[0]) - self.parent.add_package(self) - - def write_file(self): - """ - Write the package file - - Returns - ------- - None - - """ - nrow, ncol, nlay, nper = self.parent.mf.nrow_ncol_nlay_nper - ModflowDis = self.parent.mf.get_package('DIS') - # Open file for writing - f_bas = open(self.fn_path, 'w') - f_bas.write('#{0:s}\n#{1:s}\n'.format(self.heading1, self.heading2)) - f_bas.write('{0:16.6f} {1:16.6f}\n' \ - .format(self.hnoflo, self.hdry)) - f_bas.write('{0:4d}\n' \ - .format(self.def_face_ct)) - if self.def_face_ct > 0: - for i in range(self.def_face_ct): - f_bas.write('{0:20s}\n'.format(self.bud_label[i])) - f_bas.write('{0:2d}\n'.format(self.def_iface[i])) - # f_bas.write('\n') - - flow_package = self.parent.mf.get_package('BCF6') - if (flow_package != None): - lc = Util2d(self.parent, (nlay,), np.int32, \ - flow_package.laycon.get_value(), name='bas - laytype', \ - locat=self.unit_number[0]) - else: - flow_package = self.parent.mf.get_package('LPF') - if (flow_package != None): - lc = Util2d(self.parent, (nlay,), \ - np.int32, flow_package.laytyp.get_value(), \ - name='bas - laytype', locat=self.unit_number[0]) - else: - flow_package = self.parent.mf.get_package('UPW') - if (flow_package != None): - lc = Util2d(self.parent, (nlay,), \ - np.int32, flow_package.laytyp.get_value(), \ - name='bas - laytype', - locat=self.unit_number[0]) - # need to reset lc fmtin - lc.set_fmtin('(40I2)') - f_bas.write(lc.string) - # from modpath bas--uses keyword array types - f_bas.write(self.ibound.get_file_entry()) - # from MT3D 
bas--uses integer array types
-        # f_bas.write(self.ibound.get_file_entry())
-        f_bas.write(self.prsity.get_file_entry())
-        f_bas.write(self.prsityCB.get_file_entry())
-
-        f_bas.close()
+"""
+mpbas module. Contains the ModpathBas class. Note that the user can access
+the ModpathBas class as `flopy.modpath.ModpathBas`.
+
+Additional information for this MODFLOW/MODPATH package can be found at the `Online
+MODFLOW Guide
+`_.
+
+"""
+import numpy as np
+from ..pakbase import Package
+from ..utils import Util2d, Util3d
+
+
+class ModpathBas(Package):
+    """
+    MODPATH Basic Package Class.
+
+    Parameters
+    ----------
+    model : model object
+        The model object (of type :class:`flopy.modpath.mp.Modpath`) to which
+        this package will be added.
+    hnoflo : float
+        Head value assigned to inactive cells (default is -9999.).
+    hdry : float
+        Head value assigned to dry cells (default is -8888.).
+    def_face_ct : int
+        Number of default iface codes to read (default is 0).
+    bud_label : str or list of strs
+        MODFLOW budget item to which a default iface is assigned.
+    def_iface : int or list of ints
+        Cell face (iface) on which to assign flows from MODFLOW budget file.
+    laytyp : int or list of ints
+        MODFLOW layer type (0 is convertible, 1 is confined).
+    ibound : array of ints, optional
+        The ibound array (the default is 1).
+    prsity : array of floats, optional
+        The porosity array (the default is 0.30).
+    prsityCB : array of floats, optional
+        The porosity array for confining beds (the default is 0.30).
+    extension : str, optional
+        File extension (default is 'mpbas').
+
+    Attributes
+    ----------
+    heading : str
+        Text string written to top of package input file.
+
+    Methods
+    -------
+
+    See Also
+    --------
+
+    Notes
+    -----
+
+    Examples
+    --------
+
+    >>> import flopy
+    >>> m = flopy.modpath.Modpath()
+    >>> mpbas = flopy.modpath.ModpathBas(m)
+
+    """
+
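A sketch extending the docstring example above with non-default arguments; the budget label and iface code are illustrative choices, not requirements (in MODPATH, IFACE 6 is the cell top face, which is the usual assignment for recharge):

import flopy
m = flopy.modpath.Modpath()
# assign a default iface of 6 (top face) to the RECHARGE budget term,
# with uniform 30 percent porosity everywhere
mpbas = flopy.modpath.ModpathBas(m, hnoflo=-9999., hdry=-8888.,
                                 def_face_ct=1, bud_label=['RECHARGE'],
                                 def_iface=[6], laytyp=0, ibound=1,
                                 prsity=0.30, prsityCB=0.30)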
+    def __init__(self, model, hnoflo=-9999., hdry=-8888.,
+                 def_face_ct=0, bud_label=None, def_iface=None,
+                 laytyp=0, ibound=1, prsity=0.30, prsityCB=0.30,
+                 extension='mpbas', unitnumber=86):
+        """
+        Package constructor.
+
+        """
+        Package.__init__(self, model, extension, 'MPBAS', unitnumber)
+        nrow, ncol, nlay, nper = self.parent.mf.nrow_ncol_nlay_nper
+        self.parent.mf.get_name_file_entries()
+        self.heading1 = '# MPBAS for Modpath, generated by Flopy.'
+        self.heading2 = '#'
+        self.hnoflo = hnoflo
+        self.hdry = hdry
+        self.def_face_ct = def_face_ct
+        self.bud_label = bud_label
+        self.def_iface = def_iface
+        self.laytyp = laytyp
+        self.ibound = Util3d(model, (nlay, nrow, ncol), np.int32, ibound,
+                             name='ibound', locat=self.unit_number[0])
+
+        self.prsity = prsity
+        self.prsityCB = prsityCB
+        self.prsity = Util3d(model, (nlay, nrow, ncol), np.float32,
+                             prsity, name='prsity',
+                             locat=self.unit_number[0])
+        self.prsityCB = Util3d(model, (nlay, nrow, ncol), np.float32,
+                               prsityCB, name='prsityCB',
+                               locat=self.unit_number[0])
+        self.parent.add_package(self)
+
+    def write_file(self):
+        """
+        Write the package file
+
+        Returns
+        -------
+        None
+
+        """
+        nrow, ncol, nlay, nper = self.parent.mf.nrow_ncol_nlay_nper
+        ModflowDis = self.parent.mf.get_package('DIS')
+        # Open file for writing
+        f_bas = open(self.fn_path, 'w')
+        f_bas.write('#{0:s}\n#{1:s}\n'.format(self.heading1, self.heading2))
+        f_bas.write('{0:16.6f} {1:16.6f}\n'
+                    .format(self.hnoflo, self.hdry))
+        f_bas.write('{0:4d}\n'
+                    .format(self.def_face_ct))
+        if self.def_face_ct > 0:
+            for i in range(self.def_face_ct):
+                f_bas.write('{0:20s}\n'.format(self.bud_label[i]))
+                f_bas.write('{0:2d}\n'.format(self.def_iface[i]))
+        # f_bas.write('\n')
+
+        flow_package = self.parent.mf.get_package('BCF6')
+        if flow_package is not None:
+            lc = Util2d(self.parent, (nlay,), np.int32,
+                        flow_package.laycon.get_value(),
+                        name='bas - laytype', locat=self.unit_number[0])
+        else:
+            flow_package = self.parent.mf.get_package('LPF')
+            if flow_package is not None:
+                lc = Util2d(self.parent, (nlay,),
+                            np.int32, flow_package.laytyp.get_value(),
+                            name='bas - laytype',
+                            locat=self.unit_number[0])
+            else:
+                flow_package = self.parent.mf.get_package('UPW')
+                if flow_package is not None:
+                    lc = Util2d(self.parent, (nlay,),
+                                np.int32, flow_package.laytyp.get_value(),
+                                name='bas - laytype',
+                                locat=self.unit_number[0])
+        # need to reset lc fmtin
+        lc.set_fmtin('(40I2)')
+        f_bas.write(lc.string)
+        # from modpath bas--uses keyword array types
+        f_bas.write(self.ibound.get_file_entry())
+        # from MT3D bas--uses integer array types
+        # f_bas.write(self.ibound.get_file_entry())
+        f_bas.write(self.prsity.get_file_entry())
+        f_bas.write(self.prsityCB.get_file_entry())
+
+        f_bas.close()
diff --git a/flopy/modpath/mpsim.py b/flopy/modpath/mpsim.py
index 16917a122c..963430d19b 100644
--- a/flopy/modpath/mpsim.py
+++ b/flopy/modpath/mpsim.py
@@ -1,423 +1,423 @@
-"""
-mpsim module. Contains the ModpathSim class. Note that the user can access
-the ModpathSim class as `flopy.modpath.ModpathSim`.
-
-Additional information for this MODFLOW/MODPATH package can be found at the `Online
-MODFLOW Guide
-`_.
-
-"""
-import numpy as np
-from ..pakbase import Package
-from ..utils import Util3d
-
-
-class ModpathSim(Package):
-    """
-    MODPATH Simulation File Package Class.
-
-    Parameters
-    ----------
-    model : model object
-        The model object (of type :class:`flopy.modpath.mp.Modpath`) to which
-        this package will be added.
-    extension : string
-        Filename extension (default is 'mpsim')
-
-
-    Attributes
-    ----------
-    heading : str
-        Text string written to top of package input file.
- - Methods - ------- - - See Also - -------- - - Notes - ----- - - Examples - -------- - - >>> import flopy - >>> m = flopy.modpath.Modpath() - >>> dis = flopy.modpath.ModpathSim(m) - - """ - - def __init__(self, model, mp_name_file='mp.nam', mp_list_file='mp.list', - option_flags=[1, 2, 1, 1, 1, 2, 2, 1, 2, 1, 1, 1], - ref_time=0, ref_time_per_stp=[0, 0, 1.0], stop_time=None, - group_name=['group_1'], group_placement=[[1, 1, 1, 0, 1, 1]], - release_times=[[1, 1]], - group_region=[[1, 1, 1, 1, 1, 1]], mask_nlay=[1], - mask_layer=[1], mask_1lay=[1], face_ct=[1], - ifaces=[[6, 1, 1]], part_ct=[[1, 1, 1]], - time_ct=1, release_time_incr=1, time_pts=[1], - particle_cell_cnt=[[2, 2, 2]], - cell_bd_ct=1, bud_loc=[[1, 1, 1, 1]], trace_id=1, stop_zone=1, - zone=1, retard_fac=1.0, retard_fcCB=1.0, strt_file=None, - extension='mpsim'): - - # Call ancestor's init to set self.parent, extension, name and unit number - Package.__init__(self, model, extension, 'MPSIM', 32) - nrow, ncol, nlay, nper = self.parent.mf.nrow_ncol_nlay_nper - - self.heading1 = '# MPSIM for Modpath, generated by Flopy.' - self.heading2 = '#' - self.mp_name_file = '{}.{}'.format(model.name, 'mpnam') - self.mp_list_file = '{}.{}'.format(model.name, 'mplst') - options_list = ['SimulationType', 'TrackingDirection', - 'WeakSinkOption', 'WeakSourceOption', - 'ReferenceTimeOption', 'StopOption', - 'ParticleGenerationOption', 'TimePointOption', - 'BudgetOutputOption', 'ZoneArrayOption', - 'RetardationOption', - 'AdvectiveObservationsOption'] - self.option_flags = option_flags - options_dict = dict(list(zip(options_list, option_flags))) - self.options_dict = options_dict - self.endpoint_file = '{}.{}'.format(model.name, 'mpend') - self.pathline_file = '{}.{}'.format(model.name, 'mppth') - self.time_ser_file = '{}.{}'.format(model.name, 'mp.tim_ser') - self.advobs_file = '{}.{}'.format(model.name, '.mp.advobs') - self.ref_time = ref_time - self.ref_time_per_stp = ref_time_per_stp - self.stop_time = stop_time - self.group_ct = len(group_name) - self.group_name = group_name - self.group_placement = group_placement - self.release_times = release_times - self.group_region = group_region - self.mask_nlay = mask_nlay - self.mask_layer = mask_layer - self.mask_1lay = mask_1lay - self.face_ct = face_ct - self.ifaces = ifaces - self.part_ct = part_ct - self.strt_file = '{}.{}'.format(model.name, 'loc') - if strt_file is not None: - self.strt_file = strt_file - self.time_ct = time_ct - self.release_time_incr = release_time_incr - self.time_pts = time_pts - self.particle_cell_cnt = particle_cell_cnt - self.cell_bd_ct = cell_bd_ct - self.bud_loc = bud_loc - self.trace_file = '{}.{}'.format(model.name, 'trace_file.txt') - self.trace_id = trace_id - self.stop_zone = stop_zone - self.zone = Util3d(model, (nlay, nrow, ncol), np.int32, \ - zone, name='zone', locat=self.unit_number[0]) - self.retard_fac = retard_fac - self.retard_fcCB = retard_fcCB - - # self.mask_nlay = Util3d(model,(nlay,nrow,ncol),np.int32,\ - # mask_nlay,name='mask_nlay',locat=self.unit_number[0]) - # self.mask_1lay = Util3d(model,(nlay,nrow,ncol),np.int32,\ - # mask_1lay,name='mask_1lay',locat=self.unit_number[0]) - # self.stop_zone = Util3d(model,(nlay,nrow,ncol),np.int32,\ - # stop_zone,name='stop_zone',locat=self.unit_number[0]) - # self.retard_fac = Util3d(model,(nlay,nrow,ncol),np.float32,\ - # retard_fac,name='retard_fac',locat=self.unit_number[0]) - # self.retard_fcCB = Util3d(model,(nlay,nrow,ncol),np.float32,\ - # retard_fcCB,name='retard_fcCB',locat=self.unit_number[0]) - - 
self.parent.add_package(self) - - def check(self, f=None, verbose=True, level=1, checktype=None): - """ - Check package data for common errors. - - Parameters - ---------- - f : str or file handle - String defining file name or file handle for summary file - of check method output. If a sting is passed a file handle - is created. If f is None, check method does not write - results to a summary file. (default is None) - verbose : bool - Boolean flag used to determine if check method results are - written to the screen - level : int - Check method analysis level. If level=0, summary checks are - performed. If level=1, full checks are performed. - - Returns - ------- - None - - Examples - -------- - """ - chk = self._get_check(f, verbose, level, checktype) - - # MODPATH apparently produces no output if stoptime > last timepoint - if self.options_dict['StopOption'] == 3 and self.options_dict[ - 'TimePointOption'] == 3: - if self.time_pts[-1] < self.stop_time: - chk._add_to_summary(type='Error', value=self.stop_time, - desc='Stop time greater than last TimePoint') - else: - chk.append_passed('Valid stop time') - chk.summarize() - return chk - - def write_file(self): - """ - Write the package file - - Returns - ------- - None - - """ - # item numbers and CamelCase variable names correspond to Modpath 6 documentation - nrow, ncol, nlay, nper = self.parent.mf.nrow_ncol_nlay_nper - - f_sim = open(self.fn_path, 'w') - # item 0 - f_sim.write('#{0:s}\n#{1:s}\n'.format(self.heading1, self.heading2)) - # item 1 - f_sim.write('{0:s}\n'.format(self.mp_name_file)) - # item 2 - f_sim.write('{0:s}\n'.format(self.mp_list_file)) - # item 3 - for i in range(12): - f_sim.write('{0:4d}'.format(self.option_flags[i])) - f_sim.write('\n') - - # item 4 - f_sim.write('{0:s}\n'.format(self.endpoint_file)) - # item 5 - if self.options_dict['SimulationType'] == 2: - f_sim.write('{0:s}\n'.format(self.pathline_file)) - # item 6 - if self.options_dict['SimulationType'] == 3: - f_sim.write('{0:s}\n'.format(self.time_ser_file)) - # item 7 - if self.options_dict['AdvectiveObservationsOption'] == 2 and \ - self.option_dict['SimulationType'] == 3: - f_sim.write('{0:s}\n'.format(self.advobs_file)) - - # item 8 - if self.options_dict['ReferenceTimeOption'] == 1: - f_sim.write('{0:f}\n'.format(self.ref_time)) - # item 9 - if self.options_dict['ReferenceTimeOption'] == 2: - Period, Step, TimeFraction = self.ref_time_per_stp - f_sim.write( - '{0:d} {1:d} {2:f}\n'.format(Period + 1, Step + 1, - TimeFraction)) - - # item 10 - if self.options_dict['StopOption'] == 3: - f_sim.write('{0:f}\n'.format(self.stop_time)) - - if self.options_dict['ParticleGenerationOption'] == 1: - # item 11 - f_sim.write('{0:d}\n'.format(self.group_ct)) - for i in range(self.group_ct): - # item 12 - f_sim.write('{0:s}\n'.format(self.group_name[i])) - # item 13 - Grid, GridCellRegionOption, PlacementOption, ReleaseStartTime, ReleaseOption, CHeadOption = \ - self.group_placement[i] - f_sim.write( - '{0:d} {1:d} {2:d} {3:f} {4:d} {5:d}\n'.format(Grid, - GridCellRegionOption, - PlacementOption, - ReleaseStartTime, - ReleaseOption, - CHeadOption)) - # item 14 - if ReleaseOption == 2: - ReleasePeriodLength, ReleaseEventCount = \ - self.release_times[i] - f_sim.write('{0:f} {1:d}\n'.format(ReleasePeriodLength, - ReleaseEventCount)) - # item 15 - if GridCellRegionOption == 1: - MinLayer, MinRow, MinColumn, MaxLayer, MaxRow, MaxColumn = \ - self.group_region[i] - f_sim.write('{0:d} {1:d} {2:d} {3:d} {4:d} {5:d}\n'.format( - MinLayer + 1, MinRow + 1, MinColumn + 1, - 
MaxLayer + 1, MaxRow + 1, MaxColumn + 1)) - # item 16 - if GridCellRegionOption == 2: - f_sim.write(self.mask_nlay[i].get_file_entry()) - # item 17 - if GridCellRegionOption == 3: - f_sim.write('{0:s}\n'.format(self.mask_layer[i])) - # item 18 - f_sim.write(self.mask_1lay[i].get_file_entry()) - # item 19 and 20 - if PlacementOption == 1: - f_sim.write('{0:d}\n'.format(self.face_ct[i])) - # item 20 - for j in range(self.face_ct[i]): - IFace, ParticleRowCount, ParticleColumnCount = \ - self.ifaces[i][j] - f_sim.write('{0:d} {1:d} {2:d} \n'.format(IFace, - ParticleRowCount, - ParticleColumnCount)) - # item 21 - elif PlacementOption == 2: - ParticleLayerCount, ParticleRowCount, ParticleColumnCount = \ - self.particle_cell_cnt[i] - f_sim.write( - '{0:d} {1:d} {2:d} \n'.format(ParticleLayerCount, - ParticleRowCount, - ParticleColumnCount)) - - # item 22 - if self.options_dict['ParticleGenerationOption'] == 2: - f_sim.write('{0:s}\n'.format(self.strt_file)) - - if self.options_dict['TimePointOption'] != 1: - # item 23 - if self.options_dict['TimePointOption'] == 2 or \ - self.options_dict['TimePointOption'] == 3: - f_sim.write('{0:d}\n'.format(self.time_ct)) - # item 24 - if self.options_dict['TimePointOption'] == 2: - f_sim.write('{0:f}\n'.format(self.release_time_incr)) - # item 25 - if self.options_dict['TimePointOption'] == 3: - for r in range(self.time_ct): - f_sim.write('{0:f}\n'.format(self.time_pts[r])) - - if self.options_dict['BudgetOutputOption'] != 1 or \ - self.options_dict['BudgetOutputOption'] != 2: - # item 26 - if self.options_dict['BudgetOutputOption'] == 3: - f_sim.write('{0:d}\n'.format(self.cell_bd_ct)) - # item 27 - for k in range(self.cell_bd_ct): - Grid, Layer, Row, Column = self.bud_loc[k] - f_sim.write( - '{0:d} {1:d} {2:d} {3:d} \n'.format(Grid, Layer + 1, - Row + 1, - Column + 1)) - if self.options_dict['BudgetOutputOption'] == 4: - # item 28 - f_sim.write('{0:s}\n'.format(self.trace_file)) - # item 29 - f_sim.write('{0:s}\n'.format(self.trace_id)) - - if self.options_dict['ZoneArrayOption'] != 1: - # item 30 - f_sim.write('{0:d}\n'.format(self.stop_zone)) - # item 31 - f_sim.write(self.zone.get_file_entry()) - - if self.options_dict['RetardationOption'] != 1: - # item 32 - f_sim.write(self.retard_fac.get_file_entry()) - # item 33 - f_sim.write(self.retard_fcCB.get_file_entry()) - - f_sim.close() - - -class StartingLocationsFile(Package): - """ - Class for working with MODPATH Starting Locations file for particles. - - Parameters - ---------- - model : Modpath object - The model object (of type :class:`flopy.modpath.mp.Modpath`) to which - this package will be added. - inputstyle : 1 - Input style described in MODPATH6 manual (currently only input style 1 is supported) - extension : string - Filename extension (default is 'loc') - """ - - def __init__(self, model, - inputstyle=1, - extension='loc', - verbose=False): - - Package.__init__(self, model, extension, 'LOC', 33) - - self.model = model - self.heading = '# Starting locations file for Modpath, generated by Flopy.' - self.input_style = inputstyle - if inputstyle != 1: - raise NotImplementedError - self.data = self.get_empty_starting_locations_data(0) - self.extension = extension - - self.parent.add_package( - self) # add to package list so location are written with other ModPath files - - @staticmethod - def get_dtypes(): - """ - Build numpy dtype for the MODPATH 6 starting locations file. 
- """ - dtype = np.dtype([("particleid", np.int), ("particlegroup", np.int), - ('initialgrid', np.int), - ('k0', np.int), ('i0', np.int), - ('j0', np.int), ('xloc0', np.float32), - ('yloc0', np.float32), ('zloc0', np.float32), - ('initialtime', np.float32), - ('label', '|S40'), ('groupname', '|S16')]) - return dtype - - @staticmethod - def get_empty_starting_locations_data(npt=0, - default_xloc0=0.5, default_yloc0=0.5, - default_zloc0=0.): - """get an empty recarray for particle starting location info. - - Parameters - ---------- - npt : int - Number of particles. Particles in array will be numbered consecutively from 1 to npt. - - """ - dtype = StartingLocationsFile.get_dtypes() - d = np.zeros(npt, dtype=dtype) - d = d.view(np.recarray) - d['particleid'] = np.arange(1, npt + 1) - d['particlegroup'] = 1 - d['initialgrid'] = 1 - d['xloc0'] = default_xloc0 - d['yloc0'] = default_yloc0 - d['zloc0'] = default_zloc0 - d['groupname'] = 'group1' - return d - - def write_file(self, data=None, float_format='{:.8f}'): - - if data is None: - data = self.data - if len(data) == 0: - print('No data to write!') - return - data = data.copy() - data['k0'] += 1 - data['i0'] += 1 - data['j0'] += 1 - with open(self.fn_path, 'w') as output: - output.write('{}\n'.format(self.heading)) - output.write('{:d}\n'.format(self.input_style)) - groups = np.unique(data.groupname) - ngroups = len(groups) - output.write('{:d}\n'.format(ngroups)) - for g in groups: - npt = len(data[data.groupname == g]) - output.write('{}\n{:d}\n'.format(g.decode(), npt)) - txt = '' - for p in data: - txt += '{:d} {:d} {:d} {:d} {:d} {:d}'.format(*list(p)[:6]) - fmtstr = ' {0} {0} {0} {0} '.format(float_format) - txt += fmtstr.format(*list(p)[6:10]) - txt += '{}\n'.format(p[10].decode()) - output.write(txt) +""" +mpsim module. Contains the ModpathSim class. Note that the user can access +the ModpathSim class as `flopy.modpath.ModpathSim`. + +Additional information for this MODFLOW/MODPATH package can be found at the `Online +MODFLOW Guide +`_. + +""" +import numpy as np +from ..pakbase import Package +from ..utils import Util3d + + +class ModpathSim(Package): + """ + MODPATH Simulation File Package Class. + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.modpath.mp.Modpath`) to which + this package will be added. + extension : string + Filename extension (default is 'mpsim') + + + Attributes + ---------- + heading : str + Text string written to top of package input file. 
+
+    Methods
+    -------
+
+    See Also
+    --------
+
+    Notes
+    -----
+
+    Examples
+    --------
+
+    >>> import flopy
+    >>> m = flopy.modpath.Modpath()
+    >>> mpsim = flopy.modpath.ModpathSim(m)
+
+    """
+
+    def __init__(self, model, mp_name_file='mp.nam', mp_list_file='mp.list',
+                 option_flags=[1, 2, 1, 1, 1, 2, 2, 1, 2, 1, 1, 1],
+                 ref_time=0, ref_time_per_stp=[0, 0, 1.0], stop_time=None,
+                 group_name=['group_1'], group_placement=[[1, 1, 1, 0, 1, 1]],
+                 release_times=[[1, 1]],
+                 group_region=[[1, 1, 1, 1, 1, 1]], mask_nlay=[1],
+                 mask_layer=[1], mask_1lay=[1], face_ct=[1],
+                 ifaces=[[6, 1, 1]], part_ct=[[1, 1, 1]],
+                 time_ct=1, release_time_incr=1, time_pts=[1],
+                 particle_cell_cnt=[[2, 2, 2]],
+                 cell_bd_ct=1, bud_loc=[[1, 1, 1, 1]], trace_id=1, stop_zone=1,
+                 zone=1, retard_fac=1.0, retard_fcCB=1.0, strt_file=None,
+                 extension='mpsim'):
+
+        # Call ancestor's init to set self.parent, extension, name and unit number
+        Package.__init__(self, model, extension, 'MPSIM', 32)
+        nrow, ncol, nlay, nper = self.parent.mf.nrow_ncol_nlay_nper
+
+        self.heading1 = '# MPSIM for Modpath, generated by Flopy.'
+        self.heading2 = '#'
+        self.mp_name_file = '{}.{}'.format(model.name, 'mpnam')
+        self.mp_list_file = '{}.{}'.format(model.name, 'mplst')
+        options_list = ['SimulationType', 'TrackingDirection',
+                        'WeakSinkOption', 'WeakSourceOption',
+                        'ReferenceTimeOption', 'StopOption',
+                        'ParticleGenerationOption', 'TimePointOption',
+                        'BudgetOutputOption', 'ZoneArrayOption',
+                        'RetardationOption',
+                        'AdvectiveObservationsOption']
+        self.option_flags = option_flags
+        options_dict = dict(list(zip(options_list, option_flags)))
+        self.options_dict = options_dict
+        self.endpoint_file = '{}.{}'.format(model.name, 'mpend')
+        self.pathline_file = '{}.{}'.format(model.name, 'mppth')
+        self.time_ser_file = '{}.{}'.format(model.name, 'mp.tim_ser')
+        self.advobs_file = '{}.{}'.format(model.name, 'mp.advobs')
+        self.ref_time = ref_time
+        self.ref_time_per_stp = ref_time_per_stp
+        self.stop_time = stop_time
+        self.group_ct = len(group_name)
+        self.group_name = group_name
+        self.group_placement = group_placement
+        self.release_times = release_times
+        self.group_region = group_region
+        self.mask_nlay = mask_nlay
+        self.mask_layer = mask_layer
+        self.mask_1lay = mask_1lay
+        self.face_ct = face_ct
+        self.ifaces = ifaces
+        self.part_ct = part_ct
+        self.strt_file = '{}.{}'.format(model.name, 'loc')
+        if strt_file is not None:
+            self.strt_file = strt_file
+        self.time_ct = time_ct
+        self.release_time_incr = release_time_incr
+        self.time_pts = time_pts
+        self.particle_cell_cnt = particle_cell_cnt
+        self.cell_bd_ct = cell_bd_ct
+        self.bud_loc = bud_loc
+        self.trace_file = '{}.{}'.format(model.name, 'trace_file.txt')
+        self.trace_id = trace_id
+        self.stop_zone = stop_zone
+        self.zone = Util3d(model, (nlay, nrow, ncol), np.int32,
+                           zone, name='zone', locat=self.unit_number[0])
+        self.retard_fac = retard_fac
+        self.retard_fcCB = retard_fcCB
+
+        # self.mask_nlay = Util3d(model,(nlay,nrow,ncol),np.int32,\
+        #     mask_nlay,name='mask_nlay',locat=self.unit_number[0])
+        # self.mask_1lay = Util3d(model,(nlay,nrow,ncol),np.int32,\
+        #     mask_1lay,name='mask_1lay',locat=self.unit_number[0])
+        # self.stop_zone = Util3d(model,(nlay,nrow,ncol),np.int32,\
+        #     stop_zone,name='stop_zone',locat=self.unit_number[0])
+        # self.retard_fac = Util3d(model,(nlay,nrow,ncol),np.float32,\
+        #     retard_fac,name='retard_fac',locat=self.unit_number[0])
+        # self.retard_fcCB = Util3d(model,(nlay,nrow,ncol),np.float32,\
+        #     retard_fcCB,name='retard_fcCB',locat=self.unit_number[0])
+
+        self.parent.add_package(self)
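The twelve positional option_flags are easier to follow once zipped against their names, which is exactly what the constructor above does. A minimal restatement with the default flags (the annotated meanings follow the MODPATH 6 documentation):

options_list = ['SimulationType', 'TrackingDirection',
                'WeakSinkOption', 'WeakSourceOption',
                'ReferenceTimeOption', 'StopOption',
                'ParticleGenerationOption', 'TimePointOption',
                'BudgetOutputOption', 'ZoneArrayOption',
                'RetardationOption', 'AdvectiveObservationsOption']
option_flags = [1, 2, 1, 1, 1, 2, 2, 1, 2, 1, 1, 1]
options_dict = dict(zip(options_list, option_flags))
# e.g. options_dict['SimulationType'] == 1 selects an endpoint
# simulation; 2 adds a pathline file and 3 a time series file,
# matching the checks in write_file below.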
+
+    def check(self, f=None, verbose=True, level=1, checktype=None):
+        """
+        Check package data for common errors.
+
+        Parameters
+        ----------
+        f : str or file handle
+            String defining file name or file handle for summary file
+            of check method output. If a string is passed a file handle
+            is created. If f is None, check method does not write
+            results to a summary file. (default is None)
+        verbose : bool
+            Boolean flag used to determine if check method results are
+            written to the screen
+        level : int
+            Check method analysis level. If level=0, summary checks are
+            performed. If level=1, full checks are performed.
+
+        Returns
+        -------
+        None
+
+        Examples
+        --------
+        """
+        chk = self._get_check(f, verbose, level, checktype)
+
+        # MODPATH apparently produces no output if stoptime > last timepoint
+        if self.options_dict['StopOption'] == 3 and \
+                self.options_dict['TimePointOption'] == 3:
+            if self.time_pts[-1] < self.stop_time:
+                chk._add_to_summary(type='Error', value=self.stop_time,
+                                    desc='Stop time greater than last TimePoint')
+            else:
+                chk.append_passed('Valid stop time')
+        chk.summarize()
+        return chk
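The rule this check enforces, restated in isolation with hypothetical numbers (it only applies when StopOption is 3 and TimePointOption is 3):

stop_time = 200.
time_pts = [50., 100., 150.]   # user-specified TimePoints
if time_pts[-1] < stop_time:
    print('Error: stop time greater than last TimePoint; '
          'MODPATH would produce no output')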
+
+    def write_file(self):
+        """
+        Write the package file
+
+        Returns
+        -------
+        None
+
+        """
+        # item numbers and CamelCase variable names correspond to Modpath 6 documentation
+        nrow, ncol, nlay, nper = self.parent.mf.nrow_ncol_nlay_nper
+
+        f_sim = open(self.fn_path, 'w')
+        # item 0
+        f_sim.write('#{0:s}\n#{1:s}\n'.format(self.heading1, self.heading2))
+        # item 1
+        f_sim.write('{0:s}\n'.format(self.mp_name_file))
+        # item 2
+        f_sim.write('{0:s}\n'.format(self.mp_list_file))
+        # item 3
+        for i in range(12):
+            f_sim.write('{0:4d}'.format(self.option_flags[i]))
+        f_sim.write('\n')
+
+        # item 4
+        f_sim.write('{0:s}\n'.format(self.endpoint_file))
+        # item 5
+        if self.options_dict['SimulationType'] == 2:
+            f_sim.write('{0:s}\n'.format(self.pathline_file))
+        # item 6
+        if self.options_dict['SimulationType'] == 3:
+            f_sim.write('{0:s}\n'.format(self.time_ser_file))
+        # item 7
+        if self.options_dict['AdvectiveObservationsOption'] == 2 and \
+                self.options_dict['SimulationType'] == 3:
+            f_sim.write('{0:s}\n'.format(self.advobs_file))
+
+        # item 8
+        if self.options_dict['ReferenceTimeOption'] == 1:
+            f_sim.write('{0:f}\n'.format(self.ref_time))
+        # item 9
+        if self.options_dict['ReferenceTimeOption'] == 2:
+            Period, Step, TimeFraction = self.ref_time_per_stp
+            f_sim.write(
+                '{0:d} {1:d} {2:f}\n'.format(Period + 1, Step + 1,
+                                             TimeFraction))
+
+        # item 10
+        if self.options_dict['StopOption'] == 3:
+            f_sim.write('{0:f}\n'.format(self.stop_time))
+
+        if self.options_dict['ParticleGenerationOption'] == 1:
+            # item 11
+            f_sim.write('{0:d}\n'.format(self.group_ct))
+            for i in range(self.group_ct):
+                # item 12
+                f_sim.write('{0:s}\n'.format(self.group_name[i]))
+                # item 13
+                Grid, GridCellRegionOption, PlacementOption, ReleaseStartTime, ReleaseOption, CHeadOption = \
+                    self.group_placement[i]
+                f_sim.write(
+                    '{0:d} {1:d} {2:d} {3:f} {4:d} {5:d}\n'.format(
+                        Grid, GridCellRegionOption, PlacementOption,
+                        ReleaseStartTime, ReleaseOption, CHeadOption))
+                # item 14
+                if ReleaseOption == 2:
+                    ReleasePeriodLength, ReleaseEventCount = \
+                        self.release_times[i]
+                    f_sim.write('{0:f} {1:d}\n'.format(ReleasePeriodLength,
+                                                       ReleaseEventCount))
+                # item 15
+                if GridCellRegionOption == 1:
+                    MinLayer, MinRow, MinColumn, MaxLayer, MaxRow, MaxColumn = \
+                        self.group_region[i]
+                    f_sim.write('{0:d} {1:d} {2:d} {3:d} {4:d} {5:d}\n'.format(
+                        MinLayer + 1, MinRow + 1, MinColumn + 1,
+                        MaxLayer + 1, MaxRow + 1, MaxColumn + 1))
+                # item 16
+                if GridCellRegionOption == 2:
+                    f_sim.write(self.mask_nlay[i].get_file_entry())
+                # item 17
+                if GridCellRegionOption == 3:
+                    f_sim.write('{0:s}\n'.format(self.mask_layer[i]))
+                    # item 18
+                    f_sim.write(self.mask_1lay[i].get_file_entry())
+                # item 19 and 20
+                if PlacementOption == 1:
+                    f_sim.write('{0:d}\n'.format(self.face_ct[i]))
+                    # item 20
+                    for j in range(self.face_ct[i]):
+                        IFace, ParticleRowCount, ParticleColumnCount = \
+                            self.ifaces[i][j]
+                        f_sim.write('{0:d} {1:d} {2:d} \n'.format(
+                            IFace, ParticleRowCount, ParticleColumnCount))
+                # item 21
+                elif PlacementOption == 2:
+                    ParticleLayerCount, ParticleRowCount, ParticleColumnCount = \
+                        self.particle_cell_cnt[i]
+                    f_sim.write('{0:d} {1:d} {2:d} \n'.format(
+                        ParticleLayerCount, ParticleRowCount,
+                        ParticleColumnCount))
+
+        # item 22
+        if self.options_dict['ParticleGenerationOption'] == 2:
+            f_sim.write('{0:s}\n'.format(self.strt_file))
+
+        if self.options_dict['TimePointOption'] != 1:
+            # item 23
+            if self.options_dict['TimePointOption'] == 2 or \
+                    self.options_dict['TimePointOption'] == 3:
+                f_sim.write('{0:d}\n'.format(self.time_ct))
+            # item 24
+            if self.options_dict['TimePointOption'] == 2:
+                f_sim.write('{0:f}\n'.format(self.release_time_incr))
+            # item 25
+            if self.options_dict['TimePointOption'] == 3:
+                for r in range(self.time_ct):
+                    f_sim.write('{0:f}\n'.format(self.time_pts[r]))
+
+        if self.options_dict['BudgetOutputOption'] != 1 and \
+                self.options_dict['BudgetOutputOption'] != 2:
+            # item 26
+            if self.options_dict['BudgetOutputOption'] == 3:
+                f_sim.write('{0:d}\n'.format(self.cell_bd_ct))
+                # item 27
+                for k in range(self.cell_bd_ct):
+                    Grid, Layer, Row, Column = self.bud_loc[k]
+                    f_sim.write(
+                        '{0:d} {1:d} {2:d} {3:d} \n'.format(Grid, Layer + 1,
+                                                            Row + 1,
+                                                            Column + 1))
+            if self.options_dict['BudgetOutputOption'] == 4:
+                # item 28
+                f_sim.write('{0:s}\n'.format(self.trace_file))
+                # item 29
+                f_sim.write('{0:d}\n'.format(self.trace_id))
+
+        if self.options_dict['ZoneArrayOption'] != 1:
+            # item 30
+            f_sim.write('{0:d}\n'.format(self.stop_zone))
+            # item 31
+            f_sim.write(self.zone.get_file_entry())
+
+        if self.options_dict['RetardationOption'] != 1:
+            # item 32
+            f_sim.write(self.retard_fac.get_file_entry())
+            # item 33
+            f_sim.write(self.retard_fcCB.get_file_entry())
+
+        f_sim.close()
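A small self-contained illustration of the zero- to one-based index conversion applied throughout write_file above (Period + 1, Layer + 1, and so on): FloPy stores indices zero-based, while MODPATH input is one-based. Values are hypothetical:

ref_time_per_stp = [0, 0, 1.0]   # zero-based [period, step, fraction]
Period, Step, TimeFraction = ref_time_per_stp
line = '{0:d} {1:d} {2:f}\n'.format(Period + 1, Step + 1, TimeFraction)
# line == '1 1 1.000000\n' -> stress period 1, time step 1 in the file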
+ """ + dtype = np.dtype([("particleid", np.int), ("particlegroup", np.int), + ('initialgrid', np.int), + ('k0', np.int), ('i0', np.int), + ('j0', np.int), ('xloc0', np.float32), + ('yloc0', np.float32), ('zloc0', np.float32), + ('initialtime', np.float32), + ('label', '|S40'), ('groupname', '|S16')]) + return dtype + + @staticmethod + def get_empty_starting_locations_data(npt=0, + default_xloc0=0.5, default_yloc0=0.5, + default_zloc0=0.): + """get an empty recarray for particle starting location info. + + Parameters + ---------- + npt : int + Number of particles. Particles in array will be numbered consecutively from 1 to npt. + + """ + dtype = StartingLocationsFile.get_dtypes() + d = np.zeros(npt, dtype=dtype) + d = d.view(np.recarray) + d['particleid'] = np.arange(1, npt + 1) + d['particlegroup'] = 1 + d['initialgrid'] = 1 + d['xloc0'] = default_xloc0 + d['yloc0'] = default_yloc0 + d['zloc0'] = default_zloc0 + d['groupname'] = 'group1' + return d + + def write_file(self, data=None, float_format='{:.8f}'): + + if data is None: + data = self.data + if len(data) == 0: + print('No data to write!') + return + data = data.copy() + data['k0'] += 1 + data['i0'] += 1 + data['j0'] += 1 + with open(self.fn_path, 'w') as output: + output.write('{}\n'.format(self.heading)) + output.write('{:d}\n'.format(self.input_style)) + groups = np.unique(data.groupname) + ngroups = len(groups) + output.write('{:d}\n'.format(ngroups)) + for g in groups: + npt = len(data[data.groupname == g]) + output.write('{}\n{:d}\n'.format(g.decode(), npt)) + txt = '' + for p in data: + txt += '{:d} {:d} {:d} {:d} {:d} {:d}'.format(*list(p)[:6]) + fmtstr = ' {0} {0} {0} {0} '.format(float_format) + txt += fmtstr.format(*list(p)[6:10]) + txt += '{}\n'.format(p[10].decode()) + output.write(txt) diff --git a/flopy/mt3d/__init__.py b/flopy/mt3d/__init__.py index 8ceb47ce59..00605ab750 100644 --- a/flopy/mt3d/__init__.py +++ b/flopy/mt3d/__init__.py @@ -1,12 +1,12 @@ -from .mt import Mt3dms -from .mtadv import Mt3dAdv -from .mtbtn import Mt3dBtn -from .mtdsp import Mt3dDsp -from .mtgcg import Mt3dGcg -from .mtphc import Mt3dPhc -from .mtrct import Mt3dRct -from .mtssm import Mt3dSsm -from .mttob import Mt3dTob -from .mtlkt import Mt3dLkt -from .mtsft import Mt3dSft +from .mt import Mt3dms +from .mtadv import Mt3dAdv +from .mtbtn import Mt3dBtn +from .mtdsp import Mt3dDsp +from .mtgcg import Mt3dGcg +from .mtphc import Mt3dPhc +from .mtrct import Mt3dRct +from .mtssm import Mt3dSsm +from .mttob import Mt3dTob +from .mtlkt import Mt3dLkt +from .mtsft import Mt3dSft from .mtuzt import Mt3dUzt \ No newline at end of file diff --git a/flopy/mt3d/mt.py b/flopy/mt3d/mt.py index 53c7716d0b..3f68822cd8 100644 --- a/flopy/mt3d/mt.py +++ b/flopy/mt3d/mt.py @@ -1,884 +1,884 @@ -import os -import sys -import numpy as np -from ..mbase import BaseModel -from ..pakbase import Package -from ..utils import mfreadnam -from .mtbtn import Mt3dBtn -from .mtadv import Mt3dAdv -from .mtdsp import Mt3dDsp -from .mtssm import Mt3dSsm -from .mtrct import Mt3dRct -from .mtgcg import Mt3dGcg -from .mttob import Mt3dTob -from .mtphc import Mt3dPhc -from .mtuzt import Mt3dUzt -from .mtsft import Mt3dSft -from .mtlkt import Mt3dLkt -from ..discretization.structuredgrid import StructuredGrid -from flopy.discretization.modeltime import ModelTime - - -class Mt3dList(Package): - """ - List package class - """ - - def __init__(self, model, extension='list', listunit=7): - # Call ancestor's init to set self.parent, extension, name and - # unit number - 
Package.__init__(self, model, extension, 'LIST', listunit) - # self.parent.add_package(self) This package is not added to the base - # model so that it is not included in get_name_file_entries() - return - - def __repr__(self): - return 'List package class' - - def write_file(self): - # Not implemented for list class - return - - -''' -class Mt3dms(BaseModel): - 'MT3DMS base class' - - def __init__(self, modelname='mt3dmstest', namefile_ext='nam', - modflowmodel=None, ftlfilename=None, - model_ws=None, external_path=None, verbose=False, - load=True, listunit=7, exe_name='mt3dms.exe', ): - BaseModel.__init__(self, modelname, namefile_ext, model_ws=model_ws, - exe_name=exe_name) - self.heading = '# Name file for MT3DMS, generated by Flopy.' - self.__mf = modflowmodel - self.lst = Mt3dList(self, listunit=listunit) - self.ftlfilename = ftlfilename - self.__adv = None - self.__btn = None - self.__dsp = None - self.__gcg = None - self.__rct = None - self.__ssm = None - self.array_free_format = False - self.external_path = external_path - self.external = False - self.external_fnames = [] - self.external_units = [] - self.external_binflag = [] - self.load = load - self.__next_ext_unit = 500 - if external_path is not None: - if os.path.exists(external_path): - print("Note: external_path " + str(external_path) + \ - " already exists") - # assert os.path.exists(external_path),'external_path does not exist' - else: - os.mkdir(external_path) - self.external = True - self.verbose = verbose - return - - def __repr__(self): - return 'MT3DMS model' - - def get_ncomp(self): - btn = self.get_package('BTN') - if (btn): - return btn.ncomp - else: - return 1 - - # function to encapsulate next_ext_unit attribute - def next_ext_unit(self): - self.__next_ext_unit += 1 - return self.__next_ext_unit - - def getadv(self): - if (self.__adv == None): - for p in (self.packagelist): - if isinstance(p, Mt3dAdv): - self.__adv = p - return self.__adv - - def getbtn(self): - if (self.__btn == None): - for p in (self.packagelist): - if isinstance(p, Mt3dBtn): - self.__btn = p - return self.__btn - - def getdsp(self): - if (self.__dsp == None): - for p in (self.packagelist): - if isinstance(p, Mt3dDsp): - self.__dsp = p - return self.__dsp - - def getgcg(self): - if (self.__gcg == None): - for p in (self.packagelist): - if isinstance(p, Mt3dGcg): - self.__gcg = p - return self.__gcg - - def getmf(self): - return self.__mf - - def getrct(self): - if (self.__rct == None): - for p in (self.packagelist): - if isinstance(p, Mt3dRct): - self.__rct = p - return self.__rct - - def getssm(self): - if (self.__ssm == None): - for p in (self.packagelist): - if isinstance(p, Mt3dSsm): - self.__ssm = p - return self.__ssm - - def write_name_file(self): - fn_path = os.path.join(self.model_ws, self.namefile) - f_nam = open(fn_path, 'w') - f_nam.write('%s\n' % (self.heading)) - f_nam.write('%s %3i %s\n' % (self.lst.name[0], self.lst.unit_number[0], - self.lst.file_name[0])) - if self.ftlfilename is not None: - f_nam.write('%s %3i %s\n' % ('FTL', 39, self.ftlfilename)) - f_nam.write('%s' % self.get_name_file_entries()) - for u, f in zip(self.external_units, self.external_fnames): - f_nam.write('DATA {0:3d} '.format(u) + f + '\n') - f_nam.close() - - adv = property(getadv) # Property has no setter, so read-only - btn = property(getbtn) # Property has no setter, so read-only - dsp = property(getdsp) # Property has no setter, so read-only - gcg = property(getgcg) # Property has no setter, so read-only - mf = property(getmf) # Property has no 
setter, so read-only - rct = property(getrct) # Property has no setter, so read-only - ssm = property(getssm) # Property has no setter, so read-only - ncomp = property(get_ncomp) -''' - - -class Mt3dms(BaseModel): - """ - MT3DMS Model Class. - - Parameters - ---------- - modelname : string, optional - Name of model. This string will be used to name the MODFLOW input - that are created with write_model. (the default is 'mt3dtest') - namefile_ext : string, optional - Extension for the namefile (the default is 'nam') - modflowmodel : flopy.modflow.mf.Modflow - This is a flopy Modflow model object upon which this Mt3dms model - is based. (the default is None) - version : string, optional - Version of MT3DMS to use (the default is 'mt3dms'). - exe_name : string, optional - The name of the executable to use (the default is - 'mt3dms.exe'). - listunit : integer, optional - Unit number for the list file (the default is 2). - model_ws : string, optional - model workspace. Directory name to create model data sets. - (default is the present working directory). - external_path : string - Location for external files (default is None). - verbose : boolean, optional - Print additional information to the screen (default is False). - load : boolean, optional - (default is True). - silent : integer - (default is 0) - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - - Examples - -------- - - >>> import flopy - >>> m = flopy.mt3d.mt.Mt3dms() - - """ - - def __init__(self, modelname='mt3dtest', namefile_ext='nam', - modflowmodel=None, ftlfilename="mt3d_link.ftl", ftlfree=False, - version='mt3dms', exe_name='mt3dms.exe', - structured=True, listunit=None, ftlunit=None, - model_ws='.', external_path=None, - verbose=False, load=True, silent=0): - - # Call constructor for parent object - BaseModel.__init__(self, modelname, namefile_ext, exe_name, model_ws, - structured=structured, verbose=verbose) - - # Set attributes - self.version_types = {'mt3dms': 'MT3DMS', 'mt3d-usgs': 'MT3D-USGS'} - - self.set_version(version.lower()) - - if listunit is None: - listunit = 16 - - if ftlunit is None: - ftlunit = 10 - - self.lst = Mt3dList(self, listunit=listunit) - self.mf = modflowmodel - self.ftlfilename = ftlfilename - self.ftlfree = ftlfree - self.ftlunit = ftlunit - self.free_format = None - - # Check whether specified ftlfile exists in model directory; if not, - # warn user - if os.path.isfile(os.path.join(self.model_ws, - str(modelname + '.' + namefile_ext))): - with open(os.path.join(self.model_ws, str( - modelname + '.' + namefile_ext))) as nm_file: - for line in nm_file: - if line[0:3] == 'FTL': - ftlfilename = line.strip().split()[2] - break - if ftlfilename is None: - print("User specified FTL file does not exist in model directory") - print("MT3D will not work without a linker file") - else: - if os.path.isfile(os.path.join(self.model_ws, ftlfilename)): - # Check that the FTL present in the directory is of the format - # specified by the user, i.e., is same as ftlfree - # Do this by checking whether the first non-blank character is - # an apostrophe. 
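The format check described in these comments can be sketched in isolation. 'mt3d_link.ftl' is a placeholder file name here (it is also the constructor default above); a formatted (free-format) FTL file begins with an apostrophe, anything else is treated as binary:

# sniff the first four characters of an existing FTL file
with open('mt3d_link.ftl', 'rb') as f:
    c = f.read(4).decode()
ftlfree = c.strip()[0] == "'"  # True -> formatted, False -> binary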
- # If code lands here, then ftlfilename exists, open and read - # first 4 characters - f = open(os.path.join(self.model_ws, ftlfilename), 'rb') - c = f.read(4) - if isinstance(c, bytes): - c = c.decode() - - # if first non-blank char is an apostrophe, then formatted, - # otherwise binary - if (c.strip()[0] == "'" and self.ftlfree) or \ - (c.strip()[0] != "'" and not self.ftlfree): - pass - else: - msg = "Specified value of ftlfree conflicts with FTL " + \ - "file format" - print(msg) - msg = 'Switching ftlfree from ' + \ - '{} '.format(str(self.ftlfree)) + \ - 'to {}'.format(str(not self.ftlfree)) - print(msg) - self.ftlfree = not self.ftlfree # Flip the bool - - # external option stuff - self.array_free_format = False - self.array_format = 'mt3d' - self.external_fnames = [] - self.external_units = [] - self.external_binflag = [] - self.external = False - self.load = load - # the starting external data unit number - self._next_ext_unit = 2000 - if external_path is not None: - # assert model_ws == '.', "ERROR: external cannot be used " + \ - # "with model_ws" - - # external_path = os.path.join(model_ws, external_path) - if os.path.exists(external_path): - print("Note: external_path " + str(external_path) + - " already exists") - # assert os.path.exists(external_path),'external_path does not exist' - else: - os.mkdir(external_path) - self.external = True - self.external_path = external_path - self.verbose = verbose - self.silent = silent - - # Create a dictionary to map package with package object. - # This is used for loading models. - self.mfnam_packages = { - 'btn': Mt3dBtn, - 'adv': Mt3dAdv, - 'dsp': Mt3dDsp, - 'ssm': Mt3dSsm, - 'rct': Mt3dRct, - 'gcg': Mt3dGcg, - 'tob': Mt3dTob, - 'phc': Mt3dPhc, - 'lkt': Mt3dLkt, - 'sft': Mt3dSft, - 'uzt2': Mt3dUzt - } - return - - def __repr__(self): - return 'MT3DMS model' - - @property - def modeltime(self): - # build model time - data_frame = {'perlen': self.mf.dis.perlen.array, - 'nstp': self.mf.dis.nstp.array, - 'tsmult': self.mf.dis.tsmult.array} - self._model_time = ModelTime(data_frame, - self.mf.dis.itmuni_dict[ - self.mf.dis.itmuni], - self.dis.start_datetime, - self.dis.steady.array) - return self._model_time - - @property - def modelgrid(self): - if not self._mg_resync: - return self._modelgrid - - if self.btn is not None: - ibound = self.btn.icbund.array - delc = self.btn.delc.array - delr = self.btn.delr.array - top = self.btn.htop.array - botm = np.subtract(top, self.btn.dz.array.cumsum(axis=0)) - nlay = self.btn.nlay - else: - delc = self.mf.dis.delc.array - delr = self.mf.dis.delr.array - top = self.mf.dis.top.array - botm = self.mf.dis.botm.array - nlay = self.mf.nlay - if self.mf.bas6 is not None: - ibound = self.mf.bas6.ibound.array - else: - ibound = None - # build grid - self._modelgrid = StructuredGrid(delc=delc, - delr=delr, - top=top, - botm=botm, - idomain=ibound, - proj4=self._modelgrid.proj4, - epsg=self._modelgrid.epsg, - xoff=self._modelgrid.xoffset, - yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot, - nlay=nlay) - - # resolve offsets - xoff = self._modelgrid.xoffset - if xoff is None: - if self._xul is not None: - xoff = self._modelgrid._xul_to_xll(self._xul) - else: - xoff = self.mf._modelgrid.xoffset - if xoff is None: - # incase mf._modelgrid.xoffset is not set but mf._xul is - if self.mf._xul is not None: - xoff = self._modelgrid._xul_to_xll(self.mf._xul) - else: - xoff = 0.0 - yoff = self._modelgrid.yoffset - if yoff is None: - if self._yul is not None: - yoff = self._modelgrid._yul_to_yll(self._yul) 
- else: - yoff = self.mf._modelgrid.yoffset - if yoff is None: - # incase mf._modelgrid.yoffset is not set but mf._yul is - if self.mf._yul is not None: - yoff = self._modelgrid._yul_to_yll(self.mf._yul) - else: - yoff = 0.0 - proj4 = self._modelgrid.proj4 - if proj4 is None: - proj4 = self.mf._modelgrid.proj4 - epsg = self._modelgrid.epsg - if epsg is None: - epsg = self.mf._modelgrid.epsg - angrot = self._modelgrid.angrot - if angrot is None or angrot == 0.0: # angrot normally defaulted to 0.0 - if self.mf._modelgrid.angrot is not None: - angrot = self.mf._modelgrid.angrot - else: - angrot = 0.0 - - self._modelgrid.set_coord_info(xoff, yoff, angrot, epsg, proj4) - self._mg_resync = not self._modelgrid.is_complete - return self._modelgrid - - @property - def solver_tols(self): - if self.gcg is not None: - return self.gcg.cclose, -999 - return None - - @property - def sr(self): - if self.mf is not None: - return self.mf.sr - return None - - @property - def nlay(self): - if (self.btn): - return self.btn.nlay - else: - return 0 - - @property - def nrow(self): - if (self.btn): - return self.btn.nrow - else: - return 0 - - @property - def ncol(self): - if (self.btn): - return self.btn.ncol - else: - return 0 - - @property - def nper(self): - if (self.btn): - return self.btn.nper - else: - return 0 - - @property - def ncomp(self): - if (self.btn): - return self.btn.ncomp - else: - return 1 - - @property - def mcomp(self): - if (self.btn): - return self.btn.mcomp - else: - return 1 - - def get_nrow_ncol_nlay_nper(self): - if (self.btn): - return self.btn.nrow, self.btn.ncol, self.btn.nlay, self.btn.nper - else: - return 0, 0, 0, 0 - - # Property has no setter, so read-only - nrow_ncol_nlay_nper = property(get_nrow_ncol_nlay_nper) - - def write_name_file(self): - """ - Write the name file. - - """ - fn_path = os.path.join(self.model_ws, self.namefile) - f_nam = open(fn_path, 'w') - f_nam.write('{}\n'.format(self.heading)) - f_nam.write('{:14s} {:5d} {}\n'.format(self.lst.name[0], - self.lst.unit_number[0], - self.lst.file_name[0])) - if self.ftlfilename is not None: - ftlfmt = '' - if self.ftlfree: - ftlfmt = 'FREE' - f_nam.write('{:14s} {:5d} {} {}\n'.format('FTL', self.ftlunit, - self.ftlfilename, - ftlfmt)) - # write file entries in name file - f_nam.write('{}'.format(self.get_name_file_entries())) - - # write the external files - for u, f in zip(self.external_units, self.external_fnames): - f_nam.write('DATA {0:5d} '.format(u) + f + '\n') - - # write the output files - for u, f, b in zip(self.output_units, self.output_fnames, - self.output_binflag): - if u == 0: - continue - if b: - f_nam.write( - 'DATA(BINARY) {0:5d} '.format(u) + f + ' REPLACE\n') - else: - f_nam.write('DATA {0:5d} '.format(u) + f + '\n') - - f_nam.close() - return - - def load_results(self, **kwargs): - return - - @staticmethod - def load(f, version='mt3dms', exe_name='mt3dms.exe', verbose=False, - model_ws='.', load_only=None, forgive=False, modflowmodel=None): - """ - Load an existing model. - - Parameters - ---------- - f : string - Full path and name of MT3D name file. - - version : string - The version of MT3D (mt3dms, or mt3d-usgs) - (default is mt3dms) - - exe_name : string - The name of the executable to use if this loaded model is run. - (default is mt3dms.exe) - - verbose : bool - Write information on the load process if True. - (default is False) - - model_ws : string - The path for the model workspace. - (default is the current working directory '.') - - load_only : list of strings - Filetype(s) to load (e.g. 
['btn', 'adv']) - (default is None, which means that all will be loaded) - - forgive : bool, optional - Option to raise exceptions on package load failure, which can be - useful for debugging. Default False. - - modflowmodel : flopy.modflow.mf.Modflow - This is a flopy Modflow model object upon which this Mt3dms - model is based. (the default is None) - - Returns - ------- - mt : flopy.mt3d.mt.Mt3dms - flopy Mt3d model object - - Notes - ----- - The load method does not retain the name for the MODFLOW-generated - FTL file. This can be added manually after the MT3D model has been - loaded. The syntax for doing this manually is - mt.ftlfilename = 'example.ftl' - - Examples - -------- - - >>> import flopy - >>> f = 'example.nam' - >>> mt = flopy.mt3d.mt.Mt3dms.load(f) - >>> mt.ftlfilename = 'example.ftl' - - """ - modelname, ext = os.path.splitext(f) - modelname_extension = ext[1:] # without '.' - - if verbose: - sys.stdout.write('\nCreating new model with name: {}\n{}\n\n'. - format(modelname, 50 * '-')) - mt = Mt3dms(modelname=modelname, namefile_ext=modelname_extension, - version=version, exe_name=exe_name, - verbose=verbose, model_ws=model_ws, - modflowmodel=modflowmodel) - files_successfully_loaded = [] - files_not_loaded = [] - - # read name file - namefile_path = os.path.join(mt.model_ws, f) - if not os.path.isfile(namefile_path): - raise IOError('cannot find name file: ' + str(namefile_path)) - try: - ext_unit_dict = mfreadnam.parsenamefile( - namefile_path, mt.mfnam_packages, verbose=verbose) - except Exception as e: - # print("error loading name file entries from file") - # print(str(e)) - # return None - raise Exception( - "error loading name file entries from file:\n" + str(e)) - - if mt.verbose: - print('\n{}\nExternal unit dictionary:\n{}\n{}\n'. 
- format(50 * '-', ext_unit_dict, 50 * '-')) - - # reset unit number for list file - unitnumber = None - for key, value in ext_unit_dict.items(): - if value.filetype == 'LIST': - unitnumber = key - filepth = os.path.basename(value.filename) - if unitnumber == 'LIST': - unitnumber = 16 - if unitnumber is not None: - mt.lst.unit_number = [unitnumber] - mt.lst.file_name = [filepth] - - # set ftl information - unitnumber = None - for key, value in ext_unit_dict.items(): - if value.filetype == 'FTL': - unitnumber = key - filepth = os.path.basename(value.filename) - if unitnumber == 'FTL': - unitnumber = 10 - if unitnumber is not None: - mt.ftlunit = unitnumber - mt.ftlfilename = filepth - - # load btn - btn = None - btn_key = None - for key, item in ext_unit_dict.items(): - if item.filetype.lower() == "btn": - btn = item - btn_key = key - break - - if btn is None: - return None - - try: - pck = btn.package.load(btn.filename, mt, - ext_unit_dict=ext_unit_dict) - except Exception as e: - raise Exception('error loading BTN: {0}'.format(str(e))) - files_successfully_loaded.append(btn.filename) - if mt.verbose: - sys.stdout.write(' {:4s} package load...success\n' - .format(pck.name[0])) - ext_unit_dict.pop(btn_key).filehandle.close() - ncomp = mt.btn.ncomp - # reserved unit numbers for .ucn, s.ucn, .obs, .mas, .cnf - poss_output_units = set(list(range(201, 201 + ncomp)) + - list(range(301, 301 + ncomp)) + - list(range(401, 401 + ncomp)) + - list(range(601, 601 + ncomp)) + [17]) - if load_only is None: - load_only = [] - for key, item in ext_unit_dict.items(): - load_only.append(item.filetype) - else: - if not isinstance(load_only, list): - load_only = [load_only] - not_found = [] - for i, filetype in enumerate(load_only): - filetype = filetype.upper() - if filetype != 'BTN': - load_only[i] = filetype - found = False - for key, item in ext_unit_dict.items(): - if item.filetype == filetype: - found = True - break - if not found: - not_found.append(filetype) - if len(not_found) > 0: - raise Exception( - "the following load_only entries were not found " - "in the ext_unit_dict: " + ','.join(not_found)) - - # try loading packages in ext_unit_dict - for key, item in ext_unit_dict.items(): - if item.package is not None: - if item.filetype in load_only: - if forgive: - try: - pck = item.package.load(item.filehandle, mt, - ext_unit_dict=ext_unit_dict) - files_successfully_loaded.append(item.filename) - if mt.verbose: - sys.stdout.write( - ' {:4s} package load...success\n' - .format(pck.name[0])) - except BaseException as o: - if mt.verbose: - sys.stdout.write( - ' {:4s} package load...failed\n {!s}\n' - .format(item.filetype, o)) - files_not_loaded.append(item.filename) - else: - pck = item.package.load(item.filehandle, mt, - ext_unit_dict=ext_unit_dict) - files_successfully_loaded.append(item.filename) - if mt.verbose: - sys.stdout.write( - ' {:4s} package load...success\n' - .format(pck.name[0])) - else: - if mt.verbose: - sys.stdout.write(' {:4s} package load...skipped\n' - .format(item.filetype)) - files_not_loaded.append(item.filename) - elif "data" not in item.filetype.lower(): - files_not_loaded.append(item.filename) - if mt.verbose: - sys.stdout.write(' {:4s} package load...skipped\n' - .format(item.filetype)) - elif "data" in item.filetype.lower(): - if mt.verbose: - sys.stdout.write(' {} file load...skipped\n {}\n' - .format(item.filetype, - os.path.basename(item.filename))) - if key in poss_output_units: - # id files specified to output unit numbers and allow to - # pass through - 
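The classification that follows leans on the poss_output_units set built earlier in load(). A minimal stand-alone sketch of that reserved-unit rule, assuming a hypothetical two-species model (the unit numbers tested are illustrative only):

    ncomp = 2  # hypothetical species count
    # same reserved ranges as in load(): .ucn, s.ucn, .obs, .mas, plus .cnf on 17
    poss_output_units = set(
        list(range(201, 201 + ncomp))
        + list(range(301, 301 + ncomp))
        + list(range(401, 401 + ncomp))
        + list(range(601, 601 + ncomp))
        + [17]
    )
    for unit in (17, 201, 202, 44):
        kind = 'output' if unit in poss_output_units else 'external'
        print(unit, kind)  # 44 is not reserved, so it lands in the external-file lists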
mt.output_fnames.append(os.path.basename(item.filename)) - mt.output_units.append(key) - mt.output_binflag.append("binary" - in item.filetype.lower()) - elif key not in mt.pop_key_list: - mt.external_fnames.append(item.filename) - mt.external_units.append(key) - mt.external_binflag.append("binary" - in item.filetype.lower()) - mt.external_output.append(False) - - # pop binary output keys and any external file units that are now - # internal - for key in mt.pop_key_list: - try: - mt.remove_external(unit=key) - item = ext_unit_dict.pop(key) - if hasattr(item.filehandle, 'close'): - item.filehandle.close() - except KeyError: - if mt.verbose: - msg = "\nWARNING:\n External file unit " + \ - "{} does not exist in ext_unit_dict.\n".format(key) - sys.stdout.write(msg) - - # write message indicating packages that were successfully loaded - if mt.verbose: - print(1 * '\n') - s = ' The following {0} packages were successfully loaded.' \ - .format(len(files_successfully_loaded)) - print(s) - for fname in files_successfully_loaded: - print(' ' + os.path.basename(fname)) - if len(files_not_loaded) > 0: - s = ' The following {0} packages were not loaded.'.format( - len(files_not_loaded)) - print(s) - for fname in files_not_loaded: - print(' ' + os.path.basename(fname)) - print('\n') - - # return model object - return mt - - @staticmethod - def load_mas(fname): - """ - Load an mt3d mas file and return a numpy recarray - - Parameters - ---------- - fname : str - name of MT3D mas file - - Returns - ------- - r : np.ndarray - - """ - if not os.path.isfile(fname): - raise Exception('Could not find file: {}'.format(fname)) - dtype = [('time', float), ('total_in', float), - ('total_out', float), - ('sources', float), ('sinks', float), - ('fluid_storage', float), - ('total_mass', float), ('error_in-out', float), - ('error_alt', float)] - r = np.loadtxt(fname, skiprows=2, dtype=dtype) - r = r.view(np.recarray) - return r - - @staticmethod - def load_obs(fname): - """ - Load an mt3d obs file and return a numpy recarray - - Parameters - ---------- - fname : str - name of MT3D obs file - - Returns - ------- - r : np.ndarray - - """ - firstline = 'STEP TOTAL TIME LOCATION OF OBSERVATION POINTS (K,I,J)' - dtype = [('step', int), ('time', float)] - nobs = 0 - obs = [] - - if not os.path.isfile(fname): - raise Exception('Could not find file: {}'.format(fname)) - with open(fname, 'r') as f: - line = f.readline() - if line.strip() != firstline: - msg = 'First line in file must be \n{}\nFound {}'.format( - firstline, line.strip()) - msg += '\n{} does not appear to be a valid MT3D OBS file'.format( - fname) - raise Exception(msg) - - # Read obs names (when break, line will have first data line) - nlineperrec = 0 - while True: - line = f.readline() - if line[0:7].strip() == '1': - break - nlineperrec += 1 - ll = line.strip().split() - while len(ll) > 0: - k = int(ll.pop(0)) - i = int(ll.pop(0)) - j = int(ll.pop(0)) - obsnam = '({}, {}, {})'.format(k, i, j) - if obsnam in obs: - obsnam += str(len(obs) + 1) # make obs name unique - obs.append(obsnam) - - icount = 0 - r = [] - while True: - ll = [] - for n in range(nlineperrec): - icount += 1 - if icount > 1: - line = f.readline() - ll.extend(line.strip().split()) - - if not line: - break - - rec = [int(ll[0])] - for val in ll[1:]: - rec.append(float(val)) - r.append(tuple(rec)) - - # add obs names to dtype - for nameob in obs: - dtype.append((nameob, float)) - r = np.array(r, dtype=dtype) - r = r.view(np.recarray) - return r +import os +import sys +import numpy as np +from 
..mbase import BaseModel +from ..pakbase import Package +from ..utils import mfreadnam +from .mtbtn import Mt3dBtn +from .mtadv import Mt3dAdv +from .mtdsp import Mt3dDsp +from .mtssm import Mt3dSsm +from .mtrct import Mt3dRct +from .mtgcg import Mt3dGcg +from .mttob import Mt3dTob +from .mtphc import Mt3dPhc +from .mtuzt import Mt3dUzt +from .mtsft import Mt3dSft +from .mtlkt import Mt3dLkt +from ..discretization.structuredgrid import StructuredGrid +from flopy.discretization.modeltime import ModelTime + + +class Mt3dList(Package): + """ + List package class + """ + + def __init__(self, model, extension='list', listunit=7): + # Call ancestor's init to set self.parent, extension, name and + # unit number + Package.__init__(self, model, extension, 'LIST', listunit) + # self.parent.add_package(self) This package is not added to the base + # model so that it is not included in get_name_file_entries() + return + + def __repr__(self): + return 'List package class' + + def write_file(self): + # Not implemented for list class + return + + +''' +class Mt3dms(BaseModel): + 'MT3DMS base class' + + def __init__(self, modelname='mt3dmstest', namefile_ext='nam', + modflowmodel=None, ftlfilename=None, + model_ws=None, external_path=None, verbose=False, + load=True, listunit=7, exe_name='mt3dms.exe', ): + BaseModel.__init__(self, modelname, namefile_ext, model_ws=model_ws, + exe_name=exe_name) + self.heading = '# Name file for MT3DMS, generated by Flopy.' + self.__mf = modflowmodel + self.lst = Mt3dList(self, listunit=listunit) + self.ftlfilename = ftlfilename + self.__adv = None + self.__btn = None + self.__dsp = None + self.__gcg = None + self.__rct = None + self.__ssm = None + self.array_free_format = False + self.external_path = external_path + self.external = False + self.external_fnames = [] + self.external_units = [] + self.external_binflag = [] + self.load = load + self.__next_ext_unit = 500 + if external_path is not None: + if os.path.exists(external_path): + print("Note: external_path " + str(external_path) + \ + " already exists") + # assert os.path.exists(external_path),'external_path does not exist' + else: + os.mkdir(external_path) + self.external = True + self.verbose = verbose + return + + def __repr__(self): + return 'MT3DMS model' + + def get_ncomp(self): + btn = self.get_package('BTN') + if (btn): + return btn.ncomp + else: + return 1 + + # function to encapsulate next_ext_unit attribute + def next_ext_unit(self): + self.__next_ext_unit += 1 + return self.__next_ext_unit + + def getadv(self): + if (self.__adv == None): + for p in (self.packagelist): + if isinstance(p, Mt3dAdv): + self.__adv = p + return self.__adv + + def getbtn(self): + if (self.__btn == None): + for p in (self.packagelist): + if isinstance(p, Mt3dBtn): + self.__btn = p + return self.__btn + + def getdsp(self): + if (self.__dsp == None): + for p in (self.packagelist): + if isinstance(p, Mt3dDsp): + self.__dsp = p + return self.__dsp + + def getgcg(self): + if (self.__gcg == None): + for p in (self.packagelist): + if isinstance(p, Mt3dGcg): + self.__gcg = p + return self.__gcg + + def getmf(self): + return self.__mf + + def getrct(self): + if (self.__rct == None): + for p in (self.packagelist): + if isinstance(p, Mt3dRct): + self.__rct = p + return self.__rct + + def getssm(self): + if (self.__ssm == None): + for p in (self.packagelist): + if isinstance(p, Mt3dSsm): + self.__ssm = p + return self.__ssm + + def write_name_file(self): + fn_path = os.path.join(self.model_ws, self.namefile) + f_nam = open(fn_path, 'w') + 
f_nam.write('%s\n' % (self.heading)) + f_nam.write('%s %3i %s\n' % (self.lst.name[0], self.lst.unit_number[0], + self.lst.file_name[0])) + if self.ftlfilename is not None: + f_nam.write('%s %3i %s\n' % ('FTL', 39, self.ftlfilename)) + f_nam.write('%s' % self.get_name_file_entries()) + for u, f in zip(self.external_units, self.external_fnames): + f_nam.write('DATA {0:3d} '.format(u) + f + '\n') + f_nam.close() + + adv = property(getadv) # Property has no setter, so read-only + btn = property(getbtn) # Property has no setter, so read-only + dsp = property(getdsp) # Property has no setter, so read-only + gcg = property(getgcg) # Property has no setter, so read-only + mf = property(getmf) # Property has no setter, so read-only + rct = property(getrct) # Property has no setter, so read-only + ssm = property(getssm) # Property has no setter, so read-only + ncomp = property(get_ncomp) +''' + + +class Mt3dms(BaseModel): + """ + MT3DMS Model Class. + + Parameters + ---------- + modelname : string, optional + Name of model. This string will be used to name the MODFLOW input + that are created with write_model. (the default is 'mt3dtest') + namefile_ext : string, optional + Extension for the namefile (the default is 'nam') + modflowmodel : flopy.modflow.mf.Modflow + This is a flopy Modflow model object upon which this Mt3dms model + is based. (the default is None) + version : string, optional + Version of MT3DMS to use (the default is 'mt3dms'). + exe_name : string, optional + The name of the executable to use (the default is + 'mt3dms.exe'). + listunit : integer, optional + Unit number for the list file (the default is 2). + model_ws : string, optional + model workspace. Directory name to create model data sets. + (default is the present working directory). + external_path : string + Location for external files (default is None). + verbose : boolean, optional + Print additional information to the screen (default is False). + load : boolean, optional + (default is True). + silent : integer + (default is 0) + + Attributes + ---------- + + Methods + ------- + + See Also + -------- + + Notes + ----- + + Examples + -------- + + >>> import flopy + >>> m = flopy.mt3d.mt.Mt3dms() + + """ + + def __init__(self, modelname='mt3dtest', namefile_ext='nam', + modflowmodel=None, ftlfilename="mt3d_link.ftl", ftlfree=False, + version='mt3dms', exe_name='mt3dms.exe', + structured=True, listunit=None, ftlunit=None, + model_ws='.', external_path=None, + verbose=False, load=True, silent=0): + + # Call constructor for parent object + BaseModel.__init__(self, modelname, namefile_ext, exe_name, model_ws, + structured=structured, verbose=verbose) + + # Set attributes + self.version_types = {'mt3dms': 'MT3DMS', 'mt3d-usgs': 'MT3D-USGS'} + + self.set_version(version.lower()) + + if listunit is None: + listunit = 16 + + if ftlunit is None: + ftlunit = 10 + + self.lst = Mt3dList(self, listunit=listunit) + self.mf = modflowmodel + self.ftlfilename = ftlfilename + self.ftlfree = ftlfree + self.ftlunit = ftlunit + self.free_format = None + + # Check whether specified ftlfile exists in model directory; if not, + # warn user + if os.path.isfile(os.path.join(self.model_ws, + str(modelname + '.' + namefile_ext))): + with open(os.path.join(self.model_ws, str( + modelname + '.' 
+ namefile_ext))) as nm_file: + for line in nm_file: + if line[0:3] == 'FTL': + ftlfilename = line.strip().split()[2] + break + if ftlfilename is None: + print("User specified FTL file does not exist in model directory") + print("MT3D will not work without a linker file") + else: + if os.path.isfile(os.path.join(self.model_ws, ftlfilename)): + # Check that the FTL present in the directory is of the format + # specified by the user, i.e., is same as ftlfree + # Do this by checking whether the first non-blank character is + # an apostrophe. + # If code lands here, then ftlfilename exists, open and read + # first 4 characters + f = open(os.path.join(self.model_ws, ftlfilename), 'rb') + c = f.read(4) + if isinstance(c, bytes): + c = c.decode() + + # if first non-blank char is an apostrophe, then formatted, + # otherwise binary + if (c.strip()[0] == "'" and self.ftlfree) or \ + (c.strip()[0] != "'" and not self.ftlfree): + pass + else: + msg = "Specified value of ftlfree conflicts with FTL " + \ + "file format" + print(msg) + msg = 'Switching ftlfree from ' + \ + '{} '.format(str(self.ftlfree)) + \ + 'to {}'.format(str(not self.ftlfree)) + print(msg) + self.ftlfree = not self.ftlfree # Flip the bool + + # external option stuff + self.array_free_format = False + self.array_format = 'mt3d' + self.external_fnames = [] + self.external_units = [] + self.external_binflag = [] + self.external = False + self.load = load + # the starting external data unit number + self._next_ext_unit = 2000 + if external_path is not None: + # assert model_ws == '.', "ERROR: external cannot be used " + \ + # "with model_ws" + + # external_path = os.path.join(model_ws, external_path) + if os.path.exists(external_path): + print("Note: external_path " + str(external_path) + + " already exists") + # assert os.path.exists(external_path),'external_path does not exist' + else: + os.mkdir(external_path) + self.external = True + self.external_path = external_path + self.verbose = verbose + self.silent = silent + + # Create a dictionary to map package with package object. + # This is used for loading models. 
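The dictionary assigned next maps lowercase name-file filetypes to FloPy package classes; mfreadnam.parsenamefile consults it so that load() can dispatch each name-file entry to the right loader. A minimal sketch of that dispatch pattern, using a hypothetical registry and a stub class (StubBtn stands in for a real package class such as Mt3dBtn):

    class StubBtn:
        # stand-in for a package class; real loaders also take ext_unit_dict
        @staticmethod
        def load(fname, model, ext_unit_dict=None):
            print('would load', fname)

    registry = {'btn': StubBtn}            # filetype -> package class
    filetype, fname = 'BTN', 'model.btn'   # as parsed from a name-file entry
    cls = registry.get(filetype.lower())
    if cls is not None:
        cls.load(fname, model=None)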
+ self.mfnam_packages = { + 'btn': Mt3dBtn, + 'adv': Mt3dAdv, + 'dsp': Mt3dDsp, + 'ssm': Mt3dSsm, + 'rct': Mt3dRct, + 'gcg': Mt3dGcg, + 'tob': Mt3dTob, + 'phc': Mt3dPhc, + 'lkt': Mt3dLkt, + 'sft': Mt3dSft, + 'uzt2': Mt3dUzt + } + return + + def __repr__(self): + return 'MT3DMS model' + + @property + def modeltime(self): + # build model time + data_frame = {'perlen': self.mf.dis.perlen.array, + 'nstp': self.mf.dis.nstp.array, + 'tsmult': self.mf.dis.tsmult.array} + self._model_time = ModelTime(data_frame, + self.mf.dis.itmuni_dict[ + self.mf.dis.itmuni], + self.dis.start_datetime, + self.dis.steady.array) + return self._model_time + + @property + def modelgrid(self): + if not self._mg_resync: + return self._modelgrid + + if self.btn is not None: + ibound = self.btn.icbund.array + delc = self.btn.delc.array + delr = self.btn.delr.array + top = self.btn.htop.array + botm = np.subtract(top, self.btn.dz.array.cumsum(axis=0)) + nlay = self.btn.nlay + else: + delc = self.mf.dis.delc.array + delr = self.mf.dis.delr.array + top = self.mf.dis.top.array + botm = self.mf.dis.botm.array + nlay = self.mf.nlay + if self.mf.bas6 is not None: + ibound = self.mf.bas6.ibound.array + else: + ibound = None + # build grid + self._modelgrid = StructuredGrid(delc=delc, + delr=delr, + top=top, + botm=botm, + idomain=ibound, + proj4=self._modelgrid.proj4, + epsg=self._modelgrid.epsg, + xoff=self._modelgrid.xoffset, + yoff=self._modelgrid.yoffset, + angrot=self._modelgrid.angrot, + nlay=nlay) + + # resolve offsets + xoff = self._modelgrid.xoffset + if xoff is None: + if self._xul is not None: + xoff = self._modelgrid._xul_to_xll(self._xul) + else: + xoff = self.mf._modelgrid.xoffset + if xoff is None: + # incase mf._modelgrid.xoffset is not set but mf._xul is + if self.mf._xul is not None: + xoff = self._modelgrid._xul_to_xll(self.mf._xul) + else: + xoff = 0.0 + yoff = self._modelgrid.yoffset + if yoff is None: + if self._yul is not None: + yoff = self._modelgrid._yul_to_yll(self._yul) + else: + yoff = self.mf._modelgrid.yoffset + if yoff is None: + # incase mf._modelgrid.yoffset is not set but mf._yul is + if self.mf._yul is not None: + yoff = self._modelgrid._yul_to_yll(self.mf._yul) + else: + yoff = 0.0 + proj4 = self._modelgrid.proj4 + if proj4 is None: + proj4 = self.mf._modelgrid.proj4 + epsg = self._modelgrid.epsg + if epsg is None: + epsg = self.mf._modelgrid.epsg + angrot = self._modelgrid.angrot + if angrot is None or angrot == 0.0: # angrot normally defaulted to 0.0 + if self.mf._modelgrid.angrot is not None: + angrot = self.mf._modelgrid.angrot + else: + angrot = 0.0 + + self._modelgrid.set_coord_info(xoff, yoff, angrot, epsg, proj4) + self._mg_resync = not self._modelgrid.is_complete + return self._modelgrid + + @property + def solver_tols(self): + if self.gcg is not None: + return self.gcg.cclose, -999 + return None + + @property + def sr(self): + if self.mf is not None: + return self.mf.sr + return None + + @property + def nlay(self): + if (self.btn): + return self.btn.nlay + else: + return 0 + + @property + def nrow(self): + if (self.btn): + return self.btn.nrow + else: + return 0 + + @property + def ncol(self): + if (self.btn): + return self.btn.ncol + else: + return 0 + + @property + def nper(self): + if (self.btn): + return self.btn.nper + else: + return 0 + + @property + def ncomp(self): + if (self.btn): + return self.btn.ncomp + else: + return 1 + + @property + def mcomp(self): + if (self.btn): + return self.btn.mcomp + else: + return 1 + + def get_nrow_ncol_nlay_nper(self): + if (self.btn): + 
return self.btn.nrow, self.btn.ncol, self.btn.nlay, self.btn.nper + else: + return 0, 0, 0, 0 + + # Property has no setter, so read-only + nrow_ncol_nlay_nper = property(get_nrow_ncol_nlay_nper) + + def write_name_file(self): + """ + Write the name file. + + """ + fn_path = os.path.join(self.model_ws, self.namefile) + f_nam = open(fn_path, 'w') + f_nam.write('{}\n'.format(self.heading)) + f_nam.write('{:14s} {:5d} {}\n'.format(self.lst.name[0], + self.lst.unit_number[0], + self.lst.file_name[0])) + if self.ftlfilename is not None: + ftlfmt = '' + if self.ftlfree: + ftlfmt = 'FREE' + f_nam.write('{:14s} {:5d} {} {}\n'.format('FTL', self.ftlunit, + self.ftlfilename, + ftlfmt)) + # write file entries in name file + f_nam.write('{}'.format(self.get_name_file_entries())) + + # write the external files + for u, f in zip(self.external_units, self.external_fnames): + f_nam.write('DATA {0:5d} '.format(u) + f + '\n') + + # write the output files + for u, f, b in zip(self.output_units, self.output_fnames, + self.output_binflag): + if u == 0: + continue + if b: + f_nam.write( + 'DATA(BINARY) {0:5d} '.format(u) + f + ' REPLACE\n') + else: + f_nam.write('DATA {0:5d} '.format(u) + f + '\n') + + f_nam.close() + return + + def load_results(self, **kwargs): + return + + @staticmethod + def load(f, version='mt3dms', exe_name='mt3dms.exe', verbose=False, + model_ws='.', load_only=None, forgive=False, modflowmodel=None): + """ + Load an existing model. + + Parameters + ---------- + f : string + Full path and name of MT3D name file. + + version : string + The version of MT3D (mt3dms, or mt3d-usgs) + (default is mt3dms) + + exe_name : string + The name of the executable to use if this loaded model is run. + (default is mt3dms.exe) + + verbose : bool + Write information on the load process if True. + (default is False) + + model_ws : string + The path for the model workspace. + (default is the current working directory '.') + + load_only : list of strings + Filetype(s) to load (e.g. ['btn', 'adv']) + (default is None, which means that all will be loaded) + + forgive : bool, optional + Option to raise exceptions on package load failure, which can be + useful for debugging. Default False. + + modflowmodel : flopy.modflow.mf.Modflow + This is a flopy Modflow model object upon which this Mt3dms + model is based. (the default is None) + + Returns + ------- + mt : flopy.mt3d.mt.Mt3dms + flopy Mt3d model object + + Notes + ----- + The load method does not retain the name for the MODFLOW-generated + FTL file. This can be added manually after the MT3D model has been + loaded. The syntax for doing this manually is + mt.ftlfilename = 'example.ftl' + + Examples + -------- + + >>> import flopy + >>> f = 'example.nam' + >>> mt = flopy.mt3d.mt.Mt3dms.load(f) + >>> mt.ftlfilename = 'example.ftl' + + """ + modelname, ext = os.path.splitext(f) + modelname_extension = ext[1:] # without '.' + + if verbose: + sys.stdout.write('\nCreating new model with name: {}\n{}\n\n'. 
+ format(modelname, 50 * '-')) + mt = Mt3dms(modelname=modelname, namefile_ext=modelname_extension, + version=version, exe_name=exe_name, + verbose=verbose, model_ws=model_ws, + modflowmodel=modflowmodel) + files_successfully_loaded = [] + files_not_loaded = [] + + # read name file + namefile_path = os.path.join(mt.model_ws, f) + if not os.path.isfile(namefile_path): + raise IOError('cannot find name file: ' + str(namefile_path)) + try: + ext_unit_dict = mfreadnam.parsenamefile( + namefile_path, mt.mfnam_packages, verbose=verbose) + except Exception as e: + # print("error loading name file entries from file") + # print(str(e)) + # return None + raise Exception( + "error loading name file entries from file:\n" + str(e)) + + if mt.verbose: + print('\n{}\nExternal unit dictionary:\n{}\n{}\n'. + format(50 * '-', ext_unit_dict, 50 * '-')) + + # reset unit number for list file + unitnumber = None + for key, value in ext_unit_dict.items(): + if value.filetype == 'LIST': + unitnumber = key + filepth = os.path.basename(value.filename) + if unitnumber == 'LIST': + unitnumber = 16 + if unitnumber is not None: + mt.lst.unit_number = [unitnumber] + mt.lst.file_name = [filepth] + + # set ftl information + unitnumber = None + for key, value in ext_unit_dict.items(): + if value.filetype == 'FTL': + unitnumber = key + filepth = os.path.basename(value.filename) + if unitnumber == 'FTL': + unitnumber = 10 + if unitnumber is not None: + mt.ftlunit = unitnumber + mt.ftlfilename = filepth + + # load btn + btn = None + btn_key = None + for key, item in ext_unit_dict.items(): + if item.filetype.lower() == "btn": + btn = item + btn_key = key + break + + if btn is None: + return None + + try: + pck = btn.package.load(btn.filename, mt, + ext_unit_dict=ext_unit_dict) + except Exception as e: + raise Exception('error loading BTN: {0}'.format(str(e))) + files_successfully_loaded.append(btn.filename) + if mt.verbose: + sys.stdout.write(' {:4s} package load...success\n' + .format(pck.name[0])) + ext_unit_dict.pop(btn_key).filehandle.close() + ncomp = mt.btn.ncomp + # reserved unit numbers for .ucn, s.ucn, .obs, .mas, .cnf + poss_output_units = set(list(range(201, 201 + ncomp)) + + list(range(301, 301 + ncomp)) + + list(range(401, 401 + ncomp)) + + list(range(601, 601 + ncomp)) + [17]) + if load_only is None: + load_only = [] + for key, item in ext_unit_dict.items(): + load_only.append(item.filetype) + else: + if not isinstance(load_only, list): + load_only = [load_only] + not_found = [] + for i, filetype in enumerate(load_only): + filetype = filetype.upper() + if filetype != 'BTN': + load_only[i] = filetype + found = False + for key, item in ext_unit_dict.items(): + if item.filetype == filetype: + found = True + break + if not found: + not_found.append(filetype) + if len(not_found) > 0: + raise Exception( + "the following load_only entries were not found " + "in the ext_unit_dict: " + ','.join(not_found)) + + # try loading packages in ext_unit_dict + for key, item in ext_unit_dict.items(): + if item.package is not None: + if item.filetype in load_only: + if forgive: + try: + pck = item.package.load(item.filehandle, mt, + ext_unit_dict=ext_unit_dict) + files_successfully_loaded.append(item.filename) + if mt.verbose: + sys.stdout.write( + ' {:4s} package load...success\n' + .format(pck.name[0])) + except BaseException as o: + if mt.verbose: + sys.stdout.write( + ' {:4s} package load...failed\n {!s}\n' + .format(item.filetype, o)) + files_not_loaded.append(item.filename) + else: + pck = item.package.load(item.filehandle, 
mt, + ext_unit_dict=ext_unit_dict) + files_successfully_loaded.append(item.filename) + if mt.verbose: + sys.stdout.write( + ' {:4s} package load...success\n' + .format(pck.name[0])) + else: + if mt.verbose: + sys.stdout.write(' {:4s} package load...skipped\n' + .format(item.filetype)) + files_not_loaded.append(item.filename) + elif "data" not in item.filetype.lower(): + files_not_loaded.append(item.filename) + if mt.verbose: + sys.stdout.write(' {:4s} package load...skipped\n' + .format(item.filetype)) + elif "data" in item.filetype.lower(): + if mt.verbose: + sys.stdout.write(' {} file load...skipped\n {}\n' + .format(item.filetype, + os.path.basename(item.filename))) + if key in poss_output_units: + # id files specified to output unit numbers and allow to + # pass through + mt.output_fnames.append(os.path.basename(item.filename)) + mt.output_units.append(key) + mt.output_binflag.append("binary" + in item.filetype.lower()) + elif key not in mt.pop_key_list: + mt.external_fnames.append(item.filename) + mt.external_units.append(key) + mt.external_binflag.append("binary" + in item.filetype.lower()) + mt.external_output.append(False) + + # pop binary output keys and any external file units that are now + # internal + for key in mt.pop_key_list: + try: + mt.remove_external(unit=key) + item = ext_unit_dict.pop(key) + if hasattr(item.filehandle, 'close'): + item.filehandle.close() + except KeyError: + if mt.verbose: + msg = "\nWARNING:\n External file unit " + \ + "{} does not exist in ext_unit_dict.\n".format(key) + sys.stdout.write(msg) + + # write message indicating packages that were successfully loaded + if mt.verbose: + print(1 * '\n') + s = ' The following {0} packages were successfully loaded.' \ + .format(len(files_successfully_loaded)) + print(s) + for fname in files_successfully_loaded: + print(' ' + os.path.basename(fname)) + if len(files_not_loaded) > 0: + s = ' The following {0} packages were not loaded.'.format( + len(files_not_loaded)) + print(s) + for fname in files_not_loaded: + print(' ' + os.path.basename(fname)) + print('\n') + + # return model object + return mt + + @staticmethod + def load_mas(fname): + """ + Load an mt3d mas file and return a numpy recarray + + Parameters + ---------- + fname : str + name of MT3D mas file + + Returns + ------- + r : np.ndarray + + """ + if not os.path.isfile(fname): + raise Exception('Could not find file: {}'.format(fname)) + dtype = [('time', float), ('total_in', float), + ('total_out', float), + ('sources', float), ('sinks', float), + ('fluid_storage', float), + ('total_mass', float), ('error_in-out', float), + ('error_alt', float)] + r = np.loadtxt(fname, skiprows=2, dtype=dtype) + r = r.view(np.recarray) + return r + + @staticmethod + def load_obs(fname): + """ + Load an mt3d obs file and return a numpy recarray + + Parameters + ---------- + fname : str + name of MT3D obs file + + Returns + ------- + r : np.ndarray + + """ + firstline = 'STEP TOTAL TIME LOCATION OF OBSERVATION POINTS (K,I,J)' + dtype = [('step', int), ('time', float)] + nobs = 0 + obs = [] + + if not os.path.isfile(fname): + raise Exception('Could not find file: {}'.format(fname)) + with open(fname, 'r') as f: + line = f.readline() + if line.strip() != firstline: + msg = 'First line in file must be \n{}\nFound {}'.format( + firstline, line.strip()) + msg += '\n{} does not appear to be a valid MT3D OBS file'.format( + fname) + raise Exception(msg) + + # Read obs names (when break, line will have first data line) + nlineperrec = 0 + while True: + line = f.readline() + 
if line[0:7].strip() == '1': + break + nlineperrec += 1 + ll = line.strip().split() + while len(ll) > 0: + k = int(ll.pop(0)) + i = int(ll.pop(0)) + j = int(ll.pop(0)) + obsnam = '({}, {}, {})'.format(k, i, j) + if obsnam in obs: + obsnam += str(len(obs) + 1) # make obs name unique + obs.append(obsnam) + + icount = 0 + r = [] + while True: + ll = [] + for n in range(nlineperrec): + icount += 1 + if icount > 1: + line = f.readline() + ll.extend(line.strip().split()) + + if not line: + break + + rec = [int(ll[0])] + for val in ll[1:]: + rec.append(float(val)) + r.append(tuple(rec)) + + # add obs names to dtype + for nameob in obs: + dtype.append((nameob, float)) + r = np.array(r, dtype=dtype) + r = r.view(np.recarray) + return r diff --git a/flopy/mt3d/mtadv.py b/flopy/mt3d/mtadv.py index c61ae6ed97..d2eebf0667 100644 --- a/flopy/mt3d/mtadv.py +++ b/flopy/mt3d/mtadv.py @@ -1,408 +1,408 @@ -import sys -from ..pakbase import Package - - -class Mt3dAdv(Package): - """ - MT3DMS Advection Package Class. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.mt3d.mt.Mt3dms`) to which - this package will be added. - mixelm : int - MIXELM is an integer flag for the advection solution option. - MIXELM = 0, the standard finite-difference method with upstream or - central-in-space weighting, depending on the value of NADVFD; - = 1, the forward-tracking method of characteristics (MOC); - = 2, the backward-tracking modified method of characteristics (MMOC); - = 3, the hybrid method of characteristics (HMOC) with MOC or MMOC - automatically and dynamically selected; - = -1, the third-order TVD scheme (ULTIMATE). - percel : float - PERCEL is the Courant number (i.e., the number of cells, or a - fraction of a cell) advection will be allowed in any direction in one - transport step. - For implicit finite-difference or particle-tracking-based schemes, - there is no limit on PERCEL, but for accuracy reasons, it is generally - not set much greater than one. Note, however, that the PERCEL limit is - checked over the entire model grid. Thus, even if PERCEL > 1, - advection may not be more than one cell's length at most model - locations. - For the explicit finite-difference or the third-order TVD scheme, - PERCEL is also a stability constraint which must not exceed one and - will be automatically reset to one if a value greater than one is - specified. - mxpart : int - MXPART is the maximum total number of moving particles allowed and is - used only when MIXELM = 1 or 3. - nadvfd : int - NADVFD is an integer flag indicating which weighting scheme should be - used; it is needed only when the advection term is solved using the - implicit finite- difference method. - NADVFD = 0 or 1, upstream weighting (default); = 2,central-in-space - weighting. - itrack : int - ITRACK is a flag indicating which particle-tracking algorithm is - selected for the Eulerian-Lagrangian methods. - ITRACK = 1, the first-order Euler algorithm is used. - = 2, the fourth-order Runge-Kutta algorithm is used; this option is - computationally demanding and may be needed only when PERCEL is set - greater than one. - = 3, the hybrid first- and fourth-order algorithm is used; the - Runge-Kutta algorithm is used in sink/source cells and the cells next - to sinks/sources while the Euler algorithm is used elsewhere. - wd : float - is a concentration weighting factor between 0.5 and 1. It is used for - operator splitting in the particle- tracking-based methods. The value - of 0.5 is generally adequate. 
The value of WD may be adjusted to - achieve better mass balance. Generally, it can be increased toward - 1.0 as advection becomes more dominant. - dceps : float - is a small Relative Cell Concentration Gradient below which advective - transport is considered - nplane : int - NPLANE is a flag indicating whether the random or - fixed pattern is selected for initial placement of moving particles. - If NPLANE = 0, the random pattern is selected for initial placement. - Particles are distributed randomly in both the horizontal and vertical - directions by calling a random number generator (Figure 18b). This - option is usually preferred and leads to smaller mass balance - discrepancy in nonuniform or diverging/converging flow fields. - If NPLANE > 0, the fixed pattern is selected for initial placement. - The value of NPLANE serves as the number of vertical 'planes' on - which initial particles are placed within each cell block (Figure 18a). - The fixed pattern may work better than the random pattern only in - relatively uniform flow fields. For two-dimensional simulations in - plan view, set NPLANE = 1. For cross sectional or three-dimensional - simulations, NPLANE = 2 is normally adequate. Increase NPLANE if more - resolution in the vertical direction is desired. - npl : int - NPL is the number of initial particles per cell to be placed at cells - where the Relative Cell Concentration Gradient is less than or equal - to DCEPS. Generally, NPL can be set to zero since advection is - considered insignificant when the Relative Cell Concentration Gradient - is less than or equal to DCEPS. Setting NPL equal to NPH causes a - uniform number of particles to be placed in every cell over the entire - grid (i.e., the uniform approach). - nph : int - NPH is the number of initial particles per cell to be placed at cells - where the Relative Cell Concentration Gradient is greater than DCEPS. - The selection of NPH depends on the nature of the flow field and also - the computer memory limitation. Generally, a smaller number should be - used in relatively uniform flow fields and a larger number should be - used in relatively nonuniform flow fields. However, values exceeding - 16 in two-dimensional simulation or 32 in three- dimensional - simulation are rarely necessary. If the random pattern is chosen, NPH - particles are randomly distributed within the cell block. If the fixed - pattern is chosen, NPH is divided by NPLANE to yield the number of - particles to be placed per vertical plane, which is rounded to one of - the values shown in Figure 30. - npmin : int - is the minimum number of particles allowed per cell. If the number of - particles in a cell at the end of a transport step is fewer than - NPMIN, new particles are inserted into that cell to maintain a - sufficient number of particles. NPMIN can be set to zero in relatively - uniform flow fields and to a number greater than zero in - diverging/converging flow fields. Generally, a value between zero and - four is adequate. - npmax : int - NPMAX is the maximum number of particles allowed per cell. If the - number of particles in a cell exceeds NPMAX, all particles are removed - from that cell and replaced by a new set of particles equal to NPH to - maintain mass balance. Generally, NPMAX can be set to approximately - two times of NPH. - interp : int - is a flag indicating the concentration interpolation method for use in - the MMOC scheme. Currently, only linear interpolation is implemented. 
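Taken together, a hedged doctest-style sketch of configuring the hybrid HMOC scheme with the particle options documented above (the values simply echo the class defaults and are not recommendations):

    >>> import flopy
    >>> m = flopy.mt3d.Mt3dms()
    >>> adv = flopy.mt3d.Mt3dAdv(m, mixelm=3, percel=0.75, itrack=3, wd=0.5,
    ...                          dceps=1e-5, nplane=2, npl=10, nph=40,
    ...                          npmin=5, npmax=80, dchmoc=0.0001)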
- nlsink : int - s a flag indicating whether the random or fixed pattern is selected - for initial placement of particles to approximate sink cells in the - MMOC scheme. The convention is the same as that for NPLANE. It is - generally adequate to set NLSINK equivalent to NPLANE. - npsink : int - is the number of particles used to approximate sink cells in the MMOC - scheme. The convention is the same as that for NPH. It is generally - adequate to set NPSINK equivalent to NPH. - dchmoc : float - DCHMOC is the critical Relative Concentration Gradient for - controlling the selective use of either MOC or MMOC in the HMOC - solution scheme. - The MOC solution is selected at cells where the Relative - Concentration Gradient is greater than DCHMOC. - The MMOC solution is selected at cells where the Relative - Concentration Gradient is less than or equal to DCHMOC. - extension : string - Filename extension (default is 'adv') - unitnumber : int - File unit number (default is None). - filenames : str or list of str - Filenames to use for the package. If filenames=None the package name - will be created using the model name and package extension. If a - single string is passed the package will be set to the string. - Default is None. - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - - Examples - -------- - - >>> import flopy - >>> m = flopy.mt3d.Mt3dms() - >>> adv = flopy.mt3d.Mt3dAdv(m) - - """ - - def __init__(self, model, mixelm=3, percel=0.75, mxpart=800000, nadvfd=1, - itrack=3, wd=0.5, - dceps=1e-5, nplane=2, npl=10, nph=40, npmin=5, npmax=80, - nlsink=0, npsink=15, - dchmoc=0.0001, extension='adv', unitnumber=None, - filenames=None): - - if unitnumber is None: - unitnumber = Mt3dAdv.defaultunit() - elif unitnumber == 0: - unitnumber = Mt3dAdv.reservedunit() - - # set filenames - if filenames is None: - filenames = [None] - elif isinstance(filenames, str): - filenames = [filenames] - - # Fill namefile items - name = [Mt3dAdv.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - self.mixelm = mixelm - self.percel = percel - self.mxpart = mxpart - self.nadvfd = nadvfd - self.mixelm = mixelm - self.itrack = itrack - self.wd = wd - self.dceps = dceps - self.nplane = nplane - self.npl = npl - self.nph = nph - self.npmin = npmin - self.npmax = npmax - self.interp = 1 # Command-line 'interp' might once be needed if MT3DMS is updated to include other interpolation method - self.nlsink = nlsink - self.npsink = npsink - self.dchmoc = dchmoc - self.parent.add_package(self) - return - - def write_file(self): - """ - Write the package file - - Returns - ------- - None - - """ - f_adv = open(self.fn_path, 'w') - f_adv.write('%10i%10f%10i%10i\n' % (self.mixelm, self.percel, - self.mxpart, self.nadvfd)) - if (self.mixelm > 0): - f_adv.write('%10i%10f\n' % (self.itrack, self.wd)) - if ((self.mixelm == 1) or (self.mixelm == 3)): - f_adv.write('%10.4e%10i%10i%10i%10i%10i\n' % (self.dceps, - self.nplane, - self.npl, self.nph, - self.npmin, - self.npmax)) - if ((self.mixelm == 2) or (self.mixelm == 3)): - f_adv.write('%10i%10i%10i\n' % (self.interp, self.nlsink, - self.npsink)) - if (self.mixelm == 3): - f_adv.write('%10f\n' % (self.dchmoc)) - f_adv.close() - return - - @staticmethod - def load(f, model, ext_unit_dict=None): - """ - Load 
an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.mt3d.mt.Mt3dms`) to - which this package will be added. - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - - Returns - ------- - adv : Mt3dAdv object - Mt3dAdv object. - - Examples - -------- - - >>> import flopy - >>> mt = flopy.mt3d.Mt3dms() - >>> adv = flopy.mt3d.Mt3dAdv.load('test.adv', m) - - """ - - if model.verbose: - sys.stdout.write('loading adv package file...\n') - - # Open file, if necessary - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - - # Dataset 0 -- comment line - while True: - line = f.readline() - if line[0] != '#': - break - - # Item B1: MIXELM, PERCEL, MXPART, NADVFD - line already read above - if model.verbose: - print(' loading MIXELM, PERCEL, MXPART, NADVFD...') - mixelm = int(line[0:10]) - percel = float(line[10:20]) - mxpart = 0 - if mixelm == 1 or mixelm == 3: - if len(line[20:30].strip()) > 0: - mxpart = int(line[20:30]) - nadvfd = 0 - if mixelm == 0: - if len(line[30:40].strip()) > 0: - nadvfd = int(line[30:40]) - if model.verbose: - print(' MIXELM {}'.format(mixelm)) - print(' PERCEL {}'.format(nadvfd)) - print(' MXPART {}'.format(mxpart)) - print(' NADVFD {}'.format(nadvfd)) - - # Item B2: ITRACK WD - itrack = None - wd = None - if mixelm == 1 or mixelm == 2 or mixelm == 3: - if model.verbose: - print(' loading ITRACK, WD...') - line = f.readline() - itrack = int(line[0:10]) - wd = float(line[10:20]) - if model.verbose: - print(' ITRACK {}'.format(itrack)) - print(' WD {}'.format(wd)) - - # Item B3: DCEPS, NPLANE, NPL, NPH, NPMIN, NPMAX - dceps = None - nplane = None - npl = None - nph = None - npmin = None - npmax = None - if mixelm == 1 or mixelm == 3: - if model.verbose: - print(' loading DCEPS, NPLANE, NPL, NPH, NPMIN, NPMAX...') - line = f.readline() - dceps = float(line[0:10]) - nplane = int(line[10:20]) - npl = int(line[20:30]) - nph = int(line[30:40]) - npmin = int(line[40:50]) - npmax = int(line[50:60]) - if model.verbose: - print(' DCEPS {}'.format(dceps)) - print(' NPLANE {}'.format(nplane)) - print(' NPL {}'.format(npl)) - print(' NPH {}'.format(nph)) - print(' NPMIN {}'.format(npmin)) - print(' NPMAX {}'.format(npmax)) - - # Item B4: INTERP, NLSINK, NPSINK - interp = None - nlsink = None - npsink = None - if mixelm == 2 or mixelm == 3: - if model.verbose: - print(' loading INTERP, NLSINK, NPSINK...') - line = f.readline() - interp = int(line[0:10]) - nlsink = int(line[10:20]) - npsink = int(line[20:30]) - if model.verbose: - print(' INTERP {}'.format(interp)) - print(' NLSINK {}'.format(nlsink)) - print(' NPSINK {}'.format(npsink)) - - # Item B5: DCHMOC - dchmoc = None - if mixelm == 3: - if model.verbose: - print(' loading DCHMOC...') - line = f.readline() - dchmoc = float(line[0:10]) - if model.verbose: - print(' DCHMOC {}'.format(dchmoc)) - - if openfile: - f.close() - - # set package unit number - unitnumber = None - filenames = [None] - if ext_unit_dict is not None: - unitnumber, filenames[0] = \ - model.get_ext_dict_attr(ext_unit_dict, - filetype=Mt3dAdv.ftype()) - - # Construct and return adv package - adv = Mt3dAdv(model, mixelm=mixelm, percel=percel, - mxpart=mxpart, nadvfd=nadvfd, 
- itrack=itrack, wd=wd, - dceps=dceps, nplane=nplane, npl=npl, nph=nph, - npmin=npmin, npmax=npmax, - nlsink=nlsink, npsink=npsink, - dchmoc=dchmoc, unitnumber=unitnumber, - filenames=filenames) - return adv - - @staticmethod - def ftype(): - return 'ADV' - - @staticmethod - def defaultunit(): - return 32 - - @staticmethod - def reservedunit(): - return 2 +import sys +from ..pakbase import Package + + +class Mt3dAdv(Package): + """ + MT3DMS Advection Package Class. + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.mt3d.mt.Mt3dms`) to which + this package will be added. + mixelm : int + MIXELM is an integer flag for the advection solution option. + MIXELM = 0, the standard finite-difference method with upstream or + central-in-space weighting, depending on the value of NADVFD; + = 1, the forward-tracking method of characteristics (MOC); + = 2, the backward-tracking modified method of characteristics (MMOC); + = 3, the hybrid method of characteristics (HMOC) with MOC or MMOC + automatically and dynamically selected; + = -1, the third-order TVD scheme (ULTIMATE). + percel : float + PERCEL is the Courant number (i.e., the number of cells, or a + fraction of a cell) advection will be allowed in any direction in one + transport step. + For implicit finite-difference or particle-tracking-based schemes, + there is no limit on PERCEL, but for accuracy reasons, it is generally + not set much greater than one. Note, however, that the PERCEL limit is + checked over the entire model grid. Thus, even if PERCEL > 1, + advection may not be more than one cell's length at most model + locations. + For the explicit finite-difference or the third-order TVD scheme, + PERCEL is also a stability constraint which must not exceed one and + will be automatically reset to one if a value greater than one is + specified. + mxpart : int + MXPART is the maximum total number of moving particles allowed and is + used only when MIXELM = 1 or 3. + nadvfd : int + NADVFD is an integer flag indicating which weighting scheme should be + used; it is needed only when the advection term is solved using the + implicit finite- difference method. + NADVFD = 0 or 1, upstream weighting (default); = 2,central-in-space + weighting. + itrack : int + ITRACK is a flag indicating which particle-tracking algorithm is + selected for the Eulerian-Lagrangian methods. + ITRACK = 1, the first-order Euler algorithm is used. + = 2, the fourth-order Runge-Kutta algorithm is used; this option is + computationally demanding and may be needed only when PERCEL is set + greater than one. + = 3, the hybrid first- and fourth-order algorithm is used; the + Runge-Kutta algorithm is used in sink/source cells and the cells next + to sinks/sources while the Euler algorithm is used elsewhere. + wd : float + is a concentration weighting factor between 0.5 and 1. It is used for + operator splitting in the particle- tracking-based methods. The value + of 0.5 is generally adequate. The value of WD may be adjusted to + achieve better mass balance. Generally, it can be increased toward + 1.0 as advection becomes more dominant. + dceps : float + is a small Relative Cell Concentration Gradient below which advective + transport is considered + nplane : int + NPLANE is a flag indicating whether the random or + fixed pattern is selected for initial placement of moving particles. + If NPLANE = 0, the random pattern is selected for initial placement. 
+ Particles are distributed randomly in both the horizontal and vertical + directions by calling a random number generator (Figure 18b). This + option is usually preferred and leads to smaller mass balance + discrepancy in nonuniform or diverging/converging flow fields. + If NPLANE > 0, the fixed pattern is selected for initial placement. + The value of NPLANE serves as the number of vertical 'planes' on + which initial particles are placed within each cell block (Figure 18a). + The fixed pattern may work better than the random pattern only in + relatively uniform flow fields. For two-dimensional simulations in + plan view, set NPLANE = 1. For cross sectional or three-dimensional + simulations, NPLANE = 2 is normally adequate. Increase NPLANE if more + resolution in the vertical direction is desired. + npl : int + NPL is the number of initial particles per cell to be placed at cells + where the Relative Cell Concentration Gradient is less than or equal + to DCEPS. Generally, NPL can be set to zero since advection is + considered insignificant when the Relative Cell Concentration Gradient + is less than or equal to DCEPS. Setting NPL equal to NPH causes a + uniform number of particles to be placed in every cell over the entire + grid (i.e., the uniform approach). + nph : int + NPH is the number of initial particles per cell to be placed at cells + where the Relative Cell Concentration Gradient is greater than DCEPS. + The selection of NPH depends on the nature of the flow field and also + the computer memory limitation. Generally, a smaller number should be + used in relatively uniform flow fields and a larger number should be + used in relatively nonuniform flow fields. However, values exceeding + 16 in two-dimensional simulation or 32 in three- dimensional + simulation are rarely necessary. If the random pattern is chosen, NPH + particles are randomly distributed within the cell block. If the fixed + pattern is chosen, NPH is divided by NPLANE to yield the number of + particles to be placed per vertical plane, which is rounded to one of + the values shown in Figure 30. + npmin : int + is the minimum number of particles allowed per cell. If the number of + particles in a cell at the end of a transport step is fewer than + NPMIN, new particles are inserted into that cell to maintain a + sufficient number of particles. NPMIN can be set to zero in relatively + uniform flow fields and to a number greater than zero in + diverging/converging flow fields. Generally, a value between zero and + four is adequate. + npmax : int + NPMAX is the maximum number of particles allowed per cell. If the + number of particles in a cell exceeds NPMAX, all particles are removed + from that cell and replaced by a new set of particles equal to NPH to + maintain mass balance. Generally, NPMAX can be set to approximately + two times of NPH. + interp : int + is a flag indicating the concentration interpolation method for use in + the MMOC scheme. Currently, only linear interpolation is implemented. + nlsink : int + s a flag indicating whether the random or fixed pattern is selected + for initial placement of particles to approximate sink cells in the + MMOC scheme. The convention is the same as that for NPLANE. It is + generally adequate to set NLSINK equivalent to NPLANE. + npsink : int + is the number of particles used to approximate sink cells in the MMOC + scheme. The convention is the same as that for NPH. It is generally + adequate to set NPSINK equivalent to NPH. 
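Because write_file (further down in this hunk) emits fixed-width ten-character fields, it can help to see what Item B1 looks like on disk. A stand-alone sketch using the same format string as write_file, with the class defaults:

    mixelm, percel, mxpart, nadvfd = 3, 0.75, 800000, 1
    print('%10i%10f%10i%10i' % (mixelm, percel, mxpart, nadvfd))
    # -> '         3  0.750000    800000         1'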
+ dchmoc : float + DCHMOC is the critical Relative Concentration Gradient for + controlling the selective use of either MOC or MMOC in the HMOC + solution scheme. + The MOC solution is selected at cells where the Relative + Concentration Gradient is greater than DCHMOC. + The MMOC solution is selected at cells where the Relative + Concentration Gradient is less than or equal to DCHMOC. + extension : string + Filename extension (default is 'adv') + unitnumber : int + File unit number (default is None). + filenames : str or list of str + Filenames to use for the package. If filenames=None the package name + will be created using the model name and package extension. If a + single string is passed the package will be set to the string. + Default is None. + + Attributes + ---------- + + Methods + ------- + + See Also + -------- + + Notes + ----- + + Examples + -------- + + >>> import flopy + >>> m = flopy.mt3d.Mt3dms() + >>> adv = flopy.mt3d.Mt3dAdv(m) + + """ + + def __init__(self, model, mixelm=3, percel=0.75, mxpart=800000, nadvfd=1, + itrack=3, wd=0.5, + dceps=1e-5, nplane=2, npl=10, nph=40, npmin=5, npmax=80, + nlsink=0, npsink=15, + dchmoc=0.0001, extension='adv', unitnumber=None, + filenames=None): + + if unitnumber is None: + unitnumber = Mt3dAdv.defaultunit() + elif unitnumber == 0: + unitnumber = Mt3dAdv.reservedunit() + + # set filenames + if filenames is None: + filenames = [None] + elif isinstance(filenames, str): + filenames = [filenames] + + # Fill namefile items + name = [Mt3dAdv.ftype()] + units = [unitnumber] + extra = [''] + + # set package name + fname = [filenames[0]] + + # Call ancestor's init to set self.parent, extension, name and unit number + Package.__init__(self, model, extension=extension, name=name, + unit_number=units, extra=extra, filenames=fname) + + self.mixelm = mixelm + self.percel = percel + self.mxpart = mxpart + self.nadvfd = nadvfd + self.mixelm = mixelm + self.itrack = itrack + self.wd = wd + self.dceps = dceps + self.nplane = nplane + self.npl = npl + self.nph = nph + self.npmin = npmin + self.npmax = npmax + self.interp = 1 # Command-line 'interp' might once be needed if MT3DMS is updated to include other interpolation method + self.nlsink = nlsink + self.npsink = npsink + self.dchmoc = dchmoc + self.parent.add_package(self) + return + + def write_file(self): + """ + Write the package file + + Returns + ------- + None + + """ + f_adv = open(self.fn_path, 'w') + f_adv.write('%10i%10f%10i%10i\n' % (self.mixelm, self.percel, + self.mxpart, self.nadvfd)) + if (self.mixelm > 0): + f_adv.write('%10i%10f\n' % (self.itrack, self.wd)) + if ((self.mixelm == 1) or (self.mixelm == 3)): + f_adv.write('%10.4e%10i%10i%10i%10i%10i\n' % (self.dceps, + self.nplane, + self.npl, self.nph, + self.npmin, + self.npmax)) + if ((self.mixelm == 2) or (self.mixelm == 3)): + f_adv.write('%10i%10i%10i\n' % (self.interp, self.nlsink, + self.npsink)) + if (self.mixelm == 3): + f_adv.write('%10f\n' % (self.dchmoc)) + f_adv.close() + return + + @staticmethod + def load(f, model, ext_unit_dict=None): + """ + Load an existing package. + + Parameters + ---------- + f : filename or file handle + File to load. + model : model object + The model object (of type :class:`flopy.mt3d.mt.Mt3dms`) to + which this package will be added. + ext_unit_dict : dictionary, optional + If the arrays in the file are specified using EXTERNAL, + or older style array control records, then `f` should be a file + handle. 
In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. + + Returns + ------- + adv : Mt3dAdv object + Mt3dAdv object. + + Examples + -------- + + >>> import flopy + >>> mt = flopy.mt3d.Mt3dms() + >>> adv = flopy.mt3d.Mt3dAdv.load('test.adv', m) + + """ + + if model.verbose: + sys.stdout.write('loading adv package file...\n') + + # Open file, if necessary + openfile = not hasattr(f, 'read') + if openfile: + filename = f + f = open(filename, 'r') + + # Dataset 0 -- comment line + while True: + line = f.readline() + if line[0] != '#': + break + + # Item B1: MIXELM, PERCEL, MXPART, NADVFD - line already read above + if model.verbose: + print(' loading MIXELM, PERCEL, MXPART, NADVFD...') + mixelm = int(line[0:10]) + percel = float(line[10:20]) + mxpart = 0 + if mixelm == 1 or mixelm == 3: + if len(line[20:30].strip()) > 0: + mxpart = int(line[20:30]) + nadvfd = 0 + if mixelm == 0: + if len(line[30:40].strip()) > 0: + nadvfd = int(line[30:40]) + if model.verbose: + print(' MIXELM {}'.format(mixelm)) + print(' PERCEL {}'.format(nadvfd)) + print(' MXPART {}'.format(mxpart)) + print(' NADVFD {}'.format(nadvfd)) + + # Item B2: ITRACK WD + itrack = None + wd = None + if mixelm == 1 or mixelm == 2 or mixelm == 3: + if model.verbose: + print(' loading ITRACK, WD...') + line = f.readline() + itrack = int(line[0:10]) + wd = float(line[10:20]) + if model.verbose: + print(' ITRACK {}'.format(itrack)) + print(' WD {}'.format(wd)) + + # Item B3: DCEPS, NPLANE, NPL, NPH, NPMIN, NPMAX + dceps = None + nplane = None + npl = None + nph = None + npmin = None + npmax = None + if mixelm == 1 or mixelm == 3: + if model.verbose: + print(' loading DCEPS, NPLANE, NPL, NPH, NPMIN, NPMAX...') + line = f.readline() + dceps = float(line[0:10]) + nplane = int(line[10:20]) + npl = int(line[20:30]) + nph = int(line[30:40]) + npmin = int(line[40:50]) + npmax = int(line[50:60]) + if model.verbose: + print(' DCEPS {}'.format(dceps)) + print(' NPLANE {}'.format(nplane)) + print(' NPL {}'.format(npl)) + print(' NPH {}'.format(nph)) + print(' NPMIN {}'.format(npmin)) + print(' NPMAX {}'.format(npmax)) + + # Item B4: INTERP, NLSINK, NPSINK + interp = None + nlsink = None + npsink = None + if mixelm == 2 or mixelm == 3: + if model.verbose: + print(' loading INTERP, NLSINK, NPSINK...') + line = f.readline() + interp = int(line[0:10]) + nlsink = int(line[10:20]) + npsink = int(line[20:30]) + if model.verbose: + print(' INTERP {}'.format(interp)) + print(' NLSINK {}'.format(nlsink)) + print(' NPSINK {}'.format(npsink)) + + # Item B5: DCHMOC + dchmoc = None + if mixelm == 3: + if model.verbose: + print(' loading DCHMOC...') + line = f.readline() + dchmoc = float(line[0:10]) + if model.verbose: + print(' DCHMOC {}'.format(dchmoc)) + + if openfile: + f.close() + + # set package unit number + unitnumber = None + filenames = [None] + if ext_unit_dict is not None: + unitnumber, filenames[0] = \ + model.get_ext_dict_attr(ext_unit_dict, + filetype=Mt3dAdv.ftype()) + + # Construct and return adv package + adv = Mt3dAdv(model, mixelm=mixelm, percel=percel, + mxpart=mxpart, nadvfd=nadvfd, + itrack=itrack, wd=wd, + dceps=dceps, nplane=nplane, npl=npl, nph=nph, + npmin=npmin, npmax=npmax, + nlsink=nlsink, npsink=npsink, + dchmoc=dchmoc, unitnumber=unitnumber, + filenames=filenames) + return adv + + @staticmethod + def ftype(): + return 'ADV' + + @staticmethod + def defaultunit(): + return 32 + + @staticmethod + def reservedunit(): + return 2 diff --git 
a/flopy/mt3d/mtbtn.py b/flopy/mt3d/mtbtn.py index 395f1c7e31..00726c2b98 100644 --- a/flopy/mt3d/mtbtn.py +++ b/flopy/mt3d/mtbtn.py @@ -1,991 +1,991 @@ -""" -mtbtn module. Contains the Mt3dBtn class. Note that the user can access -the Mt3dBtn class as `flopy.mt3d.Mt3dBtn`. - -Additional information for this MT3DMS package can be found in the MT3DMS -User's Manual. - -""" - -import numpy as np -from ..pakbase import Package -from ..utils import Util2d, Util3d -import warnings - - -class Mt3dBtn(Package): - """ - Basic Transport Package Class. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.mt3dms.mt.Mt3dms`) to which - this package will be added. - MFStyleArr : str - Specifies whether or not to read arrays using the MODFLOW array reader - format or the original MT3DMS array reader - DRYCell : str - Specifies whether or not to route mass through dry cells. When MF-NWT - is used to generate the flow-transport link file, this is a distinct - possibility. - Legacy99Stor : str - Specifies whether or not to use the storage formulation used in MT3DMS - FTLPrint : str - Specifies if flow-transport link terms (cell-by-cell flows) should be - echoed to the MT3D-USGS listing file. - NoWetDryPrint : str - Specifies whether or not to suppress wet/dry messaging in the MT3D-USGS - listing file. - OmitDryBudg : str - Specifies whether or not to include the mass flux terms through dry - cells in the mass budget written to the listing file. - AltWTSorb : str - Specifies whether or not to use the MT3DMS formulation (this keyword - omitted) for the solid phase, whereby the entire cell thickness is - available for interacting with the aqueous phase, even though the - aqueous phase may only occupy a portion of the cell's thickness. When - used, only the saturated portion of the cell is available for sorbing - ncomp : int - The total number of chemical species in the simulation. (default is - None, will be changed to 1 if sconc is single value) - mcomp : int - The total number of 'mobile' species (default is 1). mcomp must be - equal or less than ncomp. - tunit : str - The name of unit for time (default is 'D', for 'days'). Used for - identification purposes only. - lunit : str - The name of unit for length (default is 'M', for 'meters'). Used for - identification purposes only. - munit : str - The name of unit for mass (default is 'KG', for 'kilograms'). Used for - identification purposes only. - prsity : float or array of floats (nlay, nrow, ncol) - The effective porosity of the porous medium in a single porosity - system, or the mobile porosity in a dual-porosity medium (the immobile - porosity is defined through the Chemical Reaction Package. (default is - 0.25). - icbund : int or array of ints (nlay, nrow, ncol) - The icbund array specifies the boundary condition type for solute - species (shared by all species). If icbund = 0, the cell is an inactive - concentration cell; If icbund < 0, the cell is a constant-concentration - cell; If icbund > 0, the cell is an active concentration cell where the - concentration value will be calculated. (default is 1). - sconc : float, array of (nlay, nrow, ncol), or filename - sconc is the starting concentration for the first species. To specify - starting concentrations for other species in a multi-species - simulation, include additional keywords, such as sconc2, sconc3, and - so forth. - cinact : float - The value for indicating an inactive concentration cell. (default is - 1e30). 
- thkmin : float - The minimum saturated thickness in a cell, expressed as the decimal - fraction of its thickness, below which the cell is considered inactive. - (default is 0.01). - ifmtcn : int - A flag/format code indicating how the calculated concentration should - be printed to the standard output text file. Format codes for printing - are listed in Table 3 of the MT3DMS manual. If ifmtcn > 0 printing is - in wrap form; ifmtcn < 0 printing is in strip form; if ifmtcn = 0 - concentrations are not printed. (default is 0). - ifmtnp : int - A flag/format code indicating how the number of particles should - be printed to the standard output text file. The convention is - the same as for ifmtcn. (default is 0). - ifmtrf : int - A flag/format code indicating how the calculated retardation factor - should be printed to the standard output text file. The convention is - the same as for ifmtcn. (default is 0). - ifmtdp : int - A flag/format code indicating how the distance-weighted dispersion - coefficient should be printed to the standard output text file. The - convention is the same as for ifmtcn. (default is 0). - savucn : bool - A logical flag indicating whether the concentration solution should be - saved in an unformatted file. (default is True). - nprs : int - A flag indicating (i) the frequency of the output and - (ii) whether the output frequency is specified in terms - of total elapsed simulation time or the transport step number. If - nprs > 0 results will be saved at the times as specified in timprs; - if nprs = 0, results will not be saved except at the end of simulation; - if NPRS < 0, simulation results will be saved whenever the number of - transport steps is an even multiple of nprs. (default is 0). - timprs : list of floats - The total elapsed time at which the simulation results are saved. The - number of entries in timprs must equal nprs. (default is None). - obs: array of int - An array with the cell indices (layer, row, column) for which the - concentration is to be printed at every transport step. (default is - None). obs indices must be entered as zero-based numbers as a 1 is - added to them before writing to the btn file. - nprobs: int - An integer indicating how frequently the concentration at the specified - observation points should be saved. (default is 1). - chkmas: bool - A logical flag indicating whether a one-line summary of mass balance - information should be printed. (default is True). - nprmas: int - An integer indicating how frequently the mass budget information - should be saved. (default is 1). - dt0: float - The user-specified initial transport step size within each time-step - of the flow solution. (default is 0). - mxstrn: int - The maximum number of transport steps allowed within one time step - of the flow solution. (default is 50000). - ttsmult: float - The multiplier for successive transport steps within a flow time-step - if the GCG solver is used and the solution option for the advection - term is the standard finite-difference method. (default is 1.0). - ttsmax: float - The maximum transport step size allowed when transport step size - multiplier TTSMULT > 1.0. (default is 0). - species_names: list of str - A list of names for every species in the simulation. - extension : string - Filename extension (default is 'btn') - unitnumber : int - File unit number (default is None). - filenames : str or list of str - Filenames to use for the package. If filenames=None the package name - will be created using the model name and package extension. 
If a - single string is passed the package will be set to the string. - Default is None. - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - - Examples - -------- - - >>> import flopy - >>> mt = flopy.mt3dms.Mt3dms() - >>> btn = flopy.mt3dms.Mt3dBtn(mt) - - """ - - def __init__(self, model, MFStyleArr=False, DRYCell=False, - Legacy99Stor=False, FTLPrint=False, NoWetDryPrint=False, - OmitDryBud=False, AltWTSorb=False, nlay=None, nrow=None, - ncol=None, nper=None, ncomp=1, mcomp=1, tunit='D', lunit='M', - munit='KG', laycon=None, delr=None, delc=None, htop=None, - dz=None, prsity=0.30, icbund=1, - sconc=0.0, cinact=1e30, thkmin=0.01, ifmtcn=0, ifmtnp=0, - ifmtrf=0, ifmtdp=0, savucn=True, nprs=0, timprs=None, - obs=None, nprobs=1, chkmas=True, nprmas=1, - perlen=None, nstp=None, tsmult=None, ssflag=None, dt0=0, - mxstrn=50000, ttsmult=1.0, ttsmax=0, - species_names=None, extension='btn', - unitnumber=None, filenames=None, - **kwargs): - - if unitnumber is None: - unitnumber = Mt3dBtn.defaultunit() - elif unitnumber == 0: - unitnumber = Mt3dBtn.reservedunit() - - # set filenames - if filenames is None: - filenames = [None] - elif isinstance(filenames, str): - filenames = [filenames] - - # Fill namefile items - name = [Mt3dBtn.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - # Set these variables from the Modflow model (self.parent.mf) unless - # they are specified in the constructor. - self.setmodflowvars(nlay, nrow, ncol, nper, laycon, delr, delc, htop, - dz, perlen, nstp, tsmult) - - # Make the rest of the assignments - self.heading1 = '# BTN for MT3DMS, generated by Flopy.' 
- self.heading2 = '#' - self.MFStyleArr = MFStyleArr - if self.MFStyleArr: - model.free_format = True - model.array_format = None - self.DRYCell = DRYCell - self.Legacy99Stor = Legacy99Stor - self.FTLPrint = FTLPrint - self.NoWetDryPrint = NoWetDryPrint - self.OmitDryBud = OmitDryBud - self.AltWTSorb = AltWTSorb - self.ncomp = ncomp - self.mcomp = mcomp - self.tunit = tunit - self.lunit = lunit - self.munit = munit - self.cinact = cinact - self.thkmin = thkmin - self.ifmtcn = ifmtcn - self.ifmtnp = ifmtnp - self.ifmtrf = ifmtrf - self.ifmtdp = ifmtdp - self.savucn = savucn - self.nprs = nprs - self.timprs = timprs - if obs is not None: - if isinstance(obs, list): - obs = np.array(obs) - if obs.ndim != 2: - raise Exception( - 'obs must be (or be convertible to) a 2d array') - self.obs = obs - self.nprobs = nprobs - self.chkmas = chkmas - self.nprmas = nprmas - if species_names is None: - species_names = [] - self.species_names = species_names - self.prsity = Util3d(model, (self.nlay, self.nrow, self.ncol), - np.float32, prsity, name='prsity', - locat=self.unit_number[0], - array_free_format=False) - self.icbund = Util3d(model, (self.nlay, self.nrow, self.ncol), - np.int32, - icbund, name='icbund', - locat=self.unit_number[0], - array_free_format=False) - self.ssflag = ssflag - self.dt0 = Util2d(model, (self.nper,), np.float32, dt0, name='dt0', - array_free_format=False) - self.mxstrn = Util2d(model, (self.nper,), np.int32, mxstrn, - name='mxstrn') - self.ttsmult = Util2d(model, (self.nper,), np.float32, ttsmult, - name='ttmult') - self.ttsmax = Util2d(model, (self.nper,), np.float32, ttsmax, - name='ttsmax') - - # Do some fancy stuff for multi-species concentrations - self.sconc = [] - u3d = Util3d(model, (self.nlay, self.nrow, self.ncol), np.float32, - sconc, name='sconc1', locat=self.unit_number[0], - array_free_format=False) - self.sconc.append(u3d) - if ncomp > 1: - for icomp in range(2, ncomp + 1): - name = "sconc" + str(icomp) - val = 0.0 - if name in kwargs: - val = kwargs.pop(name) - else: - print("BTN: setting sconc for component " + - str(icomp) + " to zero, kwarg name " + - name) - u3d = Util3d(model, (self.nlay, self.nrow, self.ncol), - np.float32, val, name=name, - locat=self.unit_number[0], - array_free_format=False) - self.sconc.append(u3d) - - # Check to make sure that all kwargs have been consumed - if len(list(kwargs.keys())) > 0: - raise Exception("BTN error: unrecognized kwargs: " + - ' '.join(list(kwargs.keys()))) - - # Finally add self to parent's package list and return - self.parent.add_package(self) - return - - def setmodflowvars(self, nlay, nrow, ncol, nper, laycon, delr, delc, htop, - dz, perlen, nstp, tsmult): - """ - Set these variables from the MODFLOW model, if it exists - - """ - # Members that may come from a modflow model (model.mf) - validmfdis = True - try: - dum = self.parent.mf.dis.nlay - mf = self.parent.mf - except: - validmfdis = False - - mfvarlist = [nlay, nrow, ncol, nper, laycon, delr, delc, htop, dz, - perlen, nstp, tsmult] - if not validmfdis: - for v in mfvarlist: - s = 'BTN error. Required input is None, but no modflow model.' - s += ' If no modflow model is passed to Mt3dms, then values ' - s += 'must be specified in the BTN constructor for: ' - s += 'nlay, nrow, ncol, nper, laycon, delr, delc, htop, dz, ' - s += 'perlen, nstp, and tsmult.' 
- if v is None: - raise Exception(s) - - if nlay is not None: - self.nlay = nlay - else: - self.nlay = mf.dis.nlay - - if nrow is not None: - self.nrow = nrow - else: - self.nrow = mf.dis.nrow - - if ncol is not None: - self.ncol = ncol - else: - self.ncol = mf.dis.ncol - - if nper is not None: - self.nper = nper - else: - self.nper = mf.dis.nper - - nlay = self.nlay - nrow = self.nrow - ncol = self.ncol - nper = self.nper - - if delr is not None: - self.delr = Util2d(self.parent, (ncol,), np.float32, delr, - name='delr', - locat=self.unit_number[0], - array_free_format=False) - else: - self.delr = Util2d(self.parent, (ncol,), np.float32, - mf.dis.delr.get_value(), - name='delr', - locat=self.unit_number[0], - array_free_format=False) - - if delc is not None: - self.delc = Util2d(self.parent, (nrow,), np.float32, delc, - name='delc', - locat=self.unit_number[0]) - else: - self.delc = Util2d(self.parent, (nrow,), np.float32, - mf.dis.delc.get_value(), - name='delc', - locat=self.unit_number[0], - array_free_format=False) - - if htop is not None: - self.htop = Util2d(self.parent, (nrow, ncol), np.float32, htop, - name='htop', - locat=self.unit_number[0], - array_free_format=False) - else: - self.htop = Util2d(self.parent, (nrow, ncol), np.float32, - mf.dis.top.get_value(), - name='htop', - locat=self.unit_number[0], - array_free_format=False) - - if dz is not None: - self.dz = Util3d(self.parent, (nlay, nrow, ncol), np.float32, dz, - name='dz', - locat=self.unit_number[0], - array_free_format=False) - else: - thickness = mf.dis.thickness.get_value() - self.dz = Util3d(self.parent, (nlay, nrow, ncol), np.float32, - thickness, name='dz', - locat=self.unit_number[0], - array_free_format=False) - - if perlen is not None: - self.perlen = Util2d(self.parent, (nper,), np.float32, perlen, - name='perlen', - locat=self.unit_number[0]) - else: - self.perlen = Util2d(self.parent, (nper,), np.float32, - mf.dis.perlen.get_value(), - name='perlen', - locat=self.unit_number[0]) - - if nstp is not None: - self.nstp = Util2d(self.parent, (nper,), np.int32, nstp, - name='nstp', - locat=self.unit_number[0]) - else: - self.nstp = Util2d(self.parent, (nper,), np.int32, - mf.dis.nstp.get_value(), - name='nstp', - locat=self.unit_number[0]) - - if tsmult is not None: - self.tsmult = Util2d(self.parent, (nper,), np.float32, tsmult, - name='tsmult', - locat=self.unit_number[0]) - else: - self.tsmult = Util2d(self.parent, (nper,), np.float32, - mf.dis.tsmult.get_value(), - name='tsmult', - locat=self.unit_number[0]) - - self.laycon = None - if laycon is not None: - self.laycon = Util2d(self.parent, (nlay,), np.int32, laycon, - name='laycon', - locat=self.unit_number[0]) - else: - flow_package = mf.get_package('BCF6') - if flow_package is not None: - self.laycon = Util2d(self.parent, (nlay,), np.int32, - flow_package.laycon.get_value(), - name='laycon', - locat=self.unit_number[0]) - else: - flow_package = mf.get_package('LPF') - if flow_package is not None: - self.laycon = Util2d(self.parent, (nlay,), - np.int32, - flow_package.laytyp.get_value(), - name='laycon', - locat=self.unit_number[0]) - flow_package = mf.get_package('UPW') - if flow_package is not None: - self.laycon = Util2d(self.parent, (nlay,), - np.int32, - flow_package.laytyp.get_value(), - name='laycon', - locat=self.unit_number[0]) - - s = 'BTN warning. Laycon has not been set. A modflow model with a ' - s += ' BCF or LPF package does not exist and laycon was not passed ' - s += ' to the BTN constructor. Setting laycon to 1 (convertible).' 
- if self.laycon is None: - warnings.warn(s) - self.laycon = Util2d(self.parent, (nlay,), np.int32, 1, - name='laycon', - locat=self.unit_number[0]) - return - - def write_file(self): - """ - Write the package file - - Returns - ------- - None - - """ - # Open file for writing - f_btn = open(self.fn_path, 'w') - - # A1,2 - f_btn.write('#{0:s}\n#{1:s}\n'.format(self.heading1, self.heading2)) - - # A3; Keywords - # Build a string of the active keywords - str1 = '' - if self.MFStyleArr: - str1 += ' MODFLOWSTYLEARRAYS' - if self.DRYCell: - str1 += ' DRYCELL' - if self.Legacy99Stor: - str1 += ' LEGACY99STORAGE' - if self.FTLPrint: - str1 += ' FTLPRINT' - if self.NoWetDryPrint: - str1 += ' NOWETDRYPRINT' - if self.OmitDryBud: - str1 += ' OMITDRYCELLBUDGET' - if self.AltWTSorb: - str1 += ' ALTWTSORB' - - if str1 != '': - f_btn.write(str1 + '\n') - - # A3 - f_btn.write('{0:10d}{1:10d}{2:10d}{3:10d}{4:10d}{5:10d}\n' - .format(self.nlay, self.nrow, self.ncol, self.nper, - self.ncomp, self.mcomp)) - - # A4 - f_btn.write('{0:4s}{1:4s}{2:4s}\n' \ - .format(self.tunit, self.lunit, self.munit)) - - # A5 - if (self.parent.adv != None): - f_btn.write('{0:2s}'.format('T')) - else: - f_btn.write('{0:2s}'.format('F')) - if (self.parent.dsp != None): - f_btn.write('{0:2s}'.format('T')) - else: - f_btn.write('{0:2s}'.format('F')) - if (self.parent.ssm != None): - f_btn.write('{0:2s}'.format('T')) - else: - f_btn.write('{0:2s}'.format('F')) - if (self.parent.rct != None): - f_btn.write('{0:2s}'.format('T')) - else: - f_btn.write('{0:2s}'.format('F')) - if (self.parent.gcg != None): - f_btn.write('{0:2s}'.format('T')) - else: - f_btn.write('{0:2s}'.format('F')) - f_btn.write('\n') - - # A6 - self.laycon.set_fmtin('(40I2)') - f_btn.write(self.laycon.string) - - # A7 - f_btn.write(self.delr.get_file_entry()) - - # A8 - f_btn.write(self.delc.get_file_entry()) - - # A9 - f_btn.write(self.htop.get_file_entry()) - - # A10 - f_btn.write(self.dz.get_file_entry()) - - # A11 - f_btn.write(self.prsity.get_file_entry()) - - # A12 - f_btn.write(self.icbund.get_file_entry()) - - # A13 - # Starting concentrations - for s in range(len(self.sconc)): - f_btn.write(self.sconc[s].get_file_entry()) - - # A14 - f_btn.write('{0:10.0E}{1:10.2E}\n' \ - .format(self.cinact, self.thkmin)) - - # A15 - f_btn.write('{0:10d}{1:10d}{2:10d}{3:10d}' \ - .format(self.ifmtcn, self.ifmtnp, self.ifmtrf, - self.ifmtdp)) - if (self.savucn == True): - ss = 'T' - else: - ss = 'F' - f_btn.write('{0:>10s}\n'.format(ss)) - - # A16, A17 - if self.timprs is None: - f_btn.write('{0:10d}\n'.format(self.nprs)) - else: - f_btn.write('{0:10d}\n'.format(len(self.timprs))) - timprs = Util2d(self.parent, (len(self.timprs),), - np.float32, self.timprs, name='timprs', - fmtin='(8G10.4)') - timprs.format.fortran = '(8G10.4)' - f_btn.write(timprs.string) - - # A18, A19 - if self.obs is None: - f_btn.write('{0:10d}{1:10d}\n'.format(0, self.nprobs)) - else: - nobs = self.obs.shape[0] - f_btn.write('{0:10d}{1:10d}\n'.format(nobs, self.nprobs)) - for i in range(nobs): - f_btn.write('{0:10d}{1:10d}{2:10d}\n' \ - .format(self.obs[i, 0] + 1, self.obs[i, 1] + 1, - self.obs[i, 2] + 1)) - - # A20 CHKMAS, NPRMAS - if (self.chkmas == True): - ss = 'T' - else: - ss = 'F' - f_btn.write('{0:>10s}{1:10d}\n'.format(ss, self.nprmas)) - - # A21, 22, 23 PERLEN, NSTP, TSMULT - for t in range(self.nper): - s = '{0:10G}{1:10d}{2:10G}'.format(self.perlen[t], - self.nstp[t], - self.tsmult[t]) - if self.ssflag is not None: - s += ' ' + self.ssflag[t] - s += '\n' - f_btn.write(s) - 
f_btn.write('{0:10.4G}{1:10d}{2:10.4G}{3:10.4G}\n' - .format(self.dt0[t], self.mxstrn[t], - self.ttsmult[t], self.ttsmax[t])) - f_btn.close() - return - - @staticmethod - def load(f, model, ext_unit_dict=None): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.mt3d.mt.Mt3dms`) to - which this package will be added. - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - - Returns - ------- - btn : Mt3dBtn object - Mt3dBtn object. - - Examples - -------- - - >>> import flopy - >>> mt = flopy.mt3d.Mt3dms() - >>> btn = flopy.mt3d.Mt3dBtn.load('test.btn', mt) - - """ - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - - # A1 - if model.verbose: - print(' loading COMMENT LINES A1 AND A2...') - line = f.readline() - if model.verbose: - print('A1: '.format(line.strip())) - - # A2 - line = f.readline() - if model.verbose: - print('A2: '.format(line.strip())) - - # New keyword options in MT3D-USGS are found here - line = f.readline() - m_arr = line.strip().split() - - # Set default values for the keywords - MFStyleArr = False - DRYCell = False - Legacy99Stor = False - FTLPrint = False - NoWetDryPrint = False - OmitDryBud = False - AltWTSorb = False - if m_arr[ - 0].strip().isdigit() is not True: # If m_arr[0] is not a digit, it is a keyword - if model.verbose: - print(' loading optional keywords: {}'.format(line.strip())) - for i in range(0, len(m_arr)): - if m_arr[i].upper() == "MODFLOWSTYLEARRAYS": - MFStyleArr = True - model.array_format = None - model.free_format = True - if m_arr[i].upper() == "DRYCELL": - DRYCell = True - if m_arr[i].upper() == "LEGACY99STORAGE": - Legacy99Stor = True - if m_arr[i].upper() == "FTLPRINT": - FTLPrint = True - if m_arr[i].upper() == "NOWETDRYPRINT": - NoWetDryPrint = True - if m_arr[i].upper() == "OMITDRYCELLBUDGET": - OmitDryBud = True - if m_arr[i].upper() == "ALTWTSORB": - AltWTSorb = True - elif model.verbose: - print(' optional keywords not identifed/loaded') - - # A3 - if model.verbose: - print(' loading NLAY, NROW, NCOL, NPER, NCOMP, MCOMP...') - if m_arr[0].isdigit() is False: - line = f.readline() - nlay = int(line[0:10]) - nrow = int(line[10:20]) - ncol = int(line[20:30]) - nper = int(line[30:40]) - try: - ncomp = int(line[40:50]) - except: - ncomp = 1 - try: - mcomp = int(line[50:60]) - except: - mcomp = 1 - if model.verbose: - print(' NLAY {}'.format(nlay)) - print(' NROW {}'.format(nrow)) - print(' NCOL {}'.format(ncol)) - print(' NPER {}'.format(nper)) - print(' NCOMP {}'.format(ncomp)) - print(' MCOMP {}'.format(mcomp)) - - if model.verbose: - print(' loading TUNIT, LUNIT, MUNIT...') - line = f.readline() - tunit = line[0:4] - lunit = line[4:8] - munit = line[8:12] - if model.verbose: - print(' TUNIT {}'.format(tunit)) - print(' LUNIT {}'.format(lunit)) - print(' MUNIT {}'.format(munit)) - - if model.verbose: - print(' loading TRNOP...') - trnop = f.readline()[:20].strip().split() - if model.verbose: - print(' TRNOP {}'.format(trnop)) - - if model.verbose: - print(' loading LAYCON...') - laycon = Util2d.load_txt((nlay,), f, np.int32, '(40I2)') - if model.verbose: - print(' LAYCON {}'.format(laycon)) - - if model.verbose: - print(' 
loading DELR...') - delr = Util2d.load(f, model, (ncol,), np.float32, 'delr', - ext_unit_dict, array_format="mt3d") - if model.verbose: - print(' DELR {}'.format(delr)) - - if model.verbose: - print(' loading DELC...') - delc = Util2d.load(f, model, (nrow,), np.float32, 'delc', - ext_unit_dict, array_format="mt3d") - if model.verbose: - print(' DELC {}'.format(delc)) - - if model.verbose: - print(' loading HTOP...') - htop = Util2d.load(f, model, (nrow, ncol), np.float32, 'htop', - ext_unit_dict, array_format="mt3d") - if model.verbose: - print(' HTOP {}'.format(htop)) - - if model.verbose: - print(' loading DZ...') - dz = Util3d.load(f, model, (nlay, nrow, ncol), np.float32, 'dz', - ext_unit_dict, array_format="mt3d") - if model.verbose: - print(' DZ {}'.format(dz)) - - if model.verbose: - print(' loading PRSITY...') - prsity = Util3d.load(f, model, (nlay, nrow, ncol), np.float32, - 'prsity', - ext_unit_dict, array_format="mt3d") - if model.verbose: - print(' PRSITY {}'.format(prsity)) - - if model.verbose: - print(' loading ICBUND...') - icbund = Util3d.load(f, model, (nlay, nrow, ncol), np.int32, 'icbund', - ext_unit_dict, array_format="mt3d") - if model.verbose: - print(' ICBUND {}'.format(icbund)) - - if model.verbose: - print(' loading SCONC...') - kwargs = {} - sconc = Util3d.load(f, model, (nlay, nrow, ncol), np.float32, 'sconc1', - ext_unit_dict, array_format="mt3d") - if ncomp > 1: - for icomp in range(2, ncomp + 1): - name = "sconc" + str(icomp) - if model.verbose: - print(' loading {}...'.format(name)) - u3d = Util3d.load(f, model, (nlay, nrow, ncol), np.float32, - name, ext_unit_dict, array_format="mt3d") - kwargs[name] = u3d - if model.verbose: - print(' SCONC {}'.format(sconc)) - - if model.verbose: - print(' loading CINACT, THCKMIN...') - line = f.readline() - cinact = float(line[0:10]) - try: - thkmin = float(line[10:20]) - except: - thkmin = 0.01 - if model.verbose: - print(' CINACT {}'.format(cinact)) - print(' THKMIN {}'.format(thkmin)) - - if model.verbose: - print(' loading IFMTCN, IFMTNP, IFMTRF, IFMTDP, SAVUCN...') - line = f.readline() - ifmtcn = int(line[0:10]) - ifmtnp = int(line[10:20]) - ifmtrf = int(line[20:30]) - ifmtdp = int(line[30:40]) - savucn = False - if 't' in line[40:50].lower(): - savucn = True - if model.verbose: - print(' IFMTCN {}'.format(ifmtcn)) - print(' IFMTNP {}'.format(ifmtnp)) - print(' IFMTRF {}'.format(ifmtrf)) - print(' IFMTDP {}'.format(ifmtdp)) - print(' SAVUCN {}'.format(savucn)) - - if model.verbose: - print(' loading NPRS...') - line = f.readline() - nprs = int(line[0:10]) - if model.verbose: - print(' NPRS {}'.format(nprs)) - - timprs = None - if nprs > 0: - if model.verbose: - print(' loading TIMPRS...') - timprs = Util2d.load_txt((nprs,), f, np.float32, '(8F10.0)') - if model.verbose: - print(' TIMPRS {}'.format(timprs)) - - if model.verbose: - print(' loading NOBS, NPROBS...') - line = f.readline() - nobs = int(line[0:10]) - try: - nprobs = int(line[10:20]) - except: - nprobs = 1 - if model.verbose: - print(' NOBS {}'.format(nobs)) - print(' NPROBS {}'.format(nprobs)) - - obs = None - if nobs > 0: - if model.verbose: - print(' loading KOBS, IOBS, JOBS...') - obs = [] - for l in range(nobs): - line = f.readline() - k = int(line[0:10]) - i = int(line[10:20]) - j = int(line[20:30]) - obs.append([k, i, j]) - obs = np.array(obs) - 1 - if model.verbose: - print(' OBS {}'.format(obs)) - - if model.verbose: - print(' loading CHKMAS, NPRMAS...') - line = f.readline() - chkmas = False - if 't' in line[0:10].lower(): - chkmas = True - try: 
- nprmas = int(line[10:20]) - except: - nprmas = 1 - if model.verbose: - print(' CHKMAS {}'.format(chkmas)) - print(' NPRMAS {}'.format(nprmas)) - - if model.verbose: - print( - ' loading PERLEN, NSTP, TSMULT, TSLNGH, DT0, MXSTRN, TTSMULT, TTSMAX...') - dt0, mxstrn, ttsmult, ttsmax = [], [], [], [] - perlen = [] - nstp = [] - tsmult = [] - tslngh = [] - ssflag = [] - for kper in range(nper): - line = f.readline() - perlen.append(float(line[0:10])) - nstp.append(int(line[10:20])) - tsmult.append(float(line[20:30])) - sf = ' ' - ll = line[30:].strip().split() - if len(ll) > 0: - if 'sstate' in ll[0].lower(): - sf = 'SState' - ssflag.append(sf) - - if tsmult[-1] <= 0: - t = Util2d.load_txt((nstp[-1],), f, np.float32, '(8F10.0)') - tslngh.append(t) - raise Exception("tsmult <= 0 not supported") - - line = f.readline() - dt0.append(float(line[0:10])) - mxstrn.append(int(line[10:20])) - ttsmult.append(float(line[20:30])) - ttsmax.append(float(line[30:40])) - - if model.verbose: - print(' PERLEN {}'.format(perlen)) - print(' NSTP {}'.format(nstp)) - print(' TSMULT {}'.format(tsmult)) - print(' SSFLAG {}'.format(ssflag)) - print(' TSLNGH {}'.format(tslngh)) - print(' DT0 {}'.format(dt0)) - print(' MXSTRN {}'.format(mxstrn)) - print(' TTSMULT {}'.format(ttsmult)) - print(' TTSMAX {}'.format(ttsmax)) - - if openfile: - f.close() - - # set package unit number - unitnumber = None - filenames = [None] - if ext_unit_dict is not None: - unitnumber, filenames[0] = \ - model.get_ext_dict_attr(ext_unit_dict, - filetype=Mt3dBtn.ftype()) - - btn = Mt3dBtn(model, MFStyleArr=MFStyleArr, DRYCell=DRYCell, - Legacy99Stor=Legacy99Stor, FTLPrint=FTLPrint, - NoWetDryPrint=NoWetDryPrint, OmitDryBud=OmitDryBud, - AltWTSorb=AltWTSorb, - nlay=nlay, nrow=nrow, ncol=ncol, nper=nper, - ncomp=ncomp, mcomp=mcomp, tunit=tunit, - laycon=laycon, delr=delr, delc=delc, htop=htop, dz=dz, - lunit=lunit, munit=munit, prsity=prsity, icbund=icbund, - sconc=sconc, cinact=cinact, thkmin=thkmin, - ifmtcn=ifmtcn, ifmtnp=ifmtnp, ifmtrf=ifmtrf, - ifmtdp=ifmtdp, savucn=savucn, nprs=nprs, - timprs=timprs, obs=obs, nprobs=nprobs, chkmas=chkmas, - nprmas=nprmas, perlen=perlen, nstp=nstp, tsmult=tsmult, - ssflag=ssflag, dt0=dt0, mxstrn=mxstrn, ttsmult=ttsmult, - ttsmax=ttsmax, - unitnumber=unitnumber, filenames=filenames, - **kwargs) - return btn - - @staticmethod - def ftype(): - return 'BTN' - - @staticmethod - def defaultunit(): - return 31 - - @staticmethod - def reservedunit(): - return 1 +""" +mtbtn module. Contains the Mt3dBtn class. Note that the user can access +the Mt3dBtn class as `flopy.mt3d.Mt3dBtn`. + +Additional information for this MT3DMS package can be found in the MT3DMS +User's Manual. + +""" + +import numpy as np +from ..pakbase import Package +from ..utils import Util2d, Util3d +import warnings + + +class Mt3dBtn(Package): + """ + Basic Transport Package Class. + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.mt3dms.mt.Mt3dms`) to which + this package will be added. + MFStyleArr : str + Specifies whether or not to read arrays using the MODFLOW array reader + format or the original MT3DMS array reader + DRYCell : str + Specifies whether or not to route mass through dry cells. When MF-NWT + is used to generate the flow-transport link file, this is a distinct + possibility. 
+    Legacy99Stor : str
+        Specifies whether or not to use the storage formulation used in MT3DMS
+    FTLPrint : str
+        Specifies if flow-transport link terms (cell-by-cell flows) should be
+        echoed to the MT3D-USGS listing file.
+    NoWetDryPrint : str
+        Specifies whether or not to suppress wet/dry messaging in the MT3D-USGS
+        listing file.
+    OmitDryBud : str
+        Specifies whether or not to omit the mass flux terms through dry
+        cells from the mass budget written to the listing file.
+    AltWTSorb : str
+        Specifies whether or not to use the MT3DMS formulation (this keyword
+        omitted) for the solid phase, whereby the entire cell thickness is
+        available for interacting with the aqueous phase, even though the
+        aqueous phase may only occupy a portion of the cell's thickness. When
+        used, only the saturated portion of the cell is available for sorbing.
+    ncomp : int
+        The total number of chemical species in the simulation. (default is
+        1).
+    mcomp : int
+        The total number of 'mobile' species (default is 1). mcomp must be
+        less than or equal to ncomp.
+    tunit : str
+        The name of unit for time (default is 'D', for 'days'). Used for
+        identification purposes only.
+    lunit : str
+        The name of unit for length (default is 'M', for 'meters'). Used for
+        identification purposes only.
+    munit : str
+        The name of unit for mass (default is 'KG', for 'kilograms'). Used for
+        identification purposes only.
+    prsity : float or array of floats (nlay, nrow, ncol)
+        The effective porosity of the porous medium in a single porosity
+        system, or the mobile porosity in a dual-porosity medium (the immobile
+        porosity is defined through the Chemical Reaction Package). (default
+        is 0.30).
+    icbund : int or array of ints (nlay, nrow, ncol)
+        The icbund array specifies the boundary condition type for solute
+        species (shared by all species). If icbund = 0, the cell is an inactive
+        concentration cell; If icbund < 0, the cell is a constant-concentration
+        cell; If icbund > 0, the cell is an active concentration cell where the
+        concentration value will be calculated. (default is 1).
+    sconc : float, array of (nlay, nrow, ncol), or filename
+        sconc is the starting concentration for the first species. To specify
+        starting concentrations for other species in a multi-species
+        simulation, include additional keywords, such as sconc2, sconc3, and
+        so forth.
+    cinact : float
+        The value for indicating an inactive concentration cell. (default is
+        1e30).
+    thkmin : float
+        The minimum saturated thickness in a cell, expressed as the decimal
+        fraction of its thickness, below which the cell is considered inactive.
+        (default is 0.01).
+    ifmtcn : int
+        A flag/format code indicating how the calculated concentration should
+        be printed to the standard output text file. Format codes for printing
+        are listed in Table 3 of the MT3DMS manual. If ifmtcn > 0 printing is
+        in wrap form; ifmtcn < 0 printing is in strip form; if ifmtcn = 0
+        concentrations are not printed. (default is 0).
+    ifmtnp : int
+        A flag/format code indicating how the number of particles should
+        be printed to the standard output text file. The convention is
+        the same as for ifmtcn. (default is 0).
+    ifmtrf : int
+        A flag/format code indicating how the calculated retardation factor
+        should be printed to the standard output text file. The convention is
+        the same as for ifmtcn. (default is 0).
+    ifmtdp : int
+        A flag/format code indicating how the distance-weighted dispersion
+        coefficient should be printed to the standard output text file. The
+        convention is the same as for ifmtcn. (default is 0).
+    savucn : bool
+        A logical flag indicating whether the concentration solution should be
+        saved in an unformatted file. (default is True).
+    nprs : int
+        A flag indicating (i) the frequency of the output and
+        (ii) whether the output frequency is specified in terms
+        of total elapsed simulation time or the transport step number. If
+        nprs > 0 results will be saved at the times as specified in timprs;
+        if nprs = 0, results will not be saved except at the end of simulation;
+        if nprs < 0, simulation results will be saved whenever the number of
+        transport steps is an even multiple of nprs. (default is 0).
+    timprs : list of floats
+        The total elapsed time at which the simulation results are saved. The
+        number of entries in timprs must equal nprs. (default is None).
+    obs : array of int
+        An array with the cell indices (layer, row, column) for which the
+        concentration is to be printed at every transport step. (default is
+        None). obs indices must be entered as zero-based numbers as a 1 is
+        added to them before writing to the btn file.
+    nprobs : int
+        An integer indicating how frequently the concentration at the specified
+        observation points should be saved. (default is 1).
+    chkmas : bool
+        A logical flag indicating whether a one-line summary of mass balance
+        information should be printed. (default is True).
+    nprmas : int
+        An integer indicating how frequently the mass budget information
+        should be saved. (default is 1).
+    dt0 : float
+        The user-specified initial transport step size within each time-step
+        of the flow solution. (default is 0).
+    mxstrn : int
+        The maximum number of transport steps allowed within one time step
+        of the flow solution. (default is 50000).
+    ttsmult : float
+        The multiplier for successive transport steps within a flow time-step
+        if the GCG solver is used and the solution option for the advection
+        term is the standard finite-difference method. (default is 1.0).
+    ttsmax : float
+        The maximum transport step size allowed when transport step size
+        multiplier TTSMULT > 1.0. (default is 0).
+    species_names : list of str
+        A list of names for every species in the simulation.
+    extension : string
+        Filename extension (default is 'btn')
+    unitnumber : int
+        File unit number (default is None).
+    filenames : str or list of str
+        Filenames to use for the package. If filenames=None the package name
+        will be created using the model name and package extension. If a
+        single string is passed the package will be set to the string.
+        Default is None.
+
+    Attributes
+    ----------
+
+    Methods
+    -------
+
+    See Also
+    --------
+
+    Notes
+    -----
+
+    Examples
+    --------
+
+    >>> import flopy
+    >>> mt = flopy.mt3d.Mt3dms()
+    >>> btn = flopy.mt3d.Mt3dBtn(mt)
+
+    """
+
+    def __init__(self, model, MFStyleArr=False, DRYCell=False,
+                 Legacy99Stor=False, FTLPrint=False, NoWetDryPrint=False,
+                 OmitDryBud=False, AltWTSorb=False, nlay=None, nrow=None,
+                 ncol=None, nper=None, ncomp=1, mcomp=1, tunit='D', lunit='M',
+                 munit='KG', laycon=None, delr=None, delc=None, htop=None,
+                 dz=None, prsity=0.30, icbund=1,
+                 sconc=0.0, cinact=1e30, thkmin=0.01, ifmtcn=0, ifmtnp=0,
+                 ifmtrf=0, ifmtdp=0, savucn=True, nprs=0, timprs=None,
+                 obs=None, nprobs=1, chkmas=True, nprmas=1,
+                 perlen=None, nstp=None, tsmult=None, ssflag=None, dt0=0,
+                 mxstrn=50000, ttsmult=1.0, ttsmax=0,
+                 species_names=None, extension='btn',
+                 unitnumber=None, filenames=None,
+                 **kwargs):
+
+        if unitnumber is None:
+            unitnumber = Mt3dBtn.defaultunit()
+        elif unitnumber == 0:
+            unitnumber = Mt3dBtn.reservedunit()
+
+        # set filenames
+        if filenames is None:
+            filenames = [None]
+        elif isinstance(filenames, str):
+            filenames = [filenames]
+
+        # Fill namefile items
+        name = [Mt3dBtn.ftype()]
+        units = [unitnumber]
+        extra = ['']
+
+        # set package name
+        fname = [filenames[0]]
+
+        # Call ancestor's init to set self.parent, extension, name and unit number
+        Package.__init__(self, model, extension=extension, name=name,
+                         unit_number=units, extra=extra, filenames=fname)
+
+        # Set these variables from the Modflow model (self.parent.mf) unless
+        # they are specified in the constructor.
+        self.setmodflowvars(nlay, nrow, ncol, nper, laycon, delr, delc, htop,
+                            dz, perlen, nstp, tsmult)
+
+        # Make the rest of the assignments
+        self.heading1 = '# BTN for MT3DMS, generated by Flopy.'
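+        # heading1 and heading2 are echoed by write_file as the A1/A2
+        # comment records at the top of the BTN file.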
+        self.heading2 = '#'
+        self.MFStyleArr = MFStyleArr
+        if self.MFStyleArr:
+            model.free_format = True
+            model.array_format = None
+        self.DRYCell = DRYCell
+        self.Legacy99Stor = Legacy99Stor
+        self.FTLPrint = FTLPrint
+        self.NoWetDryPrint = NoWetDryPrint
+        self.OmitDryBud = OmitDryBud
+        self.AltWTSorb = AltWTSorb
+        self.ncomp = ncomp
+        self.mcomp = mcomp
+        self.tunit = tunit
+        self.lunit = lunit
+        self.munit = munit
+        self.cinact = cinact
+        self.thkmin = thkmin
+        self.ifmtcn = ifmtcn
+        self.ifmtnp = ifmtnp
+        self.ifmtrf = ifmtrf
+        self.ifmtdp = ifmtdp
+        self.savucn = savucn
+        self.nprs = nprs
+        self.timprs = timprs
+        if obs is not None:
+            if isinstance(obs, list):
+                obs = np.array(obs)
+            if obs.ndim != 2:
+                raise Exception(
+                    'obs must be (or be convertible to) a 2d array')
+        self.obs = obs
+        self.nprobs = nprobs
+        self.chkmas = chkmas
+        self.nprmas = nprmas
+        if species_names is None:
+            species_names = []
+        self.species_names = species_names
+        self.prsity = Util3d(model, (self.nlay, self.nrow, self.ncol),
+                             np.float32, prsity, name='prsity',
+                             locat=self.unit_number[0],
+                             array_free_format=False)
+        self.icbund = Util3d(model, (self.nlay, self.nrow, self.ncol),
+                             np.int32,
+                             icbund, name='icbund',
+                             locat=self.unit_number[0],
+                             array_free_format=False)
+        self.ssflag = ssflag
+        self.dt0 = Util2d(model, (self.nper,), np.float32, dt0, name='dt0',
+                          array_free_format=False)
+        self.mxstrn = Util2d(model, (self.nper,), np.int32, mxstrn,
+                             name='mxstrn')
+        self.ttsmult = Util2d(model, (self.nper,), np.float32, ttsmult,
+                              name='ttsmult')
+        self.ttsmax = Util2d(model, (self.nper,), np.float32, ttsmax,
+                             name='ttsmax')
+
+        # Do some fancy stuff for multi-species concentrations
+        self.sconc = []
+        u3d = Util3d(model, (self.nlay, self.nrow, self.ncol), np.float32,
+                     sconc, name='sconc1', locat=self.unit_number[0],
+                     array_free_format=False)
+        self.sconc.append(u3d)
+        if ncomp > 1:
+            for icomp in range(2, ncomp + 1):
+                name = "sconc" + str(icomp)
+                val = 0.0
+                if name in kwargs:
+                    val = kwargs.pop(name)
+                else:
+                    print("BTN: setting sconc for component " +
+                          str(icomp) + " to zero, kwarg name " +
+                          name)
+                u3d = Util3d(model, (self.nlay, self.nrow, self.ncol),
+                             np.float32, val, name=name,
+                             locat=self.unit_number[0],
+                             array_free_format=False)
+                self.sconc.append(u3d)
+
+        # Check to make sure that all kwargs have been consumed
+        if len(list(kwargs.keys())) > 0:
+            raise Exception("BTN error: unrecognized kwargs: " +
+                            ' '.join(list(kwargs.keys())))
+
+        # Finally add self to parent's package list and return
+        self.parent.add_package(self)
+        return
+
+    def setmodflowvars(self, nlay, nrow, ncol, nper, laycon, delr, delc, htop,
+                       dz, perlen, nstp, tsmult):
+        """
+        Set these variables from the MODFLOW model, if it exists
+
+        """
+        # Members that may come from a modflow model (model.mf)
+        validmfdis = True
+        try:
+            dum = self.parent.mf.dis.nlay
+            mf = self.parent.mf
+        except:
+            validmfdis = False
+
+        mfvarlist = [nlay, nrow, ncol, nper, laycon, delr, delc, htop, dz,
+                     perlen, nstp, tsmult]
+        if not validmfdis:
+            for v in mfvarlist:
+                s = 'BTN error. Required input is None, but no modflow model.'
+                s += ' If no modflow model is passed to Mt3dms, then values '
+                s += 'must be specified in the BTN constructor for: '
+                s += 'nlay, nrow, ncol, nper, laycon, delr, delc, htop, dz, '
+                s += 'perlen, nstp, and tsmult.'
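+                # A single missing value is fatal here because there is no
+                # MODFLOW discretization to fall back on.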
+ if v is None: + raise Exception(s) + + if nlay is not None: + self.nlay = nlay + else: + self.nlay = mf.dis.nlay + + if nrow is not None: + self.nrow = nrow + else: + self.nrow = mf.dis.nrow + + if ncol is not None: + self.ncol = ncol + else: + self.ncol = mf.dis.ncol + + if nper is not None: + self.nper = nper + else: + self.nper = mf.dis.nper + + nlay = self.nlay + nrow = self.nrow + ncol = self.ncol + nper = self.nper + + if delr is not None: + self.delr = Util2d(self.parent, (ncol,), np.float32, delr, + name='delr', + locat=self.unit_number[0], + array_free_format=False) + else: + self.delr = Util2d(self.parent, (ncol,), np.float32, + mf.dis.delr.get_value(), + name='delr', + locat=self.unit_number[0], + array_free_format=False) + + if delc is not None: + self.delc = Util2d(self.parent, (nrow,), np.float32, delc, + name='delc', + locat=self.unit_number[0]) + else: + self.delc = Util2d(self.parent, (nrow,), np.float32, + mf.dis.delc.get_value(), + name='delc', + locat=self.unit_number[0], + array_free_format=False) + + if htop is not None: + self.htop = Util2d(self.parent, (nrow, ncol), np.float32, htop, + name='htop', + locat=self.unit_number[0], + array_free_format=False) + else: + self.htop = Util2d(self.parent, (nrow, ncol), np.float32, + mf.dis.top.get_value(), + name='htop', + locat=self.unit_number[0], + array_free_format=False) + + if dz is not None: + self.dz = Util3d(self.parent, (nlay, nrow, ncol), np.float32, dz, + name='dz', + locat=self.unit_number[0], + array_free_format=False) + else: + thickness = mf.dis.thickness.get_value() + self.dz = Util3d(self.parent, (nlay, nrow, ncol), np.float32, + thickness, name='dz', + locat=self.unit_number[0], + array_free_format=False) + + if perlen is not None: + self.perlen = Util2d(self.parent, (nper,), np.float32, perlen, + name='perlen', + locat=self.unit_number[0]) + else: + self.perlen = Util2d(self.parent, (nper,), np.float32, + mf.dis.perlen.get_value(), + name='perlen', + locat=self.unit_number[0]) + + if nstp is not None: + self.nstp = Util2d(self.parent, (nper,), np.int32, nstp, + name='nstp', + locat=self.unit_number[0]) + else: + self.nstp = Util2d(self.parent, (nper,), np.int32, + mf.dis.nstp.get_value(), + name='nstp', + locat=self.unit_number[0]) + + if tsmult is not None: + self.tsmult = Util2d(self.parent, (nper,), np.float32, tsmult, + name='tsmult', + locat=self.unit_number[0]) + else: + self.tsmult = Util2d(self.parent, (nper,), np.float32, + mf.dis.tsmult.get_value(), + name='tsmult', + locat=self.unit_number[0]) + + self.laycon = None + if laycon is not None: + self.laycon = Util2d(self.parent, (nlay,), np.int32, laycon, + name='laycon', + locat=self.unit_number[0]) + else: + flow_package = mf.get_package('BCF6') + if flow_package is not None: + self.laycon = Util2d(self.parent, (nlay,), np.int32, + flow_package.laycon.get_value(), + name='laycon', + locat=self.unit_number[0]) + else: + flow_package = mf.get_package('LPF') + if flow_package is not None: + self.laycon = Util2d(self.parent, (nlay,), + np.int32, + flow_package.laytyp.get_value(), + name='laycon', + locat=self.unit_number[0]) + flow_package = mf.get_package('UPW') + if flow_package is not None: + self.laycon = Util2d(self.parent, (nlay,), + np.int32, + flow_package.laytyp.get_value(), + name='laycon', + locat=self.unit_number[0]) + + s = 'BTN warning. Laycon has not been set. A modflow model with a ' + s += ' BCF or LPF package does not exist and laycon was not passed ' + s += ' to the BTN constructor. Setting laycon to 1 (convertible).' 
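+        # Fall back to convertible layers (laycon=1) only when neither BCF,
+        # LPF, nor UPW supplied a layer type above.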
+ if self.laycon is None: + warnings.warn(s) + self.laycon = Util2d(self.parent, (nlay,), np.int32, 1, + name='laycon', + locat=self.unit_number[0]) + return + + def write_file(self): + """ + Write the package file + + Returns + ------- + None + + """ + # Open file for writing + f_btn = open(self.fn_path, 'w') + + # A1,2 + f_btn.write('#{0:s}\n#{1:s}\n'.format(self.heading1, self.heading2)) + + # A3; Keywords + # Build a string of the active keywords + str1 = '' + if self.MFStyleArr: + str1 += ' MODFLOWSTYLEARRAYS' + if self.DRYCell: + str1 += ' DRYCELL' + if self.Legacy99Stor: + str1 += ' LEGACY99STORAGE' + if self.FTLPrint: + str1 += ' FTLPRINT' + if self.NoWetDryPrint: + str1 += ' NOWETDRYPRINT' + if self.OmitDryBud: + str1 += ' OMITDRYCELLBUDGET' + if self.AltWTSorb: + str1 += ' ALTWTSORB' + + if str1 != '': + f_btn.write(str1 + '\n') + + # A3 + f_btn.write('{0:10d}{1:10d}{2:10d}{3:10d}{4:10d}{5:10d}\n' + .format(self.nlay, self.nrow, self.ncol, self.nper, + self.ncomp, self.mcomp)) + + # A4 + f_btn.write('{0:4s}{1:4s}{2:4s}\n' \ + .format(self.tunit, self.lunit, self.munit)) + + # A5 + if (self.parent.adv != None): + f_btn.write('{0:2s}'.format('T')) + else: + f_btn.write('{0:2s}'.format('F')) + if (self.parent.dsp != None): + f_btn.write('{0:2s}'.format('T')) + else: + f_btn.write('{0:2s}'.format('F')) + if (self.parent.ssm != None): + f_btn.write('{0:2s}'.format('T')) + else: + f_btn.write('{0:2s}'.format('F')) + if (self.parent.rct != None): + f_btn.write('{0:2s}'.format('T')) + else: + f_btn.write('{0:2s}'.format('F')) + if (self.parent.gcg != None): + f_btn.write('{0:2s}'.format('T')) + else: + f_btn.write('{0:2s}'.format('F')) + f_btn.write('\n') + + # A6 + self.laycon.set_fmtin('(40I2)') + f_btn.write(self.laycon.string) + + # A7 + f_btn.write(self.delr.get_file_entry()) + + # A8 + f_btn.write(self.delc.get_file_entry()) + + # A9 + f_btn.write(self.htop.get_file_entry()) + + # A10 + f_btn.write(self.dz.get_file_entry()) + + # A11 + f_btn.write(self.prsity.get_file_entry()) + + # A12 + f_btn.write(self.icbund.get_file_entry()) + + # A13 + # Starting concentrations + for s in range(len(self.sconc)): + f_btn.write(self.sconc[s].get_file_entry()) + + # A14 + f_btn.write('{0:10.0E}{1:10.2E}\n' \ + .format(self.cinact, self.thkmin)) + + # A15 + f_btn.write('{0:10d}{1:10d}{2:10d}{3:10d}' \ + .format(self.ifmtcn, self.ifmtnp, self.ifmtrf, + self.ifmtdp)) + if (self.savucn == True): + ss = 'T' + else: + ss = 'F' + f_btn.write('{0:>10s}\n'.format(ss)) + + # A16, A17 + if self.timprs is None: + f_btn.write('{0:10d}\n'.format(self.nprs)) + else: + f_btn.write('{0:10d}\n'.format(len(self.timprs))) + timprs = Util2d(self.parent, (len(self.timprs),), + np.float32, self.timprs, name='timprs', + fmtin='(8G10.4)') + timprs.format.fortran = '(8G10.4)' + f_btn.write(timprs.string) + + # A18, A19 + if self.obs is None: + f_btn.write('{0:10d}{1:10d}\n'.format(0, self.nprobs)) + else: + nobs = self.obs.shape[0] + f_btn.write('{0:10d}{1:10d}\n'.format(nobs, self.nprobs)) + for i in range(nobs): + f_btn.write('{0:10d}{1:10d}{2:10d}\n' \ + .format(self.obs[i, 0] + 1, self.obs[i, 1] + 1, + self.obs[i, 2] + 1)) + + # A20 CHKMAS, NPRMAS + if (self.chkmas == True): + ss = 'T' + else: + ss = 'F' + f_btn.write('{0:>10s}{1:10d}\n'.format(ss, self.nprmas)) + + # A21, 22, 23 PERLEN, NSTP, TSMULT + for t in range(self.nper): + s = '{0:10G}{1:10d}{2:10G}'.format(self.perlen[t], + self.nstp[t], + self.tsmult[t]) + if self.ssflag is not None: + s += ' ' + self.ssflag[t] + s += '\n' + f_btn.write(s) + 
f_btn.write('{0:10.4G}{1:10d}{2:10.4G}{3:10.4G}\n'
+                        .format(self.dt0[t], self.mxstrn[t],
+                                self.ttsmult[t], self.ttsmax[t]))
+        f_btn.close()
+        return
+
+    @staticmethod
+    def load(f, model, ext_unit_dict=None):
+        """
+        Load an existing package.
+
+        Parameters
+        ----------
+        f : filename or file handle
+            File to load.
+        model : model object
+            The model object (of type :class:`flopy.mt3d.mt.Mt3dms`) to
+            which this package will be added.
+        ext_unit_dict : dictionary, optional
+            If the arrays in the file are specified using EXTERNAL,
+            or older style array control records, then `f` should be a file
+            handle. In this case ext_unit_dict is required, which can be
+            constructed using the function
+            :class:`flopy.utils.mfreadnam.parsenamefile`.
+
+        Returns
+        -------
+        btn : Mt3dBtn object
+            Mt3dBtn object.
+
+        Examples
+        --------
+
+        >>> import flopy
+        >>> mt = flopy.mt3d.Mt3dms()
+        >>> btn = flopy.mt3d.Mt3dBtn.load('test.btn', mt)
+
+        """
+        openfile = not hasattr(f, 'read')
+        if openfile:
+            filename = f
+            f = open(filename, 'r')
+
+        # A1
+        if model.verbose:
+            print('   loading COMMENT LINES A1 AND A2...')
+        line = f.readline()
+        if model.verbose:
+            print('A1: {}'.format(line.strip()))
+
+        # A2
+        line = f.readline()
+        if model.verbose:
+            print('A2: {}'.format(line.strip()))
+
+        # New keyword options in MT3D-USGS are found here
+        line = f.readline()
+        m_arr = line.strip().split()
+
+        # Set default values for the keywords
+        MFStyleArr = False
+        DRYCell = False
+        Legacy99Stor = False
+        FTLPrint = False
+        NoWetDryPrint = False
+        OmitDryBud = False
+        AltWTSorb = False
+        if not m_arr[0].strip().isdigit():  # m_arr[0] is a keyword, not a digit
+            if model.verbose:
+                print('   loading optional keywords: {}'.format(line.strip()))
+            for i in range(0, len(m_arr)):
+                if m_arr[i].upper() == "MODFLOWSTYLEARRAYS":
+                    MFStyleArr = True
+                    model.array_format = None
+                    model.free_format = True
+                if m_arr[i].upper() == "DRYCELL":
+                    DRYCell = True
+                if m_arr[i].upper() == "LEGACY99STORAGE":
+                    Legacy99Stor = True
+                if m_arr[i].upper() == "FTLPRINT":
+                    FTLPrint = True
+                if m_arr[i].upper() == "NOWETDRYPRINT":
+                    NoWetDryPrint = True
+                if m_arr[i].upper() == "OMITDRYCELLBUDGET":
+                    OmitDryBud = True
+                if m_arr[i].upper() == "ALTWTSORB":
+                    AltWTSorb = True
+        elif model.verbose:
+            print('   optional keywords not identified/loaded')
+
+        # A3
+        if model.verbose:
+            print('   loading NLAY, NROW, NCOL, NPER, NCOMP, MCOMP...')
+        if not m_arr[0].isdigit():
+            line = f.readline()
+        nlay = int(line[0:10])
+        nrow = int(line[10:20])
+        ncol = int(line[20:30])
+        nper = int(line[30:40])
+        try:
+            ncomp = int(line[40:50])
+        except:
+            ncomp = 1
+        try:
+            mcomp = int(line[50:60])
+        except:
+            mcomp = 1
+        if model.verbose:
+            print('   NLAY {}'.format(nlay))
+            print('   NROW {}'.format(nrow))
+            print('   NCOL {}'.format(ncol))
+            print('   NPER {}'.format(nper))
+            print('   NCOMP {}'.format(ncomp))
+            print('   MCOMP {}'.format(mcomp))
+
+        if model.verbose:
+            print('   loading TUNIT, LUNIT, MUNIT...')
+        line = f.readline()
+        tunit = line[0:4]
+        lunit = line[4:8]
+        munit = line[8:12]
+        if model.verbose:
+            print('   TUNIT {}'.format(tunit))
+            print('   LUNIT {}'.format(lunit))
+            print('   MUNIT {}'.format(munit))
+
+        if model.verbose:
+            print('   loading TRNOP...')
+        trnop = f.readline()[:20].strip().split()
+        if model.verbose:
+            print('   TRNOP {}'.format(trnop))
+
+        if model.verbose:
+            print('   loading LAYCON...')
+        laycon = Util2d.load_txt((nlay,), f, np.int32, '(40I2)')
+        if model.verbose:
+            print('   LAYCON {}'.format(laycon))
+
+        if model.verbose:
+            print('   
loading DELR...') + delr = Util2d.load(f, model, (ncol,), np.float32, 'delr', + ext_unit_dict, array_format="mt3d") + if model.verbose: + print(' DELR {}'.format(delr)) + + if model.verbose: + print(' loading DELC...') + delc = Util2d.load(f, model, (nrow,), np.float32, 'delc', + ext_unit_dict, array_format="mt3d") + if model.verbose: + print(' DELC {}'.format(delc)) + + if model.verbose: + print(' loading HTOP...') + htop = Util2d.load(f, model, (nrow, ncol), np.float32, 'htop', + ext_unit_dict, array_format="mt3d") + if model.verbose: + print(' HTOP {}'.format(htop)) + + if model.verbose: + print(' loading DZ...') + dz = Util3d.load(f, model, (nlay, nrow, ncol), np.float32, 'dz', + ext_unit_dict, array_format="mt3d") + if model.verbose: + print(' DZ {}'.format(dz)) + + if model.verbose: + print(' loading PRSITY...') + prsity = Util3d.load(f, model, (nlay, nrow, ncol), np.float32, + 'prsity', + ext_unit_dict, array_format="mt3d") + if model.verbose: + print(' PRSITY {}'.format(prsity)) + + if model.verbose: + print(' loading ICBUND...') + icbund = Util3d.load(f, model, (nlay, nrow, ncol), np.int32, 'icbund', + ext_unit_dict, array_format="mt3d") + if model.verbose: + print(' ICBUND {}'.format(icbund)) + + if model.verbose: + print(' loading SCONC...') + kwargs = {} + sconc = Util3d.load(f, model, (nlay, nrow, ncol), np.float32, 'sconc1', + ext_unit_dict, array_format="mt3d") + if ncomp > 1: + for icomp in range(2, ncomp + 1): + name = "sconc" + str(icomp) + if model.verbose: + print(' loading {}...'.format(name)) + u3d = Util3d.load(f, model, (nlay, nrow, ncol), np.float32, + name, ext_unit_dict, array_format="mt3d") + kwargs[name] = u3d + if model.verbose: + print(' SCONC {}'.format(sconc)) + + if model.verbose: + print(' loading CINACT, THCKMIN...') + line = f.readline() + cinact = float(line[0:10]) + try: + thkmin = float(line[10:20]) + except: + thkmin = 0.01 + if model.verbose: + print(' CINACT {}'.format(cinact)) + print(' THKMIN {}'.format(thkmin)) + + if model.verbose: + print(' loading IFMTCN, IFMTNP, IFMTRF, IFMTDP, SAVUCN...') + line = f.readline() + ifmtcn = int(line[0:10]) + ifmtnp = int(line[10:20]) + ifmtrf = int(line[20:30]) + ifmtdp = int(line[30:40]) + savucn = False + if 't' in line[40:50].lower(): + savucn = True + if model.verbose: + print(' IFMTCN {}'.format(ifmtcn)) + print(' IFMTNP {}'.format(ifmtnp)) + print(' IFMTRF {}'.format(ifmtrf)) + print(' IFMTDP {}'.format(ifmtdp)) + print(' SAVUCN {}'.format(savucn)) + + if model.verbose: + print(' loading NPRS...') + line = f.readline() + nprs = int(line[0:10]) + if model.verbose: + print(' NPRS {}'.format(nprs)) + + timprs = None + if nprs > 0: + if model.verbose: + print(' loading TIMPRS...') + timprs = Util2d.load_txt((nprs,), f, np.float32, '(8F10.0)') + if model.verbose: + print(' TIMPRS {}'.format(timprs)) + + if model.verbose: + print(' loading NOBS, NPROBS...') + line = f.readline() + nobs = int(line[0:10]) + try: + nprobs = int(line[10:20]) + except: + nprobs = 1 + if model.verbose: + print(' NOBS {}'.format(nobs)) + print(' NPROBS {}'.format(nprobs)) + + obs = None + if nobs > 0: + if model.verbose: + print(' loading KOBS, IOBS, JOBS...') + obs = [] + for l in range(nobs): + line = f.readline() + k = int(line[0:10]) + i = int(line[10:20]) + j = int(line[20:30]) + obs.append([k, i, j]) + obs = np.array(obs) - 1 + if model.verbose: + print(' OBS {}'.format(obs)) + + if model.verbose: + print(' loading CHKMAS, NPRMAS...') + line = f.readline() + chkmas = False + if 't' in line[0:10].lower(): + chkmas = True + try: 
+ nprmas = int(line[10:20]) + except: + nprmas = 1 + if model.verbose: + print(' CHKMAS {}'.format(chkmas)) + print(' NPRMAS {}'.format(nprmas)) + + if model.verbose: + print( + ' loading PERLEN, NSTP, TSMULT, TSLNGH, DT0, MXSTRN, TTSMULT, TTSMAX...') + dt0, mxstrn, ttsmult, ttsmax = [], [], [], [] + perlen = [] + nstp = [] + tsmult = [] + tslngh = [] + ssflag = [] + for kper in range(nper): + line = f.readline() + perlen.append(float(line[0:10])) + nstp.append(int(line[10:20])) + tsmult.append(float(line[20:30])) + sf = ' ' + ll = line[30:].strip().split() + if len(ll) > 0: + if 'sstate' in ll[0].lower(): + sf = 'SState' + ssflag.append(sf) + + if tsmult[-1] <= 0: + t = Util2d.load_txt((nstp[-1],), f, np.float32, '(8F10.0)') + tslngh.append(t) + raise Exception("tsmult <= 0 not supported") + + line = f.readline() + dt0.append(float(line[0:10])) + mxstrn.append(int(line[10:20])) + ttsmult.append(float(line[20:30])) + ttsmax.append(float(line[30:40])) + + if model.verbose: + print(' PERLEN {}'.format(perlen)) + print(' NSTP {}'.format(nstp)) + print(' TSMULT {}'.format(tsmult)) + print(' SSFLAG {}'.format(ssflag)) + print(' TSLNGH {}'.format(tslngh)) + print(' DT0 {}'.format(dt0)) + print(' MXSTRN {}'.format(mxstrn)) + print(' TTSMULT {}'.format(ttsmult)) + print(' TTSMAX {}'.format(ttsmax)) + + if openfile: + f.close() + + # set package unit number + unitnumber = None + filenames = [None] + if ext_unit_dict is not None: + unitnumber, filenames[0] = \ + model.get_ext_dict_attr(ext_unit_dict, + filetype=Mt3dBtn.ftype()) + + btn = Mt3dBtn(model, MFStyleArr=MFStyleArr, DRYCell=DRYCell, + Legacy99Stor=Legacy99Stor, FTLPrint=FTLPrint, + NoWetDryPrint=NoWetDryPrint, OmitDryBud=OmitDryBud, + AltWTSorb=AltWTSorb, + nlay=nlay, nrow=nrow, ncol=ncol, nper=nper, + ncomp=ncomp, mcomp=mcomp, tunit=tunit, + laycon=laycon, delr=delr, delc=delc, htop=htop, dz=dz, + lunit=lunit, munit=munit, prsity=prsity, icbund=icbund, + sconc=sconc, cinact=cinact, thkmin=thkmin, + ifmtcn=ifmtcn, ifmtnp=ifmtnp, ifmtrf=ifmtrf, + ifmtdp=ifmtdp, savucn=savucn, nprs=nprs, + timprs=timprs, obs=obs, nprobs=nprobs, chkmas=chkmas, + nprmas=nprmas, perlen=perlen, nstp=nstp, tsmult=tsmult, + ssflag=ssflag, dt0=dt0, mxstrn=mxstrn, ttsmult=ttsmult, + ttsmax=ttsmax, + unitnumber=unitnumber, filenames=filenames, + **kwargs) + return btn + + @staticmethod + def ftype(): + return 'BTN' + + @staticmethod + def defaultunit(): + return 31 + + @staticmethod + def reservedunit(): + return 1 diff --git a/flopy/mt3d/mtdsp.py b/flopy/mt3d/mtdsp.py index 777c52de5e..7204928ec5 100644 --- a/flopy/mt3d/mtdsp.py +++ b/flopy/mt3d/mtdsp.py @@ -1,366 +1,366 @@ -import sys -import numpy as np -from ..pakbase import Package -from ..utils import Util2d, Util3d - - -class Mt3dDsp(Package): - """ - MT3DMS Dispersion Package Class. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.mt3d.mt.Mt3dms`) to which - this package will be added. - al : float or array of floats (nlay, nrow, ncol) - AL is the longitudinal dispersivity, for every cell of the model grid - (unit, L). - (default is 0.01) - trpt : float or array of floats (nlay) - s a 1D real array defining the ratio of the horizontal transverse - dispersivity to the longitudinal dispersivity. Each value - in the array corresponds to one model layer. Some recent field - studies suggest that TRPT is generally not greater than 0.1. 
- (default is 0.1) - trpv : float or array of floats (nlay) - is the ratio of the vertical transverse dispersivity to the - longitudinal dispersivity. Each value in the array corresponds to one - model layer. Some recent field studies suggest that TRPT is generally - not greater than 0.01. Set TRPV equal to TRPT to use the standard - isotropic dispersion model (Equation 10 in Chapter 2). Otherwise, - the modified isotropic dispersion model is used (Equation 11 in - Chapter 2). - (default is 0.01) - dmcoef : float or array of floats (nlay) or (nlay, nrow, ncol) if the - multiDiff option is used. - DMCOEF is the effective molecular diffusion coefficient (unit, L2T-1). - Set DMCOEF = 0 if the effect of molecular diffusion is considered - unimportant. Each value in the array corresponds to one model layer. - The value for dmcoef applies only to species 1. See kwargs for - entering dmcoef for other species. - (default is 1.e-9). - multiDiff : boolean - To activate the component-dependent diffusion option, a keyword - input record must be inserted to the beginning of the Dispersion - (DSP) input file. The symbol $ in the first column of an input line - signifies a keyword input record containing one or more predefined - keywords. Above the keyword input record, comment lines marked by the - symbol # in the first column are allowed. Comment lines are processed - but have no effect on the simulation. Furthermore, blank lines are - also acceptable above the keyword input record. Below the keyword - input record, the format of the DSP input file must remain unchanged - from the previous versions except for the diffusion coefficient as - explained below. If no keyword input record is specified, the input - file remains backward compatible with all previous versions of MT3DMS. - The predefined keyword for the component-dependent diffusion option - is MultiDiffusion. The keyword is case insensitive so - ''MultiDiffusion'' is equivalent to either ''Multidiffusion'' or - ''multidiffusion''. If this keyword is specified in the keyword input - record that has been inserted into the beginning of the DSP input - file, the component-dependent diffusion option has been activated and - the user needs to specify one diffusion coefficient for each mobile - solute component and at each model cell. This is done by specifying - one mobile component at a time, from the first component to the last - component (MCOMP). For each mobile component, the real array reader - utility (RARRAY) is used to input the 3-D diffusion coefficient - array, one model layer at a time. - (default is False) - extension : string - Filename extension (default is 'dsp') - unitnumber : int - File unit number (default is None). - filenames : str or list of str - Filenames to use for the package. If filenames=None the package name - will be created using the model name and package extension. If a - single string is passed the package will be set to the string. - Default is None. - kwargs : dictionary - If a multi-species simulation, then dmcoef values can be specified for - other species as dmcoef2, dmcoef3, etc. For example: - dmcoef1=1.e-10, dmcoef2=4.e-10, ... If a value is not specified, then - dmcoef is set to 0.0. 
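To make the per-species kwargs described above concrete, here is a minimal sketch, assuming a two-species model whose ncomp/mcomp are supplied by the Mt3dBtn package (all numeric values are illustrative):

    >>> import flopy
    >>> mt = flopy.mt3d.Mt3dms()
    >>> btn = flopy.mt3d.Mt3dBtn(mt, ncomp=2, mcomp=2)
    >>> dsp = flopy.mt3d.Mt3dDsp(mt, multiDiff=True, dmcoef=1.e-9,
    ...                          dmcoef2=4.e-10)
    >>> dsp.write_file()

With multiDiff=True the written file begins with the '$ MultiDiffusion' keyword record, followed by the AL, TRPT, TRPV, and per-species DMCOEF arrays.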
- - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - - Examples - -------- - - >>> import flopy - >>> m = flopy.mt3d.Mt3dms() - >>> dsp = flopy.mt3d.Mt3dDsp(m) - - """ - - def __init__(self, model, al=0.01, trpt=0.1, trpv=0.01, dmcoef=1e-9, - extension='dsp', multiDiff=False, unitnumber=None, - filenames=None, **kwargs): - - if unitnumber is None: - unitnumber = Mt3dDsp.defaultunit() - elif unitnumber == 0: - unitnumber = Mt3dDsp.reservedunit() - - # set filenames - if filenames is None: - filenames = [None] - elif isinstance(filenames, str): - filenames = [filenames] - - # Fill namefile items - name = [Mt3dDsp.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - nrow = model.nrow - ncol = model.ncol - nlay = model.nlay - ncomp = model.ncomp - mcomp = model.mcomp - self.multiDiff = multiDiff - self.al = Util3d(model, (nlay, nrow, ncol), np.float32, al, name='al', - locat=self.unit_number[0], - array_free_format=False) - self.trpt = Util2d(model, (nlay,), np.float32, trpt, name='trpt', - locat=self.unit_number[0], - array_free_format=False) - self.trpv = Util2d(model, (nlay,), np.float32, trpv, name='trpv', - locat=self.unit_number[0], - array_free_format=False) - - # Multi-species and multi-diffusion, hence the complexity - self.dmcoef = [] - shape = (nlay, 1) - utype = Util2d - nmcomp = ncomp - if multiDiff: - shape = (nlay, nrow, ncol) - utype = Util3d - nmcomp = mcomp - u2or3 = utype(model, shape, np.float32, dmcoef, - name='dmcoef1', locat=self.unit_number[0], - array_free_format=False) - self.dmcoef.append(u2or3) - for icomp in range(2, nmcomp + 1): - name = "dmcoef" + str(icomp) - val = 0.0 - if name in list(kwargs.keys()): - val = kwargs.pop(name) - else: - print("DSP: setting dmcoef for component " + - str(icomp) + " to zero, kwarg name " + - name) - u2or3 = utype(model, shape, np.float32, val, - name=name, locat=self.unit_number[0], - array_free_format=False) - self.dmcoef.append(u2or3) - - if len(list(kwargs.keys())) > 0: - raise Exception("DSP error: unrecognized kwargs: " + - ' '.join(list(kwargs.keys()))) - self.parent.add_package(self) - return - - def write_file(self): - """ - Write the package file - - Returns - ------- - None - - """ - # Get size - nrow = self.parent.nrow - ncol = self.parent.ncol - nlay = self.parent.nlay - - # Open file for writing - f_dsp = open(self.fn_path, 'w') - - # Write multidiffusion keyword - if self.multiDiff: - f_dsp.write('$ MultiDiffusion\n') - - # Write arrays - f_dsp.write(self.al.get_file_entry()) - f_dsp.write(self.trpt.get_file_entry()) - f_dsp.write(self.trpv.get_file_entry()) - f_dsp.write(self.dmcoef[0].get_file_entry()) - if self.multiDiff: - for i in range(1, len(self.dmcoef)): - f_dsp.write(self.dmcoef[i].get_file_entry()) - f_dsp.close() - return - - @staticmethod - def load(f, model, nlay=None, nrow=None, ncol=None, ext_unit_dict=None): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.mt3d.mt.Mt3dms`) to - which this package will be added. - nlay : int - number of model layers. If None it will be retrieved from the - model. - nrow : int - number of model rows. If None it will be retrieved from the - model. 
- ncol : int - number of model columns. If None it will be retrieved from the - model. - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - - Returns - ------- - adv : Mt3dDsp object - Mt3dDsp object. - - Examples - -------- - - >>> import flopy - >>> mt = flopy.mt3d.Mt3dms() - >>> dsp = flopy.mt3d.Mt3dAdv.load('test.dsp', m) - - """ - - if model.verbose: - sys.stdout.write('loading dsp package file...\n') - - # Set dimensions if necessary - if nlay is None: - nlay = model.nlay - if nrow is None: - nrow = model.nrow - if ncol is None: - ncol = model.ncol - - # Open file, if necessary - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - - # Dataset 0 -- comment line - imsd = 0 - while True: - line = f.readline() - if line.strip() == '': - continue - elif line[0] == '#': - continue - elif line[0] == '$': - imsd = 1 - break - else: - break - - # Check for keywords (multidiffusion) - multiDiff = False - if imsd == 1: - keywords = line[1:].strip().split() - for k in keywords: - if k.lower() == 'multidiffusion': - multiDiff = True - else: - # go back to beginning of file - f.seek(0, 0) - - # Read arrays - if model.verbose: - print(' loading AL...') - al = Util3d.load(f, model, (nlay, nrow, ncol), np.float32, 'al', - ext_unit_dict, array_format="mt3d") - - if model.verbose: - print(' loading TRPT...') - trpt = Util2d.load(f, model, (nlay,), np.float32, 'trpt', - ext_unit_dict, array_format="mt3d", - array_free_format=False) - - if model.verbose: - print(' loading TRPV...') - trpv = Util2d.load(f, model, (nlay,), np.float32, 'trpv', - ext_unit_dict, array_format="mt3d", - array_free_format=False) - - if model.verbose: - print(' loading DMCOEFF...') - kwargs = {} - dmcoef = [] - if multiDiff: - dmcoef = Util3d.load(f, model, (nlay, nrow, ncol), np.float32, - 'dmcoef1', ext_unit_dict, array_format="mt3d") - if model.mcomp > 1: - for icomp in range(2, model.mcomp + 1): - name = "dmcoef" + str(icomp) - u3d = Util3d.load(f, model, (nlay, nrow, ncol), np.float32, - name, ext_unit_dict, array_format="mt3d") - kwargs[name] = u3d - - - else: - dmcoef = Util2d.load(f, model, (nlay,), np.float32, - 'dmcoef1', ext_unit_dict, array_format="mt3d") - # if model.mcomp > 1: - # for icomp in range(2, model.mcomp + 1): - # name = "dmcoef" + str(icomp + 1) - # u2d = Util2d.load(f, model, (nlay,), np.float32, name, - # ext_unit_dict, array_format="mt3d") - # kwargs[name] = u2d - - if openfile: - f.close() - - # set package unit number - unitnumber = None - filenames = [None] - if ext_unit_dict is not None: - unitnumber, filenames[0] = \ - model.get_ext_dict_attr(ext_unit_dict, - filetype=Mt3dDsp.ftype()) - - dsp = Mt3dDsp(model, al=al, trpt=trpt, trpv=trpv, dmcoef=dmcoef, - multiDiff=multiDiff, unitnumber=unitnumber, - filenames=filenames, **kwargs) - return dsp - - @staticmethod - def ftype(): - return 'DSP' - - @staticmethod - def defaultunit(): - return 33 - - @staticmethod - def reservedunit(): - return 3 +import sys +import numpy as np +from ..pakbase import Package +from ..utils import Util2d, Util3d + + +class Mt3dDsp(Package): + """ + MT3DMS Dispersion Package Class. + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.mt3d.mt.Mt3dms`) to which + this package will be added. 
+    al : float or array of floats (nlay, nrow, ncol)
+        AL is the longitudinal dispersivity, for every cell of the model grid
+        (unit, L).
+        (default is 0.01)
+    trpt : float or array of floats (nlay)
+        is a 1D real array defining the ratio of the horizontal transverse
+        dispersivity to the longitudinal dispersivity. Each value
+        in the array corresponds to one model layer. Some recent field
+        studies suggest that TRPT is generally not greater than 0.1.
+        (default is 0.1)
+    trpv : float or array of floats (nlay)
+        is the ratio of the vertical transverse dispersivity to the
+        longitudinal dispersivity. Each value in the array corresponds to one
+        model layer. Some recent field studies suggest that TRPV is generally
+        not greater than 0.01. Set TRPV equal to TRPT to use the standard
+        isotropic dispersion model (Equation 10 in Chapter 2). Otherwise,
+        the modified isotropic dispersion model is used (Equation 11 in
+        Chapter 2).
+        (default is 0.01)
+    dmcoef : float or array of floats (nlay) or (nlay, nrow, ncol) if the
+        multiDiff option is used.
+        DMCOEF is the effective molecular diffusion coefficient (unit, L2T-1).
+        Set DMCOEF = 0 if the effect of molecular diffusion is considered
+        unimportant. Each value in the array corresponds to one model layer.
+        The value for dmcoef applies only to species 1. See kwargs for
+        entering dmcoef for other species.
+        (default is 1.e-9).
+    multiDiff : boolean
+        To activate the component-dependent diffusion option, a keyword
+        input record must be inserted to the beginning of the Dispersion
+        (DSP) input file. The symbol $ in the first column of an input line
+        signifies a keyword input record containing one or more predefined
+        keywords. Above the keyword input record, comment lines marked by the
+        symbol # in the first column are allowed. Comment lines are processed
+        but have no effect on the simulation. Furthermore, blank lines are
+        also acceptable above the keyword input record. Below the keyword
+        input record, the format of the DSP input file must remain unchanged
+        from the previous versions except for the diffusion coefficient as
+        explained below. If no keyword input record is specified, the input
+        file remains backward compatible with all previous versions of MT3DMS.
+        The predefined keyword for the component-dependent diffusion option
+        is MultiDiffusion. The keyword is case insensitive so
+        ''MultiDiffusion'' is equivalent to either ''Multidiffusion'' or
+        ''multidiffusion''. If this keyword is specified in the keyword input
+        record that has been inserted into the beginning of the DSP input
+        file, the component-dependent diffusion option has been activated and
+        the user needs to specify one diffusion coefficient for each mobile
+        solute component and at each model cell. This is done by specifying
+        one mobile component at a time, from the first component to the last
+        component (MCOMP). For each mobile component, the real array reader
+        utility (RARRAY) is used to input the 3-D diffusion coefficient
+        array, one model layer at a time.
+        (default is False)
+    extension : string
+        Filename extension (default is 'dsp')
+    unitnumber : int
+        File unit number (default is None).
+    filenames : str or list of str
+        Filenames to use for the package. If filenames=None the package name
+        will be created using the model name and package extension. If a
+        single string is passed the package will be set to the string.
+        Default is None.
+ kwargs : dictionary + If a multi-species simulation, then dmcoef values can be specified for + other species as dmcoef2, dmcoef3, etc. For example: + dmcoef1=1.e-10, dmcoef2=4.e-10, ... If a value is not specified, then + dmcoef is set to 0.0. + + Attributes + ---------- + + Methods + ------- + + See Also + -------- + + Notes + ----- + + Examples + -------- + + >>> import flopy + >>> m = flopy.mt3d.Mt3dms() + >>> dsp = flopy.mt3d.Mt3dDsp(m) + + """ + + def __init__(self, model, al=0.01, trpt=0.1, trpv=0.01, dmcoef=1e-9, + extension='dsp', multiDiff=False, unitnumber=None, + filenames=None, **kwargs): + + if unitnumber is None: + unitnumber = Mt3dDsp.defaultunit() + elif unitnumber == 0: + unitnumber = Mt3dDsp.reservedunit() + + # set filenames + if filenames is None: + filenames = [None] + elif isinstance(filenames, str): + filenames = [filenames] + + # Fill namefile items + name = [Mt3dDsp.ftype()] + units = [unitnumber] + extra = [''] + + # set package name + fname = [filenames[0]] + + # Call ancestor's init to set self.parent, extension, name and unit number + Package.__init__(self, model, extension=extension, name=name, + unit_number=units, extra=extra, filenames=fname) + + nrow = model.nrow + ncol = model.ncol + nlay = model.nlay + ncomp = model.ncomp + mcomp = model.mcomp + self.multiDiff = multiDiff + self.al = Util3d(model, (nlay, nrow, ncol), np.float32, al, name='al', + locat=self.unit_number[0], + array_free_format=False) + self.trpt = Util2d(model, (nlay,), np.float32, trpt, name='trpt', + locat=self.unit_number[0], + array_free_format=False) + self.trpv = Util2d(model, (nlay,), np.float32, trpv, name='trpv', + locat=self.unit_number[0], + array_free_format=False) + + # Multi-species and multi-diffusion, hence the complexity + self.dmcoef = [] + shape = (nlay, 1) + utype = Util2d + nmcomp = ncomp + if multiDiff: + shape = (nlay, nrow, ncol) + utype = Util3d + nmcomp = mcomp + u2or3 = utype(model, shape, np.float32, dmcoef, + name='dmcoef1', locat=self.unit_number[0], + array_free_format=False) + self.dmcoef.append(u2or3) + for icomp in range(2, nmcomp + 1): + name = "dmcoef" + str(icomp) + val = 0.0 + if name in list(kwargs.keys()): + val = kwargs.pop(name) + else: + print("DSP: setting dmcoef for component " + + str(icomp) + " to zero, kwarg name " + + name) + u2or3 = utype(model, shape, np.float32, val, + name=name, locat=self.unit_number[0], + array_free_format=False) + self.dmcoef.append(u2or3) + + if len(list(kwargs.keys())) > 0: + raise Exception("DSP error: unrecognized kwargs: " + + ' '.join(list(kwargs.keys()))) + self.parent.add_package(self) + return + + def write_file(self): + """ + Write the package file + + Returns + ------- + None + + """ + # Get size + nrow = self.parent.nrow + ncol = self.parent.ncol + nlay = self.parent.nlay + + # Open file for writing + f_dsp = open(self.fn_path, 'w') + + # Write multidiffusion keyword + if self.multiDiff: + f_dsp.write('$ MultiDiffusion\n') + + # Write arrays + f_dsp.write(self.al.get_file_entry()) + f_dsp.write(self.trpt.get_file_entry()) + f_dsp.write(self.trpv.get_file_entry()) + f_dsp.write(self.dmcoef[0].get_file_entry()) + if self.multiDiff: + for i in range(1, len(self.dmcoef)): + f_dsp.write(self.dmcoef[i].get_file_entry()) + f_dsp.close() + return + + @staticmethod + def load(f, model, nlay=None, nrow=None, ncol=None, ext_unit_dict=None): + """ + Load an existing package. + + Parameters + ---------- + f : filename or file handle + File to load. 
+        model : model object
+            The model object (of type :class:`flopy.mt3d.mt.Mt3dms`) to
+            which this package will be added.
+        nlay : int
+            number of model layers. If None it will be retrieved from the
+            model.
+        nrow : int
+            number of model rows. If None it will be retrieved from the
+            model.
+        ncol : int
+            number of model columns. If None it will be retrieved from the
+            model.
+        ext_unit_dict : dictionary, optional
+            If the arrays in the file are specified using EXTERNAL,
+            or older style array control records, then `f` should be a file
+            handle. In this case ext_unit_dict is required, which can be
+            constructed using the function
+            :class:`flopy.utils.mfreadnam.parsenamefile`.
+
+        Returns
+        -------
+        dsp : Mt3dDsp object
+            Mt3dDsp object.
+
+        Examples
+        --------
+
+        >>> import flopy
+        >>> mt = flopy.mt3d.Mt3dms()
+        >>> dsp = flopy.mt3d.Mt3dDsp.load('test.dsp', mt)
+
+        """
+
+        if model.verbose:
+            sys.stdout.write('loading dsp package file...\n')
+
+        # Set dimensions if necessary
+        if nlay is None:
+            nlay = model.nlay
+        if nrow is None:
+            nrow = model.nrow
+        if ncol is None:
+            ncol = model.ncol
+
+        # Open file, if necessary
+        openfile = not hasattr(f, 'read')
+        if openfile:
+            filename = f
+            f = open(filename, 'r')
+
+        # Dataset 0 -- comment line
+        imsd = 0
+        while True:
+            line = f.readline()
+            if line.strip() == '':
+                continue
+            elif line[0] == '#':
+                continue
+            elif line[0] == '$':
+                imsd = 1
+                break
+            else:
+                break
+
+        # Check for keywords (multidiffusion)
+        multiDiff = False
+        if imsd == 1:
+            keywords = line[1:].strip().split()
+            for k in keywords:
+                if k.lower() == 'multidiffusion':
+                    multiDiff = True
+        else:
+            # go back to beginning of file
+            f.seek(0, 0)
+
+        # Read arrays
+        if model.verbose:
+            print('   loading AL...')
+        al = Util3d.load(f, model, (nlay, nrow, ncol), np.float32, 'al',
+                         ext_unit_dict, array_format="mt3d")
+
+        if model.verbose:
+            print('   loading TRPT...')
+        trpt = Util2d.load(f, model, (nlay,), np.float32, 'trpt',
+                           ext_unit_dict, array_format="mt3d",
+                           array_free_format=False)
+
+        if model.verbose:
+            print('   loading TRPV...')
+        trpv = Util2d.load(f, model, (nlay,), np.float32, 'trpv',
+                           ext_unit_dict, array_format="mt3d",
+                           array_free_format=False)
+
+        if model.verbose:
+            print('   loading DMCOEFF...')
+        kwargs = {}
+        dmcoef = []
+        if multiDiff:
+            dmcoef = Util3d.load(f, model, (nlay, nrow, ncol), np.float32,
+                                 'dmcoef1', ext_unit_dict, array_format="mt3d")
+            if model.mcomp > 1:
+                for icomp in range(2, model.mcomp + 1):
+                    name = "dmcoef" + str(icomp)
+                    u3d = Util3d.load(f, model, (nlay, nrow, ncol), np.float32,
+                                      name, ext_unit_dict, array_format="mt3d")
+                    kwargs[name] = u3d
+
+
+        else:
+            dmcoef = Util2d.load(f, model, (nlay,), np.float32,
+                                 'dmcoef1', ext_unit_dict, array_format="mt3d")
+            # if model.mcomp > 1:
+            #     for icomp in range(2, model.mcomp + 1):
+            #         name = "dmcoef" + str(icomp + 1)
+            #         u2d = Util2d.load(f, model, (nlay,), np.float32, name,
+            #                           ext_unit_dict, array_format="mt3d")
+            #         kwargs[name] = u2d
+
+        if openfile:
+            f.close()
+
+        # set package unit number
+        unitnumber = None
+        filenames = [None]
+        if ext_unit_dict is not None:
+            unitnumber, filenames[0] = \
+                model.get_ext_dict_attr(ext_unit_dict,
+                                        filetype=Mt3dDsp.ftype())
+
+        dsp = Mt3dDsp(model, al=al, trpt=trpt, trpv=trpv, dmcoef=dmcoef,
+                      multiDiff=multiDiff, unitnumber=unitnumber,
+                      filenames=filenames, **kwargs)
+        return dsp
+
+    @staticmethod
+    def ftype():
+        return 'DSP'
+
+    @staticmethod
+    def defaultunit():
+        return 33
+
+    @staticmethod
+    def reservedunit():
+        return 3
diff --git a/flopy/mt3d/mtgcg.py
b/flopy/mt3d/mtgcg.py index 696703605d..bed6c106a9 100644 --- a/flopy/mt3d/mtgcg.py +++ b/flopy/mt3d/mtgcg.py @@ -1,236 +1,236 @@ -import sys -from ..pakbase import Package - - -class Mt3dGcg(Package): - """ - MT3DMS Generalized Conjugate Gradient Package Class. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.mt3d.mt.Mt3dms`) to which - this package will be added. - mxiter : int - is the maximum number of outer iterations; it should be set to an - integer greater than one only when a nonlinear sorption isotherm is - included in simulation. (default is 1) - iter1 : int - is the maximum number of inner iterations; a value of 30-50 should be - adequate for most problems. (default is 50) - isolve : int - is the type of preconditioners to be used with the Lanczos/ORTHOMIN - acceleration scheme: - = 1, Jacobi - = 2, SSOR - = 3, Modified Incomplete Cholesky (MIC) (MIC usually converges faster, - but it needs significantly more memory) - (default is 3) - ncrs : int - is an integer flag for treatment of dispersion tensor cross terms: - = 0, lump all dispersion cross terms to the right-hand-side - (approximate but highly efficient). = 1, include full dispersion - tensor (memory intensive). - (default is 0) - accl : float - is the relaxation factor for the SSOR option; a value of 1.0 is - generally adequate. - (default is 1) - cclose : float - is the convergence criterion in terms of relative concentration; a - real value between 10-4 and 10-6 is generally adequate. - (default is 1.E-5) - iprgcg : int - IPRGCG is the interval for printing the maximum concentration changes - of each iteration. Set IPRGCG to zero as default for printing at the - end of each stress period. - (default is 0) - extension : string - Filename extension (default is 'gcg') - unitnumber : int - File unit number (default is None). - filenames : str or list of str - Filenames to use for the package. If filenames=None the package name - will be created using the model name and package extension. If a - single string is passed the package will be set to the string. - Default is None. 
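As a concrete illustration of the solver settings above, a hypothetical configuration that switches to the SSOR preconditioner and tightens the closure criterion might look like this (values illustrative, not a recommendation):

    >>> import flopy
    >>> mt = flopy.mt3d.Mt3dms()
    >>> gcg = flopy.mt3d.Mt3dGcg(mt, iter1=100, isolve=2, accl=1.0,
    ...                          cclose=1.e-6)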
- - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - - Examples - -------- - - >>> import flopy - >>> m = flopy.mt3d.Mt3dms() - >>> gcg = flopy.mt3d.Mt3dGcg(m) - - """ - unitnumber = 35 - - def __init__(self, model, mxiter=1, iter1=50, isolve=3, ncrs=0, - accl=1, cclose=1e-5, iprgcg=0, extension='gcg', - unitnumber=None, filenames=None): - - if unitnumber is None: - unitnumber = Mt3dGcg.defaultunit() - elif unitnumber == 0: - unitnumber = Mt3dGcg.reservedunit() - - # set filenames - if filenames is None: - filenames = [None] - elif isinstance(filenames, str): - filenames = [filenames] - - # Fill namefile items - name = [Mt3dGcg.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - self.mxiter = mxiter - self.iter1 = iter1 - self.isolve = isolve - self.ncrs = ncrs - self.accl = accl - self.cclose = cclose - self.iprgcg = iprgcg - self.parent.add_package(self) - return - - def write_file(self): - """ - Write the package file - - Returns - ------- - None - - """ - # Open file for writing - f_gcg = open(self.fn_path, 'w') - f_gcg.write('{} {} {} {}\n'.format(self.mxiter, self.iter1, - self.isolve, self.ncrs)) - f_gcg.write('{} {} {}\n'.format(self.accl, self.cclose, self.iprgcg)) - f_gcg.close() - return - - @staticmethod - def load(f, model, ext_unit_dict=None): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.mt3d.mt.Mt3dms`) to - which this package will be added. - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - - Returns - ------- - gcg : Mt3dGcg object - Mt3dGcg object. 
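For reference, the two format statements in write_file above produce a two-record file; with the package defaults the output would read

    1 50 3 0
    1 1e-05 0

where the first record holds MXITER, ITER1, ISOLVE, and NCRS, and the second holds ACCL, CCLOSE, and IPRGCG (an illustrative sketch, not a file from the repository).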
- - Examples - -------- - - >>> import flopy - >>> mt = flopy.mt3d.Mt3dms() - >>> gcg = flopy.mt3d.Mt3dGcg.load('test.gcg', m) - - """ - - if model.verbose: - sys.stdout.write('loading gcg package file...\n') - - # Open file, if necessary - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - - # Dataset 0 -- comment line - while True: - line = f.readline() - if line[0] != '#': - break - - # Item F1: MIXELM, PERCEL, MXPART, NADVFD - line already read above - if model.verbose: - print(' loading MXITER, ITER1, ISOLVE, NCRS...') - t = line.strip().split() - mxiter = int(t[0]) - iter1 = int(t[1]) - isolve = int(t[2]) - ncrs = int(t[3]) - if model.verbose: - print(' MXITER {}'.format(mxiter)) - print(' ITER1 {}'.format(iter1)) - print(' ISOLVE {}'.format(isolve)) - print(' NCRS {}'.format(ncrs)) - - # Item F2: ACCL, CCLOSE, IPRGCG - if model.verbose: - print(' loading ACCL, CCLOSE, IPRGCG...') - line = f.readline() - t = line.strip().split() - accl = float(t[0]) - cclose = float(t[1]) - iprgcg = int(t[2]) - if model.verbose: - print(' ACCL {}'.format(accl)) - print(' CCLOSE {}'.format(cclose)) - print(' IPRGCG {}'.format(iprgcg)) - - if openfile: - f.close() - - # set package unit number - unitnumber = None - filenames = [None] - if ext_unit_dict is not None: - unitnumber, filenames[0] = \ - model.get_ext_dict_attr(ext_unit_dict, - filetype=Mt3dGcg.ftype()) - - # Construct and return gcg package - gcg = Mt3dGcg(model, mxiter=mxiter, iter1=iter1, isolve=isolve, - ncrs=ncrs, accl=accl, cclose=cclose, iprgcg=iprgcg, - unitnumber=unitnumber, filenames=filenames) - return gcg - - @staticmethod - def ftype(): - return 'GCG' - - @staticmethod - def defaultunit(): - return 35 - - @staticmethod - def reservedunit(): - return 9 +import sys +from ..pakbase import Package + + +class Mt3dGcg(Package): + """ + MT3DMS Generalized Conjugate Gradient Package Class. + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.mt3d.mt.Mt3dms`) to which + this package will be added. + mxiter : int + is the maximum number of outer iterations; it should be set to an + integer greater than one only when a nonlinear sorption isotherm is + included in simulation. (default is 1) + iter1 : int + is the maximum number of inner iterations; a value of 30-50 should be + adequate for most problems. (default is 50) + isolve : int + is the type of preconditioners to be used with the Lanczos/ORTHOMIN + acceleration scheme: + = 1, Jacobi + = 2, SSOR + = 3, Modified Incomplete Cholesky (MIC) (MIC usually converges faster, + but it needs significantly more memory) + (default is 3) + ncrs : int + is an integer flag for treatment of dispersion tensor cross terms: + = 0, lump all dispersion cross terms to the right-hand-side + (approximate but highly efficient). = 1, include full dispersion + tensor (memory intensive). + (default is 0) + accl : float + is the relaxation factor for the SSOR option; a value of 1.0 is + generally adequate. + (default is 1) + cclose : float + is the convergence criterion in terms of relative concentration; a + real value between 10-4 and 10-6 is generally adequate. + (default is 1.E-5) + iprgcg : int + IPRGCG is the interval for printing the maximum concentration changes + of each iteration. Set IPRGCG to zero as default for printing at the + end of each stress period. + (default is 0) + extension : string + Filename extension (default is 'gcg') + unitnumber : int + File unit number (default is None). 
+ filenames : str or list of str + Filenames to use for the package. If filenames=None the package name + will be created using the model name and package extension. If a + single string is passed the package will be set to the string. + Default is None. + + Attributes + ---------- + + Methods + ------- + + See Also + -------- + + Notes + ----- + + Examples + -------- + + >>> import flopy + >>> m = flopy.mt3d.Mt3dms() + >>> gcg = flopy.mt3d.Mt3dGcg(m) + + """ + unitnumber = 35 + + def __init__(self, model, mxiter=1, iter1=50, isolve=3, ncrs=0, + accl=1, cclose=1e-5, iprgcg=0, extension='gcg', + unitnumber=None, filenames=None): + + if unitnumber is None: + unitnumber = Mt3dGcg.defaultunit() + elif unitnumber == 0: + unitnumber = Mt3dGcg.reservedunit() + + # set filenames + if filenames is None: + filenames = [None] + elif isinstance(filenames, str): + filenames = [filenames] + + # Fill namefile items + name = [Mt3dGcg.ftype()] + units = [unitnumber] + extra = [''] + + # set package name + fname = [filenames[0]] + + # Call ancestor's init to set self.parent, extension, name and unit number + Package.__init__(self, model, extension=extension, name=name, + unit_number=units, extra=extra, filenames=fname) + + self.mxiter = mxiter + self.iter1 = iter1 + self.isolve = isolve + self.ncrs = ncrs + self.accl = accl + self.cclose = cclose + self.iprgcg = iprgcg + self.parent.add_package(self) + return + + def write_file(self): + """ + Write the package file + + Returns + ------- + None + + """ + # Open file for writing + f_gcg = open(self.fn_path, 'w') + f_gcg.write('{} {} {} {}\n'.format(self.mxiter, self.iter1, + self.isolve, self.ncrs)) + f_gcg.write('{} {} {}\n'.format(self.accl, self.cclose, self.iprgcg)) + f_gcg.close() + return + + @staticmethod + def load(f, model, ext_unit_dict=None): + """ + Load an existing package. + + Parameters + ---------- + f : filename or file handle + File to load. + model : model object + The model object (of type :class:`flopy.mt3d.mt.Mt3dms`) to + which this package will be added. + ext_unit_dict : dictionary, optional + If the arrays in the file are specified using EXTERNAL, + or older style array control records, then `f` should be a file + handle. In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. + + Returns + ------- + gcg : Mt3dGcg object + Mt3dGcg object. 
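One constructor behavior worth noting: unitnumber=0 is remapped to the package's reserved MT3DMS unit (9 for GCG) rather than the default unit 35, as the unitnumber logic in __init__ above shows. A minimal sketch:

    >>> import flopy
    >>> mt = flopy.mt3d.Mt3dms()
    >>> gcg = flopy.mt3d.Mt3dGcg(mt, unitnumber=0)  # registered on reserved unit 9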
+
+        Examples
+        --------
+
+        >>> import flopy
+        >>> mt = flopy.mt3d.Mt3dms()
+        >>> gcg = flopy.mt3d.Mt3dGcg.load('test.gcg', mt)
+
+        """
+
+        if model.verbose:
+            sys.stdout.write('loading gcg package file...\n')
+
+        # Open file, if necessary
+        openfile = not hasattr(f, 'read')
+        if openfile:
+            filename = f
+            f = open(filename, 'r')
+
+        # Dataset 0 -- comment line
+        while True:
+            line = f.readline()
+            if line[0] != '#':
+                break
+
+        # Item F1: MXITER, ITER1, ISOLVE, NCRS - line already read above
+        if model.verbose:
+            print('   loading MXITER, ITER1, ISOLVE, NCRS...')
+        t = line.strip().split()
+        mxiter = int(t[0])
+        iter1 = int(t[1])
+        isolve = int(t[2])
+        ncrs = int(t[3])
+        if model.verbose:
+            print('   MXITER {}'.format(mxiter))
+            print('   ITER1 {}'.format(iter1))
+            print('   ISOLVE {}'.format(isolve))
+            print('   NCRS {}'.format(ncrs))
+
+        # Item F2: ACCL, CCLOSE, IPRGCG
+        if model.verbose:
+            print('   loading ACCL, CCLOSE, IPRGCG...')
+        line = f.readline()
+        t = line.strip().split()
+        accl = float(t[0])
+        cclose = float(t[1])
+        iprgcg = int(t[2])
+        if model.verbose:
+            print('   ACCL {}'.format(accl))
+            print('   CCLOSE {}'.format(cclose))
+            print('   IPRGCG {}'.format(iprgcg))
+
+        if openfile:
+            f.close()
+
+        # set package unit number
+        unitnumber = None
+        filenames = [None]
+        if ext_unit_dict is not None:
+            unitnumber, filenames[0] = \
+                model.get_ext_dict_attr(ext_unit_dict,
+                                        filetype=Mt3dGcg.ftype())
+
+        # Construct and return gcg package
+        gcg = Mt3dGcg(model, mxiter=mxiter, iter1=iter1, isolve=isolve,
+                      ncrs=ncrs, accl=accl, cclose=cclose, iprgcg=iprgcg,
+                      unitnumber=unitnumber, filenames=filenames)
+        return gcg
+
+    @staticmethod
+    def ftype():
+        return 'GCG'
+
+    @staticmethod
+    def defaultunit():
+        return 35
+
+    @staticmethod
+    def reservedunit():
+        return 9
diff --git a/flopy/mt3d/mtphc.py b/flopy/mt3d/mtphc.py
index 9a514ea7b1..a34ceb01af 100644
--- a/flopy/mt3d/mtphc.py
+++ b/flopy/mt3d/mtphc.py
@@ -1,110 +1,110 @@
-from ..pakbase import Package
-
-
-class Mt3dPhc(Package):
-    """
-    PHC package class for PHT3D
-    """
-    unitnumber = 38
-
-    def __init__(self, model, os=2, temp=25, asbin=0, eps_aqu=0, eps_ph=0,
-                 scr_output=1, cb_offset=0, smse=['pH', 'pe'], mine=[], ie=[],
-                 surf=[], mobkin=[], minkin=[], surfkin=[], imobkin=[],
-                 extension='phc', unitnumber=None, filenames=None):
-
-        if unitnumber is None:
-            unitnumber = Mt3dPhc.defaultunit()
-        elif unitnumber == 0:
-            unitnumber = Mt3dPhc.reservedunit()
-
-        # set filenames
-        if filenames is None:
-            filenames = [None]
-        elif isinstance(filenames, str):
-            filenames = [filenames]
-
-        # Fill namefile items
-        name = [Mt3dPhc.ftype()]
-        units = [unitnumber]
-        extra = ['']
-
-        # set package name
-        fname = [filenames[0]]
-
-        # Call ancestor's init to set self.parent, extension, name and unit number
-        Package.__init__(self, model, extension=extension, name=name,
-                         unit_number=units, extra=extra, filenames=fname)
-
-        self.os = os
-        self.temp = temp
-        self.asbin = asbin
-        self.eps_aqu = eps_aqu
-        self.eps_ph = eps_ph
-        self.scr_output = scr_output
-        self.cb_offset = cb_offset
-        self.smse = smse
-        self.nsmse = len(self.smse)
-        self.mine = mine
-        self.nmine = len(self.mine)
-        self.ie = ie
-        self.nie = len(self.ie)
-        self.surf = surf
-        self.nsurf = len(self.surf)
-        self.mobkin = mobkin
-        self.nmobkin = len(self.mobkin)
-        self.minkin = minkin[0]
-        self.nminkin = len(self.minkin)
-        self.minkin_parms = minkin[1]
-        self.surfkin = surfkin
-        self.nsurfkin = len(self.surfkin)
-        self.imobkin = imobkin
-        self.nimobkin = len(self.imobkin)
-        self.parent.add_package(self)
- return - - def __repr__(self): - return 'PHC package class for PHT3D' - - def write_file(self): - """ - Write the package file - - Returns - ------- - None - - """ - # Open file for writing - f_phc = open(self.fn_path, 'w') - f_phc.write('%3d%10f%3d%10f%10f%3d\n' % (self.os, self.temp, - self.asbin, self.eps_aqu, - self.eps_ph, self.scr_output)) - f_phc.write('%10f\n' % (self.cb_offset)) - f_phc.write('%3d\n' % (self.nsmse)) - f_phc.write('%3d\n' % (self.nmine)) - f_phc.write('%3d\n' % (self.nie)) - f_phc.write('%3d\n' % (self.nsurf)) - f_phc.write('%3d%3d%3d%3d\n' % (self.nmobkin, self.nminkin, - self.nsurfkin, self.nimobkin)) - for s in self.smse: - f_phc.write('%s\n' % (s)) - i = 0 - for m in self.minkin: - f_phc.write('%s %d\n' % (m, len(self.minkin_parms[i]))) - for n in self.minkin_parms[i]: - f_phc.write('\t%10f\n' % (n)) - i = i + 1 - f_phc.close() - return - - @staticmethod - def ftype(): - return 'PHC' - - @staticmethod - def defaultunit(): - return 38 - - @staticmethod - def reservedunit(): - return 38 +from ..pakbase import Package + + +class Mt3dPhc(Package): + """ + PHC package class for PHT3D + """ + unitnumber = 38 + + def __init__(self, model, os=2, temp=25, asbin=0, eps_aqu=0, eps_ph=0, + scr_output=1, cb_offset=0, smse=['pH', 'pe'], mine=[], ie=[], + surf=[], mobkin=[], minkin=[], surfkin=[], imobkin=[], + extension='phc', unitnumber=None, filenames=None): + + if unitnumber is None: + unitnumber = Mt3dPhc.defaultunit() + elif unitnumber == 0: + unitnumber = Mt3dPhc.reservedunit() + + # set filenames + if filenames is None: + filenames = [None] + elif isinstance(filenames, str): + filenames = [filenames] + + # Fill namefile items + name = [Mt3dPhc.ftype()] + units = [unitnumber] + extra = [''] + + # set package name + fname = [filenames[0]] + + # Call ancestor's init to set self.parent, extension, name and unit number + Package.__init__(self, model, extension=extension, name=name, + unit_number=units, extra=extra, filenames=fname) + + self.os = os + self.temp = temp + self.asbin = asbin + self.eps_aqu = eps_aqu + self.eps_ph = eps_ph + self.scr_output = scr_output + self.cb_offset = cb_offset + self.smse = smse + self.nsmse = len(self.smse) + self.mine = mine + self.nmine = len(self.mine) + self.ie = ie + self.nie = len(self.ie) + self.surf = surf + self.nsurf = len(self.surf) + self.mobkin = mobkin + self.nmobkin = len(self.mobkin) + self.minkin = minkin[0] + self.nminkin = len(self.minkin) + self.minkin_parms = minkin[1] + self.surfkin = surfkin + self.nsurfkin = len(self.surfkin) + self.imobkin = imobkin + self.nimobkin = len(self.imobkin) + self.parent.add_package(self) + return + + def __repr__(self): + return 'PHC package class for PHT3D' + + def write_file(self): + """ + Write the package file + + Returns + ------- + None + + """ + # Open file for writing + f_phc = open(self.fn_path, 'w') + f_phc.write('%3d%10f%3d%10f%10f%3d\n' % (self.os, self.temp, + self.asbin, self.eps_aqu, + self.eps_ph, self.scr_output)) + f_phc.write('%10f\n' % (self.cb_offset)) + f_phc.write('%3d\n' % (self.nsmse)) + f_phc.write('%3d\n' % (self.nmine)) + f_phc.write('%3d\n' % (self.nie)) + f_phc.write('%3d\n' % (self.nsurf)) + f_phc.write('%3d%3d%3d%3d\n' % (self.nmobkin, self.nminkin, + self.nsurfkin, self.nimobkin)) + for s in self.smse: + f_phc.write('%s\n' % (s)) + i = 0 + for m in self.minkin: + f_phc.write('%s %d\n' % (m, len(self.minkin_parms[i]))) + for n in self.minkin_parms[i]: + f_phc.write('\t%10f\n' % (n)) + i = i + 1 + f_phc.close() + return + + @staticmethod + def ftype(): 
+ return 'PHC' + + @staticmethod + def defaultunit(): + return 38 + + @staticmethod + def reservedunit(): + return 38 diff --git a/flopy/mt3d/mtrct.py b/flopy/mt3d/mtrct.py index 77fa91397d..4a6a90a8a4 100644 --- a/flopy/mt3d/mtrct.py +++ b/flopy/mt3d/mtrct.py @@ -1,612 +1,612 @@ -import sys -import numpy as np -from ..pakbase import Package -from ..utils import Util3d - - -class Mt3dRct(Package): - """ - Chemical reaction package class. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.mt3dms.mt.Mt3dms`) to which - this package will be added. - isothm : int - isothm is a flag indicating which type of sorption (or dual-domain mass - transfer) is simulated: isothm = 0, no sorption is simulated; - isothm = 1, linear isotherm (equilibrium-controlled); isothm = 2, - Freundlich isotherm (equilibrium-controlled); isothm = 3, Langmuir - isotherm (equilibrium-controlled); isothm = 4, first-order kinetic - sorption (nonequilibrium); isothm = 5, dual-domain mass transfer - (without sorption); isothm = 6, dual-domain mass transfer - (with sorption). (default is 0). - ireact : int - ireact is a flag indicating which type of kinetic rate reaction is - simulated: ireact = 0, no kinetic rate reaction is simulated; - ireact = 1, first-order irreversible reaction, ireact = 100, - zero-order reactions (decay or production). Note that this reaction - package is not intended for modeling chemical reactions between - species. An add-on reaction package developed specifically for that - purpose may be used. (default is 0). - igetsc : int - igetsc is an integer flag indicating whether the initial concentration - for the nonequilibrium sorbed or immobile phase of all species should - be read when nonequilibrium sorption (isothm = 4) or dual-domain mass - transfer (isothm = 5 or 6) is simulated: igetsc = 0, the initial - concentration for the sorbed or immobile phase is not read. By default, - the sorbed phase is assumed to be in equilibrium with the dissolved - phase (isothm = 4), and the immobile domain is assumed to have zero - concentration (isothm = 5 or 6). igetsc > 0, the initial concentration - for the sorbed phase or immobile liquid phase of all species will be - read. (default is 1). - rhob : float or array of floats (nlay, nrow, ncol) - rhob is the bulk density of the aquifer medium (unit, ML-3). rhob is - used if isothm = 1, 2, 3, 4, or 6. If rhob is not user-specified and - isothm is not 5 then rhob is set to 1.8e3. (default is None) - prsity2 : float or array of floats (nlay, nrow, ncol) - prsity2 is the porosity of the immobile domain (the ratio of pore - spaces filled with immobile fluids over the bulk volume of the aquifer - medium) when the simulation is intended to represent a dual-domain - system. prsity2 is used if isothm = 5 or 6. If prsity2 is not user- - specified and isothm = 5 or 6 then prsity2 is set to 0.1. - (default is None) - srconc : float or array of floats (nlay, nrow, ncol) - srconc is the user-specified initial concentration for the sorbed phase - of the first species if isothm = 4 (unit, MM-1). Note that for - equilibrium-controlled sorption, the initial concentration for the - sorbed phase cannot be specified. srconc is the user-specified initial - concentration of the first species for the immobile liquid phase if - isothm = 5 or 6 (unit, ML-3). If srconc is not user-specified and - isothm = 4, 5, or 6 then srconc is set to 0. (default is None). 
- sp1 : float or array of floats (nlay, nrow, ncol) - sp1 is the first sorption parameter for the first species. The use of - sp1 depends on the type of sorption selected (the value of isothm). - For linear sorption (isothm = 1) and nonequilibrium sorption (isothm = - 4), sp1 is the distribution coefficient (Kd) (unit, L3M-1). For - Freundlich sorption (isothm = 2), sp1 is the Freundlich equilibrium - constant (Kf) (the unit depends on the Freundlich exponent a). For - Langmuir sorption (isothm = 3), sp1 is the Langmuir equilibrium - constant (Kl) (unit, L3M-1 ). For dual-domain mass transfer without - sorption (isothm = 5), sp1 is not used, but still must be entered. For - dual-domain mass transfer with sorption (isothm = 6), sp1 is also the - distribution coefficient (Kd) (unit, L3M-1). If sp1 is not specified - and isothm > 0 then sp1 is set to 0. (default is None). - sp2 : float or array of floats (nlay, nrow, ncol) - sp2 is the second sorption or dual-domain model parameter for the first - species. The use of sp2 depends on the type of sorption or dual-domain - model selected. For linear sorption (isothm = 1), sp2 is read but not - used. For Freundlich sorption (isothm = 2), sp2 is the Freundlich - exponent a. For Langmuir sorption (isothm = 3), sp2 is the total - concentration of the sorption sites available ( S ) (unit, MM-1). For - nonequilibrium sorption (isothm = 4), sp2 is the first-order mass - transfer rate between the dissolved and sorbed phases (unit, T-1). For - dual-domain mass transfer (isothm = 5 or 6), sp2 is the first-order - mass transfer rate between the two domains (unit, T-1). If sp2 is not - specified and isothm > 0 then sp2 is set to 0. (default is None). - rc1 : float or array of floats (nlay, nrow, ncol) - rc1 is the first-order reaction rate for the dissolved (liquid) phase - for the first species (unit, T-1). rc1 is not used ireact = 0. If a - dual-domain system is simulated, the reaction rates for the liquid - phase in the mobile and immobile domains are assumed to be equal. If - rc1 is not specified and ireact > 0 then rc1 is set to 0. - (default is None). - rc2 : float or array of floats (nlay, nrow, ncol) - rc2 is the first-order reaction rate for the sorbed phase for the first - species (unit, T-1). rc2 is not used ireact = 0. If a dual-domain - system is simulated, the reaction rates for the sorbed phase in the - mobile and immobile domains are assumed to be equal. Generally, if the - reaction is radioactive decay, rc2 should be set equal to rc1, while - for biodegradation, rc2 may be different from rc1. Note that rc2 is - read but not used, if no sorption is included in the simulation. If - rc2 is not specified and ireact > 0 then rc2 is set to 0. - (default is None). - extension : string - Filename extension (default is 'rct') - unitnumber : int - File unit number. If file unit number is None then an unused unit - number if used. (default is None). - - Other Parameters - ---------------- - srconcn : float or array of floats (nlay, nrow, ncol) - srconcn is the user-specified initial concentration for the sorbed - phase of species n. If srconcn is not passed as a **kwarg and - isothm = 4, 5, or 6 then srconc for species n is set to 0. - See description of srconc for a more complete description of srconcn. - sp1n : float or array of floats (nlay, nrow, ncol) - sp1n is the first sorption parameter for species n. If sp1n is not - passed as a **kwarg and isothm > 0 then sp1 for species n is set to 0. 
- See description of sp1 for a more complete description of sp1n. - sp2n : float or array of floats (nlay, nrow, ncol) - sp2n is the second sorption or dual-domain model parameter for species - n. If sp2n is not passed as a **kwarg and isothm > 0 then sp2 for - species n is set to 0. See description of sp2 for a more complete - description of sp2n. - rc1n : float or array of floats (nlay, nrow, ncol) - rc1n is the first-order reaction rate for the dissolved (liquid) phase - for species n. If rc1n is not passed as a **kwarg and ireact > 0 then - rc1 for species n is set to 0. See description of rc1 for a more - complete description of rc1n. - rc2n : float or array of floats (nlay, nrow, ncol) - rc2n is the first-order reaction rate for the sorbed phase for species - n. If rc2n is not passed as a **kwarg and ireact > 0 then rc2 for - species n is set to 0. See description of rc2 for a more complete - description of rc2n. - - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - - Examples - -------- - - >>> import flopy - >>> mt = flopy.mt3dms.Mt3dms() - >>> rct = flopy.mt3dms.Mt3dRct(mt) - - """ - - def __init__(self, model, isothm=0, ireact=0, igetsc=1, rhob=None, - prsity2=None, srconc=None, sp1=None, sp2=None, rc1=None, - rc2=None, extension='rct', unitnumber=None, - filenames=None, **kwargs): - """ - Package constructor. - - """ - - if unitnumber is None: - unitnumber = Mt3dRct.defaultunit() - elif unitnumber == 0: - unitnumber = Mt3dRct.reservedunit() - - # set filenames - if filenames is None: - filenames = [None] - elif isinstance(filenames, str): - filenames = [filenames] - - # Fill namefile items - name = [Mt3dRct.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - nrow = model.nrow - ncol = model.ncol - nlay = model.nlay - ncomp = model.ncomp - - # Item E1: ISOTHM, IREACT, IRCTOP, IGETSC - self.isothm = isothm - self.ireact = ireact - self.irctop = 2 # All RCT vars are specified as 3D arrays - self.igetsc = igetsc - - # Item E2A: RHOB - if rhob is None: - rhob = 1.8e3 - self.rhob = Util3d(model, (nlay, nrow, ncol), np.float32, rhob, - name='rhob', locat=self.unit_number[0], - array_free_format=False) - - # Item E2B: PRSITY - if prsity2 is None: - prsity2 = 0.1 - self.prsity2 = Util3d(model, (nlay, nrow, ncol), np.float32, prsity2, - name='prsity2', locat=self.unit_number[0], - array_free_format=False) - - # Item E2C: SRCONC - if srconc is None: - srconc = 0.0 - self.srconc = [] - u3d = Util3d(model, (nlay, nrow, ncol), np.float32, srconc, - name='srconc1', locat=self.unit_number[0], - array_free_format=False) - self.srconc.append(u3d) - if ncomp > 1: - for icomp in range(2, ncomp + 1): - name = "srconc" + str(icomp) - val = 0.0 - if name in kwargs: - val = kwargs.pop(name) - else: - print("RCT: setting srconc for component " + - str(icomp) + " to zero, kwarg name " + - name) - u3d = Util3d(model, (nlay, nrow, ncol), np.float32, val, - name=name, locat=self.unit_number[0], - array_free_format=False) - self.srconc.append(u3d) - - # Item E3: SP1 - if sp1 is None: - sp1 = 0.0 - self.sp1 = [] - u3d = Util3d(model, (nlay, nrow, ncol), np.float32, sp1, name='sp11', - locat=self.unit_number[0], array_free_format=False) - self.sp1.append(u3d) - if ncomp > 1: - for icomp in range(2, ncomp + 1): - name = "sp1" + 
str(icomp) - val = 0.0 - if name in kwargs: - val = kwargs.pop(name) - else: - print("RCT: setting sp1 for component " + - str(icomp) + " to zero, kwarg name " + - name) - u3d = Util3d(model, (nlay, nrow, ncol), np.float32, val, - name=name, locat=self.unit_number[0], - array_free_format=False) - self.sp1.append(u3d) - - # Item E4: SP2 - if sp2 is None: - sp2 = 0.0 - self.sp2 = [] - u3d = Util3d(model, (nlay, nrow, ncol), np.float32, sp2, name='sp21', - locat=self.unit_number[0], array_free_format=False) - self.sp2.append(u3d) - if ncomp > 1: - for icomp in range(2, ncomp + 1): - name = "sp2" + str(icomp) - val = 0.0 - if name in kwargs: - val = kwargs.pop(name) - else: - print("RCT: setting sp2 for component " + - str(icomp) + " to zero, kwarg name " + - name) - u3d = Util3d(model, (nlay, nrow, ncol), np.float32, val, - name=name, locat=self.unit_number[0], - array_free_format=False) - self.sp2.append(u3d) - - # Item E5: RC1 - if rc1 is None: - rc1 = 0.0 - self.rc1 = [] - u3d = Util3d(model, (nlay, nrow, ncol), np.float32, rc1, name='rc11', - locat=self.unit_number[0], array_free_format=False) - self.rc1.append(u3d) - if ncomp > 1: - for icomp in range(2, ncomp + 1): - name = "rc1" + str(icomp) - val = 0.0 - if name in kwargs: - val = kwargs.pop(name) - else: - print("RCT: setting rc1 for component " + - str(icomp) + " to zero, kwarg name " + - name) - u3d = Util3d(model, (nlay, nrow, ncol), np.float32, val, - name=name, locat=self.unit_number[0], - array_free_format=False) - self.rc1.append(u3d) - - # Item E4: RC2 - if rc2 is None: - rc2 = 0.0 - self.rc2 = [] - u3d = Util3d(model, (nlay, nrow, ncol), np.float32, rc2, name='rc21', - locat=self.unit_number[0], array_free_format=False) - self.rc2.append(u3d) - if ncomp > 1: - for icomp in range(2, ncomp + 1): - name = "rc2" + str(icomp) - val = 0.0 - if name in kwargs: - val = kwargs.pop(name) - else: - print("RCT: setting rc2 for component " + - str(icomp) + " to zero, kwarg name " + - name) - u3d = Util3d(model, (nlay, nrow, ncol), np.float32, val, - name=name, locat=self.unit_number[0], - array_free_format=False) - self.rc2.append(u3d) - - # Check to make sure that all kwargs have been consumed - if len(list(kwargs.keys())) > 0: - raise Exception("RCT error: unrecognized kwargs: " + - ' '.join(list(kwargs.keys()))) - - self.parent.add_package(self) - return - - def __repr__(self): - return 'Chemical reaction package class' - - def write_file(self): - """ - Write the package file - - Returns - ------- - None - - """ - # Open file for writing - f_rct = open(self.fn_path, 'w') - f_rct.write('%10i%10i%10i%10i\n' % (self.isothm, self.ireact, - self.irctop, self.igetsc)) - if (self.isothm in [1, 2, 3, 4, 6]): - f_rct.write(self.rhob.get_file_entry()) - if (self.isothm in [5, 6]): - f_rct.write(self.prsity2.get_file_entry()) - if (self.igetsc > 0): - for icomp in range(len(self.srconc)): - f_rct.write(self.srconc[icomp].get_file_entry()) - if (self.isothm > 0): - for icomp in range(len(self.sp1)): - f_rct.write(self.sp1[icomp].get_file_entry()) - if (self.isothm > 0): - for icomp in range(len(self.sp2)): - f_rct.write(self.sp2[icomp].get_file_entry()) - if (self.ireact > 0): - for icomp in range(len(self.rc1)): - f_rct.write(self.rc1[icomp].get_file_entry()) - if (self.ireact > 0): - for icomp in range(len(self.rc2)): - f_rct.write(self.rc2[icomp].get_file_entry()) - f_rct.close() - return - - @staticmethod - def load(f, model, nlay=None, nrow=None, ncol=None, ncomp=None, - ext_unit_dict=None): - """ - Load an existing package. 
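Because the openfile check below only tests hasattr(f, 'read'), load accepts an open file handle as well as a filename; a sketch, assuming a file named test.rct exists (the ext_unit_dict argument is only needed for EXTERNAL-style records):

    >>> import flopy
    >>> mt = flopy.mt3d.Mt3dms()
    >>> with open('test.rct') as fh:
    ...     rct = flopy.mt3d.Mt3dRct.load(fh, mt)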
- - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.mt3d.mt.Mt3dms`) to - which this package will be added. - nlay : int - Number of model layers in the reaction package. If nlay is not - specified, the number of layers in the passed model object is - used. (default is None). - nrow : int - Number of model rows in the reaction package. If nrow is not - specified, the number of rows in the passed model object is - used. (default is None). - ncol : int - Number of model columns in the reaction package. If nlay is not - specified, the number of columns in the passed model object is - used. (default is None). - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - - Returns - ------- - rct : Mt3dRct object - Mt3dRct object. - - Examples - -------- - - >>> import flopy - >>> mt = flopy.mt3d.Mt3dms() - >>> rct = flopy.mt3d.Mt3dRct.load('test.rct', mt) - - """ - - if model.verbose: - sys.stdout.write('loading rct package file...\n') - - # Open file, if necessary - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - - # Set dimensions if necessary - if nlay is None: - nlay = model.nlay - if nrow is None: - nrow = model.nrow - if ncol is None: - ncol = model.ncol - if ncomp is None: - ncomp = model.ncomp - - # Setup kwargs to store multispecies information - kwargs = {} - - # Item E1 - line = f.readline() - if model.verbose: - print(' loading ISOTHM, IREACT, IRCTOP, IGETSC...') - isothm = int(line[0:10]) - ireact = int(line[10:20]) - try: - irctop = int(line[20:30]) - except: - irctop = 0 - try: - igetsc = int(line[30:40]) - except: - igetsc = 0 - if model.verbose: - print(' ISOTHM {}'.format(isothm)) - print(' IREACT {}'.format(ireact)) - print(' IRCTOP {}'.format(irctop)) - print(' IGETSC {}'.format(igetsc)) - - # Item E2A: RHOB - rhob = None - if model.verbose: - print(' loading RHOB...') - if isothm in [1, 2, 3, 4, 6]: - rhob = Util3d.load(f, model, (nlay, nrow, ncol), np.float32, - 'rhob', ext_unit_dict, array_format="mt3d") - if model.verbose: - print(' RHOB {}'.format(rhob)) - - # Item E2A: PRSITY2 - prsity2 = None - if model.verbose: - print(' loading PRSITY2...') - if isothm in [5, 6]: - prsity2 = Util3d.load(f, model, (nlay, nrow, ncol), np.float32, - 'prsity2', ext_unit_dict, - array_format="mt3d") - if model.verbose: - print(' PRSITY2 {}'.format(prsity2)) - - # Item E2C: SRCONC - srconc = None - if model.verbose: - print(' loading SRCONC...') - if igetsc > 0: - srconc = Util3d.load(f, model, (nlay, nrow, ncol), np.float32, - 'srconc1', ext_unit_dict, array_format="mt3d") - if model.verbose: - print(' SRCONC {}'.format(srconc)) - if ncomp > 1: - for icomp in range(2, ncomp + 1): - name = "srconc" + str(icomp) - if model.verbose: - print(' loading {}...'.format(name)) - u3d = Util3d.load(f, model, (nlay, nrow, ncol), np.float32, - name, ext_unit_dict, array_format="mt3d") - kwargs[name] = u3d - if model.verbose: - print(' SRCONC{} {}'.format(icomp, u3d)) - - # Item E3: SP1 - sp1 = None - if model.verbose: - print(' loading SP1...') - if isothm > 0: - sp1 = Util3d.load(f, model, (nlay, nrow, ncol), np.float32, - 'sp11', ext_unit_dict, array_format="mt3d") - if model.verbose: - print(' SP1 {}'.format(sp1)) - if ncomp 
> 1: - for icomp in range(2, ncomp + 1): - name = "sp1" + str(icomp) - if model.verbose: - print(' loading {}...'.format(name)) - u3d = Util3d.load(f, model, (nlay, nrow, ncol), np.float32, - name, ext_unit_dict, array_format="mt3d") - kwargs[name] = u3d - if model.verbose: - print(' SP1{} {}'.format(icomp, u3d)) - - # Item E4: SP2 - sp2 = None - if model.verbose: - print(' loading SP2...') - if isothm > 0: - sp2 = Util3d.load(f, model, (nlay, nrow, ncol), np.float32, - 'sp21', ext_unit_dict, array_format="mt3d") - if model.verbose: - print(' SP2 {}'.format(sp2)) - if ncomp > 1: - for icomp in range(2, ncomp + 1): - name = "sp2" + str(icomp) - if model.verbose: - print(' loading {}...'.format(name)) - u3d = Util3d.load(f, model, (nlay, nrow, ncol), np.float32, - name, ext_unit_dict, array_format="mt3d") - kwargs[name] = u3d - if model.verbose: - print(' SP2{} {}'.format(icomp, u3d)) - - # Item E5: RC1 - rc1 = None - if model.verbose: - print(' loading RC1...') - if ireact > 0: - rc1 = Util3d.load(f, model, (nlay, nrow, ncol), np.float32, - 'rc11', ext_unit_dict, - array_format="mt3d") - if model.verbose: - print(' RC1 {}'.format(rc1)) - if ncomp > 1: - for icomp in range(2, ncomp + 1): - name = "rc1" + str(icomp) - if model.verbose: - print(' loading {}...'.format(name)) - u3d = Util3d.load(f, model, (nlay, nrow, ncol), np.float32, - name, ext_unit_dict, array_format="mt3d") - kwargs[name] = u3d - if model.verbose: - print(' RC1{} {}'.format(icomp, u3d)) - - # Item E6: RC2 - rc2 = None - if model.verbose: - print(' loading RC2...') - if ireact > 0: - rc2 = Util3d.load(f, model, (nlay, nrow, ncol), np.float32, - 'rc21', ext_unit_dict, array_format="mt3d") - if model.verbose: - print(' RC2 {}'.format(rc2)) - if ncomp > 1: - for icomp in range(2, ncomp + 1): - name = "rc2" + str(icomp) - if model.verbose: - print(' loading {}...'.format(name)) - u3d = Util3d.load(f, model, (nlay, nrow, ncol), np.float32, - name, ext_unit_dict, array_format="mt3d") - kwargs[name] = u3d - if model.verbose: - print(' RC2{} {}'.format(icomp, u3d)) - - if openfile: - f.close() - - # set package unit number - unitnumber = None - filenames = [None] - if ext_unit_dict is not None: - unitnumber, filenames[0] = \ - model.get_ext_dict_attr(ext_unit_dict, - filetype=Mt3dRct.ftype()) - - # Construct and return rct package - rct = Mt3dRct(model, isothm=isothm, ireact=ireact, igetsc=igetsc, - rhob=rhob, prsity2=prsity2, srconc=srconc, sp1=sp1, - sp2=sp2, rc1=rc1, rc2=rc2, unitnumber=unitnumber, - filenames=filenames, **kwargs) - return rct - - @staticmethod - def ftype(): - return 'RCT' - - @staticmethod - def defaultunit(): - return 36 - - @staticmethod - def reservedunit(): - return 8 +import sys +import numpy as np +from ..pakbase import Package +from ..utils import Util3d + + +class Mt3dRct(Package): + """ + Chemical reaction package class. + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.mt3dms.mt.Mt3dms`) to which + this package will be added. + isothm : int + isothm is a flag indicating which type of sorption (or dual-domain mass + transfer) is simulated: isothm = 0, no sorption is simulated; + isothm = 1, linear isotherm (equilibrium-controlled); isothm = 2, + Freundlich isotherm (equilibrium-controlled); isothm = 3, Langmuir + isotherm (equilibrium-controlled); isothm = 4, first-order kinetic + sorption (nonequilibrium); isothm = 5, dual-domain mass transfer + (without sorption); isothm = 6, dual-domain mass transfer + (with sorption). (default is 0). 
+ ireact : int + ireact is a flag indicating which type of kinetic rate reaction is + simulated: ireact = 0, no kinetic rate reaction is simulated; + ireact = 1, first-order irreversible reaction, ireact = 100, + zero-order reactions (decay or production). Note that this reaction + package is not intended for modeling chemical reactions between + species. An add-on reaction package developed specifically for that + purpose may be used. (default is 0). + igetsc : int + igetsc is an integer flag indicating whether the initial concentration + for the nonequilibrium sorbed or immobile phase of all species should + be read when nonequilibrium sorption (isothm = 4) or dual-domain mass + transfer (isothm = 5 or 6) is simulated: igetsc = 0, the initial + concentration for the sorbed or immobile phase is not read. By default, + the sorbed phase is assumed to be in equilibrium with the dissolved + phase (isothm = 4), and the immobile domain is assumed to have zero + concentration (isothm = 5 or 6). igetsc > 0, the initial concentration + for the sorbed phase or immobile liquid phase of all species will be + read. (default is 1). + rhob : float or array of floats (nlay, nrow, ncol) + rhob is the bulk density of the aquifer medium (unit, ML-3). rhob is + used if isothm = 1, 2, 3, 4, or 6. If rhob is not user-specified and + isothm is not 5 then rhob is set to 1.8e3. (default is None) + prsity2 : float or array of floats (nlay, nrow, ncol) + prsity2 is the porosity of the immobile domain (the ratio of pore + spaces filled with immobile fluids over the bulk volume of the aquifer + medium) when the simulation is intended to represent a dual-domain + system. prsity2 is used if isothm = 5 or 6. If prsity2 is not user- + specified and isothm = 5 or 6 then prsity2 is set to 0.1. + (default is None) + srconc : float or array of floats (nlay, nrow, ncol) + srconc is the user-specified initial concentration for the sorbed phase + of the first species if isothm = 4 (unit, MM-1). Note that for + equilibrium-controlled sorption, the initial concentration for the + sorbed phase cannot be specified. srconc is the user-specified initial + concentration of the first species for the immobile liquid phase if + isothm = 5 or 6 (unit, ML-3). If srconc is not user-specified and + isothm = 4, 5, or 6 then srconc is set to 0. (default is None). + sp1 : float or array of floats (nlay, nrow, ncol) + sp1 is the first sorption parameter for the first species. The use of + sp1 depends on the type of sorption selected (the value of isothm). + For linear sorption (isothm = 1) and nonequilibrium sorption (isothm = + 4), sp1 is the distribution coefficient (Kd) (unit, L3M-1). For + Freundlich sorption (isothm = 2), sp1 is the Freundlich equilibrium + constant (Kf) (the unit depends on the Freundlich exponent a). For + Langmuir sorption (isothm = 3), sp1 is the Langmuir equilibrium + constant (Kl) (unit, L3M-1 ). For dual-domain mass transfer without + sorption (isothm = 5), sp1 is not used, but still must be entered. For + dual-domain mass transfer with sorption (isothm = 6), sp1 is also the + distribution coefficient (Kd) (unit, L3M-1). If sp1 is not specified + and isothm > 0 then sp1 is set to 0. (default is None). + sp2 : float or array of floats (nlay, nrow, ncol) + sp2 is the second sorption or dual-domain model parameter for the first + species. The use of sp2 depends on the type of sorption or dual-domain + model selected. For linear sorption (isothm = 1), sp2 is read but not + used. 
For Freundlich sorption (isothm = 2), sp2 is the Freundlich + exponent a. For Langmuir sorption (isothm = 3), sp2 is the total + concentration of the sorption sites available ( S ) (unit, MM-1). For + nonequilibrium sorption (isothm = 4), sp2 is the first-order mass + transfer rate between the dissolved and sorbed phases (unit, T-1). For + dual-domain mass transfer (isothm = 5 or 6), sp2 is the first-order + mass transfer rate between the two domains (unit, T-1). If sp2 is not + specified and isothm > 0 then sp2 is set to 0. (default is None). + rc1 : float or array of floats (nlay, nrow, ncol) + rc1 is the first-order reaction rate for the dissolved (liquid) phase + for the first species (unit, T-1). rc1 is not used if ireact = 0. If a + dual-domain system is simulated, the reaction rates for the liquid + phase in the mobile and immobile domains are assumed to be equal. If + rc1 is not specified and ireact > 0 then rc1 is set to 0. + (default is None). + rc2 : float or array of floats (nlay, nrow, ncol) + rc2 is the first-order reaction rate for the sorbed phase for the first + species (unit, T-1). rc2 is not used if ireact = 0. If a dual-domain + system is simulated, the reaction rates for the sorbed phase in the + mobile and immobile domains are assumed to be equal. Generally, if the + reaction is radioactive decay, rc2 should be set equal to rc1, while + for biodegradation, rc2 may be different from rc1. Note that rc2 is + read but not used if no sorption is included in the simulation. If + rc2 is not specified and ireact > 0 then rc2 is set to 0. + (default is None). + extension : string + Filename extension (default is 'rct') + unitnumber : int + File unit number. If file unit number is None then an unused unit + number is used. (default is None). + + Other Parameters + ---------------- + srconcn : float or array of floats (nlay, nrow, ncol) + srconcn is the user-specified initial concentration for the sorbed + phase of species n. If srconcn is not passed as a **kwarg and + isothm = 4, 5, or 6 then srconc for species n is set to 0. + See description of srconc for a more complete description of srconcn. + sp1n : float or array of floats (nlay, nrow, ncol) + sp1n is the first sorption parameter for species n. If sp1n is not + passed as a **kwarg and isothm > 0 then sp1 for species n is set to 0. + See description of sp1 for a more complete description of sp1n. + sp2n : float or array of floats (nlay, nrow, ncol) + sp2n is the second sorption or dual-domain model parameter for species + n. If sp2n is not passed as a **kwarg and isothm > 0 then sp2 for + species n is set to 0. See description of sp2 for a more complete + description of sp2n. + rc1n : float or array of floats (nlay, nrow, ncol) + rc1n is the first-order reaction rate for the dissolved (liquid) phase + for species n. If rc1n is not passed as a **kwarg and ireact > 0 then + rc1 for species n is set to 0. See description of rc1 for a more + complete description of rc1n. + rc2n : float or array of floats (nlay, nrow, ncol) + rc2n is the first-order reaction rate for the sorbed phase for species + n. If rc2n is not passed as a **kwarg and ireact > 0 then rc2 for + species n is set to 0. See description of rc2 for a more complete + description of rc2n.
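The options above are easiest to see in use. The following is a minimal caller-side sketch, not part of the patch itself: it assumes a two-species (ncomp = 2) MT3DMS model has been configured elsewhere, and the parameter values are purely illustrative.

```python
import flopy

mt = flopy.mt3d.Mt3dms()  # assumes ncomp=2 was set up via the BTN package

# isothm=1 selects a linear isotherm, so sp1 is the distribution
# coefficient Kd (L3M-1) and rhob is the bulk density (ML-3).
# Per-species values use the '<name><species number>' kwarg convention
# described under Other Parameters: sp12 supplies sp1 for species 2.
rct = flopy.mt3d.Mt3dRct(mt, isothm=1, rhob=1.8e3,
                         sp1=0.25,   # Kd for species 1
                         sp12=0.10)  # Kd for species 2, consumed from **kwargs
```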
+ + + Attributes + ---------- + + Methods + ------- + + See Also + -------- + + Notes + ----- + + Examples + -------- + + >>> import flopy + >>> mt = flopy.mt3dms.Mt3dms() + >>> rct = flopy.mt3dms.Mt3dRct(mt) + + """ + + def __init__(self, model, isothm=0, ireact=0, igetsc=1, rhob=None, + prsity2=None, srconc=None, sp1=None, sp2=None, rc1=None, + rc2=None, extension='rct', unitnumber=None, + filenames=None, **kwargs): + """ + Package constructor. + + """ + + if unitnumber is None: + unitnumber = Mt3dRct.defaultunit() + elif unitnumber == 0: + unitnumber = Mt3dRct.reservedunit() + + # set filenames + if filenames is None: + filenames = [None] + elif isinstance(filenames, str): + filenames = [filenames] + + # Fill namefile items + name = [Mt3dRct.ftype()] + units = [unitnumber] + extra = [''] + + # set package name + fname = [filenames[0]] + + # Call ancestor's init to set self.parent, extension, name and unit number + Package.__init__(self, model, extension=extension, name=name, + unit_number=units, extra=extra, filenames=fname) + + nrow = model.nrow + ncol = model.ncol + nlay = model.nlay + ncomp = model.ncomp + + # Item E1: ISOTHM, IREACT, IRCTOP, IGETSC + self.isothm = isothm + self.ireact = ireact + self.irctop = 2 # All RCT vars are specified as 3D arrays + self.igetsc = igetsc + + # Item E2A: RHOB + if rhob is None: + rhob = 1.8e3 + self.rhob = Util3d(model, (nlay, nrow, ncol), np.float32, rhob, + name='rhob', locat=self.unit_number[0], + array_free_format=False) + + # Item E2B: PRSITY + if prsity2 is None: + prsity2 = 0.1 + self.prsity2 = Util3d(model, (nlay, nrow, ncol), np.float32, prsity2, + name='prsity2', locat=self.unit_number[0], + array_free_format=False) + + # Item E2C: SRCONC + if srconc is None: + srconc = 0.0 + self.srconc = [] + u3d = Util3d(model, (nlay, nrow, ncol), np.float32, srconc, + name='srconc1', locat=self.unit_number[0], + array_free_format=False) + self.srconc.append(u3d) + if ncomp > 1: + for icomp in range(2, ncomp + 1): + name = "srconc" + str(icomp) + val = 0.0 + if name in kwargs: + val = kwargs.pop(name) + else: + print("RCT: setting srconc for component " + + str(icomp) + " to zero, kwarg name " + + name) + u3d = Util3d(model, (nlay, nrow, ncol), np.float32, val, + name=name, locat=self.unit_number[0], + array_free_format=False) + self.srconc.append(u3d) + + # Item E3: SP1 + if sp1 is None: + sp1 = 0.0 + self.sp1 = [] + u3d = Util3d(model, (nlay, nrow, ncol), np.float32, sp1, name='sp11', + locat=self.unit_number[0], array_free_format=False) + self.sp1.append(u3d) + if ncomp > 1: + for icomp in range(2, ncomp + 1): + name = "sp1" + str(icomp) + val = 0.0 + if name in kwargs: + val = kwargs.pop(name) + else: + print("RCT: setting sp1 for component " + + str(icomp) + " to zero, kwarg name " + + name) + u3d = Util3d(model, (nlay, nrow, ncol), np.float32, val, + name=name, locat=self.unit_number[0], + array_free_format=False) + self.sp1.append(u3d) + + # Item E4: SP2 + if sp2 is None: + sp2 = 0.0 + self.sp2 = [] + u3d = Util3d(model, (nlay, nrow, ncol), np.float32, sp2, name='sp21', + locat=self.unit_number[0], array_free_format=False) + self.sp2.append(u3d) + if ncomp > 1: + for icomp in range(2, ncomp + 1): + name = "sp2" + str(icomp) + val = 0.0 + if name in kwargs: + val = kwargs.pop(name) + else: + print("RCT: setting sp2 for component " + + str(icomp) + " to zero, kwarg name " + + name) + u3d = Util3d(model, (nlay, nrow, ncol), np.float32, val, + name=name, locat=self.unit_number[0], + array_free_format=False) + self.sp2.append(u3d) + + # Item E5: 
RC1 + if rc1 is None: + rc1 = 0.0 + self.rc1 = [] + u3d = Util3d(model, (nlay, nrow, ncol), np.float32, rc1, name='rc11', + locat=self.unit_number[0], array_free_format=False) + self.rc1.append(u3d) + if ncomp > 1: + for icomp in range(2, ncomp + 1): + name = "rc1" + str(icomp) + val = 0.0 + if name in kwargs: + val = kwargs.pop(name) + else: + print("RCT: setting rc1 for component " + + str(icomp) + " to zero, kwarg name " + + name) + u3d = Util3d(model, (nlay, nrow, ncol), np.float32, val, + name=name, locat=self.unit_number[0], + array_free_format=False) + self.rc1.append(u3d) + + # Item E6: RC2 + if rc2 is None: + rc2 = 0.0 + self.rc2 = [] + u3d = Util3d(model, (nlay, nrow, ncol), np.float32, rc2, name='rc21', + locat=self.unit_number[0], array_free_format=False) + self.rc2.append(u3d) + if ncomp > 1: + for icomp in range(2, ncomp + 1): + name = "rc2" + str(icomp) + val = 0.0 + if name in kwargs: + val = kwargs.pop(name) + else: + print("RCT: setting rc2 for component " + + str(icomp) + " to zero, kwarg name " + + name) + u3d = Util3d(model, (nlay, nrow, ncol), np.float32, val, + name=name, locat=self.unit_number[0], + array_free_format=False) + self.rc2.append(u3d) + + # Check to make sure that all kwargs have been consumed + if len(list(kwargs.keys())) > 0: + raise Exception("RCT error: unrecognized kwargs: " + + ' '.join(list(kwargs.keys()))) + + self.parent.add_package(self) + return + + def __repr__(self): + return 'Chemical reaction package class' + + def write_file(self): + """ + Write the package file + + Returns + ------- + None + + """ + # Open file for writing + f_rct = open(self.fn_path, 'w') + f_rct.write('%10i%10i%10i%10i\n' % (self.isothm, self.ireact, + self.irctop, self.igetsc)) + if (self.isothm in [1, 2, 3, 4, 6]): + f_rct.write(self.rhob.get_file_entry()) + if (self.isothm in [5, 6]): + f_rct.write(self.prsity2.get_file_entry()) + if (self.igetsc > 0): + for icomp in range(len(self.srconc)): + f_rct.write(self.srconc[icomp].get_file_entry()) + if (self.isothm > 0): + for icomp in range(len(self.sp1)): + f_rct.write(self.sp1[icomp].get_file_entry()) + if (self.isothm > 0): + for icomp in range(len(self.sp2)): + f_rct.write(self.sp2[icomp].get_file_entry()) + if (self.ireact > 0): + for icomp in range(len(self.rc1)): + f_rct.write(self.rc1[icomp].get_file_entry()) + if (self.ireact > 0): + for icomp in range(len(self.rc2)): + f_rct.write(self.rc2[icomp].get_file_entry()) + f_rct.close() + return + + @staticmethod + def load(f, model, nlay=None, nrow=None, ncol=None, ncomp=None, + ext_unit_dict=None): + """ + Load an existing package. + + Parameters + ---------- + f : filename or file handle + File to load. + model : model object + The model object (of type :class:`flopy.mt3d.mt.Mt3dms`) to + which this package will be added. + nlay : int + Number of model layers in the reaction package. If nlay is not + specified, the number of layers in the passed model object is + used. (default is None). + nrow : int + Number of model rows in the reaction package. If nrow is not + specified, the number of rows in the passed model object is + used. (default is None). + ncol : int + Number of model columns in the reaction package. If ncol is not + specified, the number of columns in the passed model object is + used. (default is None). + ext_unit_dict : dictionary, optional + If the arrays in the file are specified using EXTERNAL, + or older style array control records, then `f` should be a file + handle.
In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. + + Returns + ------- + rct : Mt3dRct object + Mt3dRct object. + + Examples + -------- + + >>> import flopy + >>> mt = flopy.mt3d.Mt3dms() + >>> rct = flopy.mt3d.Mt3dRct.load('test.rct', mt) + + """ + + if model.verbose: + sys.stdout.write('loading rct package file...\n') + + # Open file, if necessary + openfile = not hasattr(f, 'read') + if openfile: + filename = f + f = open(filename, 'r') + + # Set dimensions if necessary + if nlay is None: + nlay = model.nlay + if nrow is None: + nrow = model.nrow + if ncol is None: + ncol = model.ncol + if ncomp is None: + ncomp = model.ncomp + + # Setup kwargs to store multispecies information + kwargs = {} + + # Item E1 + line = f.readline() + if model.verbose: + print(' loading ISOTHM, IREACT, IRCTOP, IGETSC...') + isothm = int(line[0:10]) + ireact = int(line[10:20]) + try: + irctop = int(line[20:30]) + except: + irctop = 0 + try: + igetsc = int(line[30:40]) + except: + igetsc = 0 + if model.verbose: + print(' ISOTHM {}'.format(isothm)) + print(' IREACT {}'.format(ireact)) + print(' IRCTOP {}'.format(irctop)) + print(' IGETSC {}'.format(igetsc)) + + # Item E2A: RHOB + rhob = None + if model.verbose: + print(' loading RHOB...') + if isothm in [1, 2, 3, 4, 6]: + rhob = Util3d.load(f, model, (nlay, nrow, ncol), np.float32, + 'rhob', ext_unit_dict, array_format="mt3d") + if model.verbose: + print(' RHOB {}'.format(rhob)) + + # Item E2A: PRSITY2 + prsity2 = None + if model.verbose: + print(' loading PRSITY2...') + if isothm in [5, 6]: + prsity2 = Util3d.load(f, model, (nlay, nrow, ncol), np.float32, + 'prsity2', ext_unit_dict, + array_format="mt3d") + if model.verbose: + print(' PRSITY2 {}'.format(prsity2)) + + # Item E2C: SRCONC + srconc = None + if model.verbose: + print(' loading SRCONC...') + if igetsc > 0: + srconc = Util3d.load(f, model, (nlay, nrow, ncol), np.float32, + 'srconc1', ext_unit_dict, array_format="mt3d") + if model.verbose: + print(' SRCONC {}'.format(srconc)) + if ncomp > 1: + for icomp in range(2, ncomp + 1): + name = "srconc" + str(icomp) + if model.verbose: + print(' loading {}...'.format(name)) + u3d = Util3d.load(f, model, (nlay, nrow, ncol), np.float32, + name, ext_unit_dict, array_format="mt3d") + kwargs[name] = u3d + if model.verbose: + print(' SRCONC{} {}'.format(icomp, u3d)) + + # Item E3: SP1 + sp1 = None + if model.verbose: + print(' loading SP1...') + if isothm > 0: + sp1 = Util3d.load(f, model, (nlay, nrow, ncol), np.float32, + 'sp11', ext_unit_dict, array_format="mt3d") + if model.verbose: + print(' SP1 {}'.format(sp1)) + if ncomp > 1: + for icomp in range(2, ncomp + 1): + name = "sp1" + str(icomp) + if model.verbose: + print(' loading {}...'.format(name)) + u3d = Util3d.load(f, model, (nlay, nrow, ncol), np.float32, + name, ext_unit_dict, array_format="mt3d") + kwargs[name] = u3d + if model.verbose: + print(' SP1{} {}'.format(icomp, u3d)) + + # Item E4: SP2 + sp2 = None + if model.verbose: + print(' loading SP2...') + if isothm > 0: + sp2 = Util3d.load(f, model, (nlay, nrow, ncol), np.float32, + 'sp21', ext_unit_dict, array_format="mt3d") + if model.verbose: + print(' SP2 {}'.format(sp2)) + if ncomp > 1: + for icomp in range(2, ncomp + 1): + name = "sp2" + str(icomp) + if model.verbose: + print(' loading {}...'.format(name)) + u3d = Util3d.load(f, model, (nlay, nrow, ncol), np.float32, + name, ext_unit_dict, array_format="mt3d") + kwargs[name] = u3d + if model.verbose: + print(' SP2{} 
{}'.format(icomp, u3d)) + + # Item E5: RC1 + rc1 = None + if model.verbose: + print(' loading RC1...') + if ireact > 0: + rc1 = Util3d.load(f, model, (nlay, nrow, ncol), np.float32, + 'rc11', ext_unit_dict, + array_format="mt3d") + if model.verbose: + print(' RC1 {}'.format(rc1)) + if ncomp > 1: + for icomp in range(2, ncomp + 1): + name = "rc1" + str(icomp) + if model.verbose: + print(' loading {}...'.format(name)) + u3d = Util3d.load(f, model, (nlay, nrow, ncol), np.float32, + name, ext_unit_dict, array_format="mt3d") + kwargs[name] = u3d + if model.verbose: + print(' RC1{} {}'.format(icomp, u3d)) + + # Item E6: RC2 + rc2 = None + if model.verbose: + print(' loading RC2...') + if ireact > 0: + rc2 = Util3d.load(f, model, (nlay, nrow, ncol), np.float32, + 'rc21', ext_unit_dict, array_format="mt3d") + if model.verbose: + print(' RC2 {}'.format(rc2)) + if ncomp > 1: + for icomp in range(2, ncomp + 1): + name = "rc2" + str(icomp) + if model.verbose: + print(' loading {}...'.format(name)) + u3d = Util3d.load(f, model, (nlay, nrow, ncol), np.float32, + name, ext_unit_dict, array_format="mt3d") + kwargs[name] = u3d + if model.verbose: + print(' RC2{} {}'.format(icomp, u3d)) + + if openfile: + f.close() + + # set package unit number + unitnumber = None + filenames = [None] + if ext_unit_dict is not None: + unitnumber, filenames[0] = \ + model.get_ext_dict_attr(ext_unit_dict, + filetype=Mt3dRct.ftype()) + + # Construct and return rct package + rct = Mt3dRct(model, isothm=isothm, ireact=ireact, igetsc=igetsc, + rhob=rhob, prsity2=prsity2, srconc=srconc, sp1=sp1, + sp2=sp2, rc1=rc1, rc2=rc2, unitnumber=unitnumber, + filenames=filenames, **kwargs) + return rct + + @staticmethod + def ftype(): + return 'RCT' + + @staticmethod + def defaultunit(): + return 36 + + @staticmethod + def reservedunit(): + return 8 diff --git a/flopy/mt3d/mtssm.py b/flopy/mt3d/mtssm.py index fdfaed563d..8bb5f90713 100644 --- a/flopy/mt3d/mtssm.py +++ b/flopy/mt3d/mtssm.py @@ -1,736 +1,736 @@ -import sys -import numpy as np -import warnings -from ..pakbase import Package -from ..utils import Util2d, MfList, Transient2d - -# Note: Order matters as first 6 need logical flag on line 1 of SSM file -SsmLabels = ['WEL', 'DRN', 'RCH', 'EVT', 'RIV', 'GHB', 'BAS6', 'CHD', 'PBC'] - - -class SsmPackage(object): - def __init__(self, label='', instance=None, needTFstr=False): - self.label = label - self.instance = instance - self.needTFstr = needTFstr - self.TFstr = ' F' - if self.instance is not None: - self.TFstr = ' T' - - -class Mt3dSsm(Package): - """ - MT3DMS Source and Sink Mixing Package Class. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.mt3d.mt.Mt3dms`) to which - this package will be added. - crch : Transient2d, scalar, array of floats, or dictionary - CRCH is the concentration of recharge for species 1. - If the recharge flux is positive, it acts as a source whose - concentration can be specified as desired. If the recharge flux is - negative, it acts as a sink (discharge) whose concentration is always - set equal to the concentration of groundwater at the cell where - discharge occurs. Note that the location and flow rate of - recharge/discharge are obtained from the flow model directly through - the unformatted flow-transport link file. crch can be specified as - an array, if the array is constant for the entire simulation. 
If - crch changes by stress period, then the user must provide a - dictionary, where the key is the stress period number (zero based) and - the value is the recharge array. The recharge concentration - can be specified for additional species by passing additional - arguments to the Mt3dSsm constructor. For example, to specify the - recharge concentration for species two one could use - crch2={0: 0., 1: 10*np.ones((nrow, ncol), dtype=np.float)} as - and additional keyword argument that is passed to Mt3dSsm when making - the ssm object. - cevt : Transient2d, scalar, array of floats, or dictionary - is the concentration of evapotranspiration flux for species 1. - Evapotranspiration is the only type of sink whose - concentration may be specified externally. Note that the - concentration of a sink cannot be greater than that of the aquifer at - the sink cell. Thus, if the sink concentration is specified greater - than that of the aquifer, it is automatically set equal to the - concentration of the aquifer. Also note that the location and flow - rate of evapotranspiration are obtained from the flow model directly - through the unformatted flow-transport link file. For multi-species - simulations, see crch for a description of how to specify - additional concentrations arrays for each species. - stress_period_data : dictionary - Keys in the dictionary are stress zero-based stress period numbers; - values in the dictionary are recarrays of SSM boundaries. The - dtype for the recarray can be obtained using ssm.dtype (after the - ssm package has been created). The default dtype for the recarray is - np.dtype([('k', np.int), ("i", np.int), ("j", np.int), - ("css", np.float32), ("itype", np.int), - ((cssms(n), np.float), n=1, ncomp)]) - If there are more than one component species, then additional entries - will be added to the dtype as indicated by cssm(n). - Note that if the number of dictionary entries is less than the number - of stress periods, then the last recarray of boundaries will apply - until the end of the simulation. Full details of all options to - specify stress_period_data can be found in the - flopy3_multi-component_SSM ipython notebook in the Notebook - subdirectory of the examples directory. - css is the specified source concentration or mass-loading rate, - depending on the value of ITYPE, in a single-species simulation, - (For a multispecies simulation, CSS is not used, but a dummy value - still needs to be entered here.) - Note that for most types of sources, CSS is interpreted as the - source concentration with the unit of mass per unit volume (ML-3), - which, when multiplied by its corresponding flow rate (L3T-1) from - the flow model, yields the mass-loading rate (MT-1) of the source. - For a special type of sources (ITYPE = 15), CSS is taken directly as - the mass-loading rate (MT-1) of the source so that no flow rate is - required from the flow model. - Furthermore, if the source is specified as a constant-concentration - cell (itype = -1), the specified value of CSS is assigned directly as - the concentration of the designated cell. If the designated cell is - also associated with a sink/source term in the flow model, the flow - rate is not used. - itype is an integer indicating the type of the point source. An itype - dictionary can be retrieved from the ssm object as - itype = mt3d.Mt3dSsm.itype_dict() - (CSSMS(n), n=1, NCOMP) defines the concentrations of a point source - for multispecies simulation with NCOMP>1. 
In a multispecies - simulation, it is necessary to define the concentrations of all - species associated with a point source. As an example, if a chemical - of a certain species is injected into a multispecies system, the - concentration of that species is assigned a value greater than zero - while the concentrations of all other species are assigned zero. - CSSMS(n) can be entered in free format, separated by a comma or space - between values. - Several important notes on assigning concentration for the - constant-concentration condition (ITYPE = -1) are listed below: - The constant-concentration condition defined in this input file takes - precedence to that defined in the Basic Transport Package input file. - In a multiple stress period simulation, a constant-concentration - cell, once defined, will remain a constant- concentration cell in the - duration of the simulation, but its concentration value can be - specified to vary in different stress periods. - In a multispecies simulation, if it is only necessary to define - different constant-concentration conditions for selected species at - the same cell location, specify the desired concentrations for those - species, and assign a negative value for all other species. The - negative value is a flag used by MT3DMS to skip assigning the - constant-concentration condition for the designated species. - dtype : np.dtype - dtype to use for the recarray of boundaries. If left as None (the - default) then the dtype will be automatically constructed. - extension : string - Filename extension (default is 'ssm') - unitnumber : int - File unit number (default is None). - filenames : str or list of str - Filenames to use for the package. If filenames=None the package name - will be created using the model name and package extension. If a - single string is passed the package will be set to the string. - Default is None. - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - - Examples - -------- - - >>> import flopy - >>> m = flopy.mt3d.Mt3dms() - >>> itype = mt3d.Mt3dSsm.itype_dict() - >>> ssm_data = {} - >>> ssm_data[0] = [(4, 4, 4, 1.0, itype['GHB'], 1.0, 100.0)] - >>> ssm_data[5] = [(4, 4, 4, 0.5, itype['GHB'], 0.5, 200.0)] - >>> ssm = flopy.mt3d.Mt3dSsm(m, stress_period_data=ssm_data) - - """ - - def __init__(self, model, crch=None, cevt=None, mxss=None, - stress_period_data=None, dtype=None, - extension='ssm', unitnumber=None, filenames=None, - **kwargs): - - if unitnumber is None: - unitnumber = Mt3dSsm.defaultunit() - elif unitnumber == 0: - unitnumber = Mt3dSsm.reservedunit() - - # set filenames - if filenames is None: - filenames = [None] - elif isinstance(filenames, str): - filenames = [filenames] - - # Fill namefile items - name = [Mt3dSsm.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - deprecated_kwargs = ['criv', 'cghb', 'cibd', 'cchd', 'cpbc', 'cwel'] - for key in kwargs: - if (key in deprecated_kwargs): - warnings.warn("Deprecation Warning: Keyword argument '" + key + - "' no longer supported. 
Use " + - "'stress_period_data' instead.") - - # Set dimensions - mf = self.parent.mf - nrow = model.nrow - ncol = model.ncol - nlay = model.nlay - ncomp = model.ncomp - nper = model.nper - - # Create a list of SsmPackage (class defined above) - self.__SsmPackages = [] - if mf is not None: - for i, label in enumerate(SsmLabels): - mfpack = mf.get_package(label) - ssmpack = SsmPackage(label, mfpack, (i < 6)) - self.__SsmPackages.append( - ssmpack) # First 6 need T/F flag in file line 1 - - if dtype is not None: - self.dtype = dtype - else: - self.dtype = self.get_default_dtype(ncomp) - - if stress_period_data is None: - self.stress_period_data = None - else: - self.stress_period_data = MfList(self, model=model, - data=stress_period_data, - list_free_format=False) - - if mxss is None and mf is None: - warnings.warn('SSM Package: mxss is None and modflowmodel is ' + - 'None. Cannot calculate max number of sources ' + - 'and sinks. Estimating from stress_period_data. ') - - if mxss is None: - # Need to calculate max number of sources and sinks - self.mxss = 0 - mxss_kper = 0 - - # Do not assume first key (stress period 0) has data, it may - # not. Cycle through stress periods looking for one w/ data - if self.stress_period_data is not None: - for i in range(nper): - if i in self.stress_period_data.data: - mxss_kper += np.sum( - self.stress_period_data.data[i].itype == -1) - mxss_kper += np.sum( - self.stress_period_data.data[i].itype == -15) - self.mxss = max(self.mxss, mxss_kper) - - if isinstance(self.parent.btn.icbund, np.ndarray): - self.mxss += (self.parent.btn.icbund < 0).sum() - - for p in self.__SsmPackages: - if ((p.label == 'BAS6') and (p.instance != None)): - self.mxss += (p.instance.ibound.array < 0).sum() - elif p.instance != None: - self.mxss += p.instance.ncells() - else: - self.mxss = mxss - - # Note: list is used for multi-species, NOT for stress periods! - self.crch = None - try: - if crch is None and model.mf.rch is not None: - print("found 'rch' in modflow model, resetting crch to 0.0") - crch = 0.0 - except: - if model.verbose: - print(' explicit crcg in file') - - if crch is not None: - - self.crch = [] - t2d = Transient2d(model, (nrow, ncol), np.float32, - crch, name='crch1', - locat=self.unit_number[0], - array_free_format=False) - self.crch.append(t2d) - if ncomp > 1: - for icomp in range(2, ncomp + 1): - val = 0.0 - name = "crch" + str(icomp) - if name in list(kwargs.keys()): - val = kwargs.pop(name) - else: - print("SSM: setting crch for component " + \ - str(icomp) + " to zero. 
kwarg name " + \ - name) - t2d = Transient2d(model, (nrow, ncol), np.float32, - val, name=name, - locat=self.unit_number[0], - array_free_format=False) - self.crch.append(t2d) - # else: - # try: - # if model.mf.rch is not None: - # print("found 'rch' in modflow model, resetting crch to 0.0") - # self.crch = [Transient2d(model, (nrow, ncol), np.float32, - # 0, name='crch1', - # locat=self.unit_number[0], - # array_free_format=False)] - # - # else: - # self.crch = None - # except: - # self.crch = None - - self.cevt = None - try: - if cevt is None and ( - model.mf.evt is not None or model.mf.ets is not None): - print( - "found 'ets'/'evt' in modflow model, resetting cevt to 0.0") - cevt = 0.0 - except: - if model.verbose: - print(' explicit cevt in file') - - if cevt is not None: - self.cevt = [] - t2d = Transient2d(model, (nrow, ncol), np.float32, - cevt, name='cevt1', - locat=self.unit_number[0], - array_free_format=False) - self.cevt.append(t2d) - if ncomp > 1: - for icomp in range(2, ncomp + 1): - val = 0.0 - name = "cevt" + str(icomp) - if name in list(kwargs.keys()): - val = kwargs[name] - kwargs.pop(name) - else: - print("SSM: setting cevt for component " + \ - str(icomp) + " to zero, kwarg name " + \ - name) - t2d = Transient2d(model, (nrow, ncol), np.float32, - val, name=name, - locat=self.unit_number[0], - array_free_format=False) - self.cevt.append(t2d) - - # else: - # try: - # if model.mf.evt is not None or model.mf.ets is not None: - # print("found 'ets'/'evt' in modflow model, resetting cevt to 0.0") - # self.cevt = [Transient2d(model, (nrow, ncol), np.float32, - # 0, name='cevt1', - # locat=self.unit_number[0], - # array_free_format=False)] - # - # else: - # self.cevt = None - # except: - # self.cevt = None - - if len(list(kwargs.keys())) > 0: - raise Exception("SSM error: unrecognized kwargs: " + - ' '.join(list(kwargs.keys()))) - - # Add self to parent and return - self.parent.add_package(self) - return - - def from_package(self, package, ncomp_aux_names): - """ - read the point source and sink info from a package - ncomp_aux_names (list): the aux variable names in the package - that are the component concentrations - """ - raise NotImplementedError() - - @staticmethod - def itype_dict(): - itype = {} - itype["CHD"] = 1 - itype["BAS6"] = 1 - itype["PBC"] = 1 - itype["WEL"] = 2 - itype["DRN"] = 3 - itype["RIV"] = 4 - itype["GHB"] = 5 - itype["MAS"] = 15 - itype["CC"] = -1 - return itype - - @staticmethod - def get_default_dtype(ncomp=1): - """ - Construct a dtype for the recarray containing the list of sources - and sinks - """ - type_list = [("k", np.int), ("i", np.int), ("j", np.int), - ("css", np.float32), ("itype", np.int)] - if ncomp > 1: - for comp in range(1, ncomp + 1): - comp_name = "cssm({0:02d})".format(comp) - type_list.append((comp_name, np.float32)) - dtype = np.dtype(type_list) - return dtype - - def write_file(self): - """ - Write the package file - - Returns - ------- - None - - """ - # Open file for writing - f_ssm = open(self.fn_path, 'w') - for p in self.__SsmPackages: - if p.needTFstr: - f_ssm.write(p.TFstr) - - f_ssm.write(' F F F F F F F F F F\n') - - f_ssm.write('{:10d}\n'.format(self.mxss)) - - # Loop through each stress period and write ssm information - nper = self.parent.nper - for kper in range(nper): - if f_ssm.closed == True: - f_ssm = open(f_ssm.name, 'a') - - # Distributed sources and sinks (Recharge and Evapotranspiration) - if self.crch is not None: - # If any species need to be written, then all need to be - # written - incrch = -1 - for t2d 
in self.crch: - incrchicomp, file_entry = t2d.get_kper_entry(kper) - incrch = max(incrch, incrchicomp) - if incrch == 1: - break - f_ssm.write('{:10d}\n'.format(incrch)) - if incrch == 1: - for t2d in self.crch: - u2d = t2d[kper] - file_entry = u2d.get_file_entry() - f_ssm.write(file_entry) - - if self.cevt is not None: - # If any species need to be written, then all need to be - # written - incevt = -1 - for t2d in self.cevt: - incevticomp, file_entry = t2d.get_kper_entry(kper) - incevt = max(incevt, incevticomp) - if incevt == 1: - break - f_ssm.write('{:10d}\n'.format(incevt)) - if incevt == 1: - for t2d in self.cevt: - u2d = t2d[kper] - file_entry = u2d.get_file_entry() - f_ssm.write(file_entry) - - # List of sources - if self.stress_period_data is not None: - self.stress_period_data.write_transient(f_ssm, single_per=kper) - else: - f_ssm.write('{}\n'.format(0)) - - f_ssm.close() - return - - @staticmethod - def load(f, model, nlay=None, nrow=None, ncol=None, nper=None, - ncomp=None, ext_unit_dict=None): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.mt3d.mt.Mt3dms`) to - which this package will be added. - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - - Returns - ------- - ssm : Mt3dSsm object - Mt3dSsm object. - - Examples - -------- - - >>> import flopy - >>> mt = flopy.mt3d.Mt3dms() - >>> ssm = flopy.mt3d.Mt3dSsm.load('test.ssm', mt) - - """ - - if model.verbose: - sys.stdout.write('loading ssm package file...\n') - - # Open file, if necessary - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - - # Set modflow model and dimensions if necessary - mf = model.mf - if nlay is None: - nlay = model.nlay - if nrow is None: - nrow = model.nrow - if ncol is None: - ncol = model.ncol - if nper is None: - nper = model.nper - if ncomp is None: - ncomp = model.ncomp - - # dtype - dtype = Mt3dSsm.get_default_dtype(ncomp) - - # Dataset 0 -- comment line - while True: - line = f.readline() - if line[0] != '#': - break - - # Item D1: Dummy input line - line already read above - if model.verbose: - print( - ' loading FWEL, FDRN, FRCH, FEVT, FRIV, FGHB, (FNEW(n), n=1,4)...') - fwel = line[0:2] - fdrn = line[2:4] - frch = line[4:6] - fevt = line[6:8] - friv = line[8:10] - fghb = line[10:12] - if len(line) >= 14: - fnew1 = line[12:14] - else: - fnew1 = 'F' - if len(line) >= 16: - fnew2 = line[14:16] - else: - fnew2 = 'F' - if len(line) >= 18: - fnew3 = line[16:18] - else: - fnew3 = 'F' - if len(line) >= 20: - fnew4 = line[18:20] - else: - fnew4 = 'F' - if model.verbose: - print(' FWEL {}'.format(fwel)) - print(' FDRN {}'.format(fdrn)) - print(' FRCH {}'.format(frch)) - print(' FEVT {}'.format(fevt)) - print(' FRIV {}'.format(friv)) - print(' FGHB {}'.format(fghb)) - print(' FNEW1 {}'.format(fnew1)) - print(' FNEW2 {}'.format(fnew2)) - print(' FNEW3 {}'.format(fnew3)) - print(' FNEW4 {}'.format(fnew4)) - - # Override the logical settings at top of ssm file using the - # modflowmodel, if it is attached to parent - if mf is not None: - rchpack = mf.get_package('RCH') - if rchpack is not None: - frch = 't' - evtpack = mf.get_package('EVT') - if evtpack is not None: - fevt = 't' - - # 
Item D2: MXSS, ISSGOUT - mxss = None - if model.verbose: - print(' loading MXSS, ISSGOUT...') - line = f.readline() - mxss = int(line[0:10]) - try: - issgout = int(line[10:20]) - except: - issgout = 0 - if model.verbose: - print(' MXSS {}'.format(mxss)) - print(' ISSGOUT {}'.format(issgout)) - - # kwargs needed to construct crch2, crch3, etc. for multispecies - kwargs = {} - - crch = None - if 't' in frch.lower(): - t2d = 0. - crch = {0: t2d} - if ncomp > 1: - for icomp in range(2, ncomp + 1): - name = "crch" + str(icomp) - t2d = 0. - kwargs[name] = {0: t2d} - - cevt = None - if 't' in fevt.lower(): - t2d = 0. - cevt = {0: t2d} - if ncomp > 1: - for icomp in range(2, ncomp + 1): - name = "cevt" + str(icomp) - t2d = 0. - kwargs[name] = {0: t2d} - - stress_period_data = {} - - for iper in range(nper): - - if model.verbose: - print(" loading ssm for kper {0:5d}".format(iper + 1)) - - # Item D3: INCRCH - incrch = -1 - if 't' in frch.lower(): - if model.verbose: - print(' loading INCRCH...') - line = f.readline() - incrch = int(line[0:10]) - - # Item D4: CRCH - if incrch >= 0: - if model.verbose: - print(' loading CRCH...') - t = Util2d.load(f, model, (nrow, ncol), np.float32, 'crch', - ext_unit_dict, array_format="mt3d") - crch[iper] = t - # Load each multispecies array - if ncomp > 1: - for icomp in range(2, ncomp + 1): - name = "crch" + str(icomp) - if model.verbose: - print(' loading {}...'.format(name)) - t = Util2d.load(f, model, (nrow, ncol), - np.float32, name, ext_unit_dict, - array_format="mt3d") - crchicomp = kwargs[name] - crchicomp[iper] = t - - # Item D5: INCEVT - incevt = -1 - if 't' in fevt.lower(): - if model.verbose: - print(' loading INCEVT...') - line = f.readline() - incevt = int(line[0:10]) - - # Item D6: CEVT - if incevt >= 0: - if model.verbose: - print(' loading CEVT...') - t = Util2d.load(f, model, (nrow, ncol), np.float32, 'cevt', - ext_unit_dict, array_format="mt3d") - cevt[iper] = t - # Load each multispecies array - if ncomp > 1: - for icomp in range(2, ncomp + 1): - name = "cevt" + str(icomp) - if model.verbose: - print(' loading {}...'.format(name)) - t = Util2d.load(f, model, (nrow, ncol), - np.float32, name, ext_unit_dict, - array_format="mt3d") - cevticomp = kwargs[name] - cevticomp[iper] = t - - # Item D7: NSS - if model.verbose: - print(' loading NSS...') - line = f.readline() - nss = int(line[0:10]) - if model.verbose: - print(' NSS {}'.format(nss)) - - # Item D8: KSS, ISS, JSS, CSS, ITYPE, (CSSMS(n),n=1,NCOMP) - if model.verbose: - print(' loading KSS, ISS, JSS, CSS, ITYPE, ' - '(CSSMS(n),n=1,NCOMP)...') - if nss > 0: - current = np.empty((nss), dtype=dtype) - for ibnd in range(nss): - line = f.readline() - t = [] - for ivar in range(5): - istart = ivar * 10 - istop = istart + 10 - t.append(line[istart:istop]) - ncssms = len(current.dtype.names) - 5 - if ncssms > 0: - tt = line[istop:].strip().split() - for ivar in range(ncssms): - t.append(tt[ivar]) - current[ibnd] = tuple(t[:len(current.dtype.names)]) - # convert indices to zero-based - current['k'] -= 1 - current['i'] -= 1 - current['j'] -= 1 - current = current.view(np.recarray) - stress_period_data[iper] = current - elif nss == 0: - stress_period_data[iper] = nss - - - if openfile: - f.close() - - # set package unit number - unitnumber = None - filenames = [None] - if ext_unit_dict is not None: - unitnumber, filenames[0] = \ - model.get_ext_dict_attr(ext_unit_dict, - filetype=Mt3dSsm.ftype()) - - # Construct and return ssm package - ssm = Mt3dSsm(model, crch=crch, cevt=cevt, mxss=mxss, - 
stress_period_data=stress_period_data, - unitnumber=unitnumber, filenames=filenames, **kwargs) - return ssm - - @staticmethod - def ftype(): - return 'SSM' - - @staticmethod - def defaultunit(): - return 34 - - @staticmethod - def reservedunit(): - return 4 +import sys +import numpy as np +import warnings +from ..pakbase import Package +from ..utils import Util2d, MfList, Transient2d + +# Note: Order matters as first 6 need logical flag on line 1 of SSM file +SsmLabels = ['WEL', 'DRN', 'RCH', 'EVT', 'RIV', 'GHB', 'BAS6', 'CHD', 'PBC'] + + +class SsmPackage(object): + def __init__(self, label='', instance=None, needTFstr=False): + self.label = label + self.instance = instance + self.needTFstr = needTFstr + self.TFstr = ' F' + if self.instance is not None: + self.TFstr = ' T' + + +class Mt3dSsm(Package): + """ + MT3DMS Source and Sink Mixing Package Class. + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.mt3d.mt.Mt3dms`) to which + this package will be added. + crch : Transient2d, scalar, array of floats, or dictionary + CRCH is the concentration of recharge for species 1. + If the recharge flux is positive, it acts as a source whose + concentration can be specified as desired. If the recharge flux is + negative, it acts as a sink (discharge) whose concentration is always + set equal to the concentration of groundwater at the cell where + discharge occurs. Note that the location and flow rate of + recharge/discharge are obtained from the flow model directly through + the unformatted flow-transport link file. crch can be specified as + an array, if the array is constant for the entire simulation. If + crch changes by stress period, then the user must provide a + dictionary, where the key is the stress period number (zero based) and + the value is the recharge array. The recharge concentration + can be specified for additional species by passing additional + arguments to the Mt3dSsm constructor. For example, to specify the + recharge concentration for species two one could use + crch2={0: 0., 1: 10*np.ones((nrow, ncol), dtype=np.float)} as + an additional keyword argument that is passed to Mt3dSsm when making + the ssm object. + cevt : Transient2d, scalar, array of floats, or dictionary + is the concentration of evapotranspiration flux for species 1. + Evapotranspiration is the only type of sink whose + concentration may be specified externally. Note that the + concentration of a sink cannot be greater than that of the aquifer at + the sink cell. Thus, if the sink concentration is specified greater + than that of the aquifer, it is automatically set equal to the + concentration of the aquifer. Also note that the location and flow + rate of evapotranspiration are obtained from the flow model directly + through the unformatted flow-transport link file. For multi-species + simulations, see crch for a description of how to specify + additional concentration arrays for each species. + stress_period_data : dictionary + Keys in the dictionary are zero-based stress period numbers; + values in the dictionary are recarrays of SSM boundaries. The + dtype for the recarray can be obtained using ssm.dtype (after the + ssm package has been created). The default dtype for the recarray is + np.dtype([('k', np.int), ("i", np.int), ("j", np.int), + ("css", np.float32), ("itype", np.int), + ((cssms(n), np.float), n=1, ncomp)]) + If there is more than one component species, then additional entries + will be added to the dtype as indicated by cssm(n).
+ Note that if the number of dictionary entries is less than the number + of stress periods, then the last recarray of boundaries will apply + until the end of the simulation. Full details of all options to + specify stress_period_data can be found in the + flopy3_multi-component_SSM ipython notebook in the Notebook + subdirectory of the examples directory. + css is the specified source concentration or mass-loading rate, + depending on the value of ITYPE, in a single-species simulation, + (For a multispecies simulation, CSS is not used, but a dummy value + still needs to be entered here.) + Note that for most types of sources, CSS is interpreted as the + source concentration with the unit of mass per unit volume (ML-3), + which, when multiplied by its corresponding flow rate (L3T-1) from + the flow model, yields the mass-loading rate (MT-1) of the source. + For a special type of sources (ITYPE = 15), CSS is taken directly as + the mass-loading rate (MT-1) of the source so that no flow rate is + required from the flow model. + Furthermore, if the source is specified as a constant-concentration + cell (itype = -1), the specified value of CSS is assigned directly as + the concentration of the designated cell. If the designated cell is + also associated with a sink/source term in the flow model, the flow + rate is not used. + itype is an integer indicating the type of the point source. An itype + dictionary can be retrieved from the ssm object as + itype = mt3d.Mt3dSsm.itype_dict() + (CSSMS(n), n=1, NCOMP) defines the concentrations of a point source + for multispecies simulation with NCOMP>1. In a multispecies + simulation, it is necessary to define the concentrations of all + species associated with a point source. As an example, if a chemical + of a certain species is injected into a multispecies system, the + concentration of that species is assigned a value greater than zero + while the concentrations of all other species are assigned zero. + CSSMS(n) can be entered in free format, separated by a comma or space + between values. + Several important notes on assigning concentration for the + constant-concentration condition (ITYPE = -1) are listed below: + The constant-concentration condition defined in this input file takes + precedence to that defined in the Basic Transport Package input file. + In a multiple stress period simulation, a constant-concentration + cell, once defined, will remain a constant- concentration cell in the + duration of the simulation, but its concentration value can be + specified to vary in different stress periods. + In a multispecies simulation, if it is only necessary to define + different constant-concentration conditions for selected species at + the same cell location, specify the desired concentrations for those + species, and assign a negative value for all other species. The + negative value is a flag used by MT3DMS to skip assigning the + constant-concentration condition for the designated species. + dtype : np.dtype + dtype to use for the recarray of boundaries. If left as None (the + default) then the dtype will be automatically constructed. + extension : string + Filename extension (default is 'ssm') + unitnumber : int + File unit number (default is None). + filenames : str or list of str + Filenames to use for the package. If filenames=None the package name + will be created using the model name and package extension. If a + single string is passed the package will be set to the string. + Default is None. 
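To make the recarray conventions above concrete, here is a short sketch (not part of the patch; the cell indices and concentrations are invented, and a two-species model is assumed to have been configured elsewhere):

```python
import flopy

# The default dtype gains one cssm(nn) field per species when ncomp > 1.
dtype = flopy.mt3d.Mt3dSsm.get_default_dtype(ncomp=2)
print(dtype.names)
# ('k', 'i', 'j', 'css', 'itype', 'cssm(01)', 'cssm(02)')

# itype_dict() maps boundary labels to ITYPE codes; 'CC' (-1) is the
# constant-concentration condition discussed above.
itype = flopy.mt3d.Mt3dSsm.itype_dict()

# One constant-concentration cell in stress period 0, with zero-based
# (layer, row, column); the trailing values are cssm(01) and cssm(02).
ssm_data = {0: [(0, 3, 3, 25.0, itype['CC'], 25.0, 0.0)]}

mt = flopy.mt3d.Mt3dms()  # assumes ncomp=2 configured via the BTN package
ssm = flopy.mt3d.Mt3dSsm(mt, stress_period_data=ssm_data)
```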
+ + Attributes + ---------- + + Methods + ------- + + See Also + -------- + + Notes + ----- + + Examples + -------- + + >>> import flopy + >>> m = flopy.mt3d.Mt3dms() + >>> itype = mt3d.Mt3dSsm.itype_dict() + >>> ssm_data = {} + >>> ssm_data[0] = [(4, 4, 4, 1.0, itype['GHB'], 1.0, 100.0)] + >>> ssm_data[5] = [(4, 4, 4, 0.5, itype['GHB'], 0.5, 200.0)] + >>> ssm = flopy.mt3d.Mt3dSsm(m, stress_period_data=ssm_data) + + """ + + def __init__(self, model, crch=None, cevt=None, mxss=None, + stress_period_data=None, dtype=None, + extension='ssm', unitnumber=None, filenames=None, + **kwargs): + + if unitnumber is None: + unitnumber = Mt3dSsm.defaultunit() + elif unitnumber == 0: + unitnumber = Mt3dSsm.reservedunit() + + # set filenames + if filenames is None: + filenames = [None] + elif isinstance(filenames, str): + filenames = [filenames] + + # Fill namefile items + name = [Mt3dSsm.ftype()] + units = [unitnumber] + extra = [''] + + # set package name + fname = [filenames[0]] + + # Call ancestor's init to set self.parent, extension, name and unit number + Package.__init__(self, model, extension=extension, name=name, + unit_number=units, extra=extra, filenames=fname) + + deprecated_kwargs = ['criv', 'cghb', 'cibd', 'cchd', 'cpbc', 'cwel'] + for key in kwargs: + if (key in deprecated_kwargs): + warnings.warn("Deprecation Warning: Keyword argument '" + key + + "' no longer supported. Use " + + "'stress_period_data' instead.") + + # Set dimensions + mf = self.parent.mf + nrow = model.nrow + ncol = model.ncol + nlay = model.nlay + ncomp = model.ncomp + nper = model.nper + + # Create a list of SsmPackage (class defined above) + self.__SsmPackages = [] + if mf is not None: + for i, label in enumerate(SsmLabels): + mfpack = mf.get_package(label) + ssmpack = SsmPackage(label, mfpack, (i < 6)) + self.__SsmPackages.append( + ssmpack) # First 6 need T/F flag in file line 1 + + if dtype is not None: + self.dtype = dtype + else: + self.dtype = self.get_default_dtype(ncomp) + + if stress_period_data is None: + self.stress_period_data = None + else: + self.stress_period_data = MfList(self, model=model, + data=stress_period_data, + list_free_format=False) + + if mxss is None and mf is None: + warnings.warn('SSM Package: mxss is None and modflowmodel is ' + + 'None. Cannot calculate max number of sources ' + + 'and sinks. Estimating from stress_period_data. ') + + if mxss is None: + # Need to calculate max number of sources and sinks + self.mxss = 0 + mxss_kper = 0 + + # Do not assume first key (stress period 0) has data, it may + # not. Cycle through stress periods looking for one w/ data + if self.stress_period_data is not None: + for i in range(nper): + if i in self.stress_period_data.data: + mxss_kper += np.sum( + self.stress_period_data.data[i].itype == -1) + mxss_kper += np.sum( + self.stress_period_data.data[i].itype == -15) + self.mxss = max(self.mxss, mxss_kper) + + if isinstance(self.parent.btn.icbund, np.ndarray): + self.mxss += (self.parent.btn.icbund < 0).sum() + + for p in self.__SsmPackages: + if ((p.label == 'BAS6') and (p.instance != None)): + self.mxss += (p.instance.ibound.array < 0).sum() + elif p.instance != None: + self.mxss += p.instance.ncells() + else: + self.mxss = mxss + + # Note: list is used for multi-species, NOT for stress periods! 
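The multi-species recharge handling that follows is easiest to see from the caller's side. A minimal sketch, not part of the patch (the grid shape, stress periods, and ncomp = 2 are assumptions for illustration, echoing the crch2 example in the docstring above):

```python
import numpy as np
import flopy

mt = flopy.mt3d.Mt3dms()  # assumes ncomp=2 and a 10x10 grid set up elsewhere

# Species 1: constant recharge concentration of 0.0 for all periods.
# Species 2: dict keyed by zero-based stress period; period 1 switches
# to a 10x10 array of 10.0. One Transient2d per species is stored.
ssm = flopy.mt3d.Mt3dSsm(mt, crch=0.0,
                         crch2={0: 0.0, 1: 10.0 * np.ones((10, 10))})
```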
+ self.crch = None + try: + if crch is None and model.mf.rch is not None: + print("found 'rch' in modflow model, resetting crch to 0.0") + crch = 0.0 + except: + if model.verbose: + print(' explicit crcg in file') + + if crch is not None: + + self.crch = [] + t2d = Transient2d(model, (nrow, ncol), np.float32, + crch, name='crch1', + locat=self.unit_number[0], + array_free_format=False) + self.crch.append(t2d) + if ncomp > 1: + for icomp in range(2, ncomp + 1): + val = 0.0 + name = "crch" + str(icomp) + if name in list(kwargs.keys()): + val = kwargs.pop(name) + else: + print("SSM: setting crch for component " + \ + str(icomp) + " to zero. kwarg name " + \ + name) + t2d = Transient2d(model, (nrow, ncol), np.float32, + val, name=name, + locat=self.unit_number[0], + array_free_format=False) + self.crch.append(t2d) + # else: + # try: + # if model.mf.rch is not None: + # print("found 'rch' in modflow model, resetting crch to 0.0") + # self.crch = [Transient2d(model, (nrow, ncol), np.float32, + # 0, name='crch1', + # locat=self.unit_number[0], + # array_free_format=False)] + # + # else: + # self.crch = None + # except: + # self.crch = None + + self.cevt = None + try: + if cevt is None and ( + model.mf.evt is not None or model.mf.ets is not None): + print( + "found 'ets'/'evt' in modflow model, resetting cevt to 0.0") + cevt = 0.0 + except: + if model.verbose: + print(' explicit cevt in file') + + if cevt is not None: + self.cevt = [] + t2d = Transient2d(model, (nrow, ncol), np.float32, + cevt, name='cevt1', + locat=self.unit_number[0], + array_free_format=False) + self.cevt.append(t2d) + if ncomp > 1: + for icomp in range(2, ncomp + 1): + val = 0.0 + name = "cevt" + str(icomp) + if name in list(kwargs.keys()): + val = kwargs[name] + kwargs.pop(name) + else: + print("SSM: setting cevt for component " + \ + str(icomp) + " to zero, kwarg name " + \ + name) + t2d = Transient2d(model, (nrow, ncol), np.float32, + val, name=name, + locat=self.unit_number[0], + array_free_format=False) + self.cevt.append(t2d) + + # else: + # try: + # if model.mf.evt is not None or model.mf.ets is not None: + # print("found 'ets'/'evt' in modflow model, resetting cevt to 0.0") + # self.cevt = [Transient2d(model, (nrow, ncol), np.float32, + # 0, name='cevt1', + # locat=self.unit_number[0], + # array_free_format=False)] + # + # else: + # self.cevt = None + # except: + # self.cevt = None + + if len(list(kwargs.keys())) > 0: + raise Exception("SSM error: unrecognized kwargs: " + + ' '.join(list(kwargs.keys()))) + + # Add self to parent and return + self.parent.add_package(self) + return + + def from_package(self, package, ncomp_aux_names): + """ + read the point source and sink info from a package + ncomp_aux_names (list): the aux variable names in the package + that are the component concentrations + """ + raise NotImplementedError() + + @staticmethod + def itype_dict(): + itype = {} + itype["CHD"] = 1 + itype["BAS6"] = 1 + itype["PBC"] = 1 + itype["WEL"] = 2 + itype["DRN"] = 3 + itype["RIV"] = 4 + itype["GHB"] = 5 + itype["MAS"] = 15 + itype["CC"] = -1 + return itype + + @staticmethod + def get_default_dtype(ncomp=1): + """ + Construct a dtype for the recarray containing the list of sources + and sinks + """ + type_list = [("k", np.int), ("i", np.int), ("j", np.int), + ("css", np.float32), ("itype", np.int)] + if ncomp > 1: + for comp in range(1, ncomp + 1): + comp_name = "cssm({0:02d})".format(comp) + type_list.append((comp_name, np.float32)) + dtype = np.dtype(type_list) + return dtype + + def write_file(self): + """ + 
Write the package file + + Returns + ------- + None + + """ + # Open file for writing + f_ssm = open(self.fn_path, 'w') + for p in self.__SsmPackages: + if p.needTFstr: + f_ssm.write(p.TFstr) + + f_ssm.write(' F F F F F F F F F F\n') + + f_ssm.write('{:10d}\n'.format(self.mxss)) + + # Loop through each stress period and write ssm information + nper = self.parent.nper + for kper in range(nper): + if f_ssm.closed == True: + f_ssm = open(f_ssm.name, 'a') + + # Distributed sources and sinks (Recharge and Evapotranspiration) + if self.crch is not None: + # If any species need to be written, then all need to be + # written + incrch = -1 + for t2d in self.crch: + incrchicomp, file_entry = t2d.get_kper_entry(kper) + incrch = max(incrch, incrchicomp) + if incrch == 1: + break + f_ssm.write('{:10d}\n'.format(incrch)) + if incrch == 1: + for t2d in self.crch: + u2d = t2d[kper] + file_entry = u2d.get_file_entry() + f_ssm.write(file_entry) + + if self.cevt is not None: + # If any species need to be written, then all need to be + # written + incevt = -1 + for t2d in self.cevt: + incevticomp, file_entry = t2d.get_kper_entry(kper) + incevt = max(incevt, incevticomp) + if incevt == 1: + break + f_ssm.write('{:10d}\n'.format(incevt)) + if incevt == 1: + for t2d in self.cevt: + u2d = t2d[kper] + file_entry = u2d.get_file_entry() + f_ssm.write(file_entry) + + # List of sources + if self.stress_period_data is not None: + self.stress_period_data.write_transient(f_ssm, single_per=kper) + else: + f_ssm.write('{}\n'.format(0)) + + f_ssm.close() + return + + @staticmethod + def load(f, model, nlay=None, nrow=None, ncol=None, nper=None, + ncomp=None, ext_unit_dict=None): + """ + Load an existing package. + + Parameters + ---------- + f : filename or file handle + File to load. + model : model object + The model object (of type :class:`flopy.mt3d.mt.Mt3dms`) to + which this package will be added. + ext_unit_dict : dictionary, optional + If the arrays in the file are specified using EXTERNAL, + or older style array control records, then `f` should be a file + handle. In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. + + Returns + ------- + ssm : Mt3dSsm object + Mt3dSsm object. 
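In addition to the doctest-style example that follows, a loaded package can be inspected directly. A hedged sketch (the file name is illustrative):

```python
import flopy

mt = flopy.mt3d.Mt3dms()
ssm = flopy.mt3d.Mt3dSsm.load('test.ssm', mt)

# MXSS as read from item D2, and the zero-based stress periods for
# which boundary recarrays were read from items D7/D8.
print(ssm.mxss)
print(sorted(ssm.stress_period_data.data.keys()))
```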
+ + Examples + -------- + + >>> import flopy + >>> mt = flopy.mt3d.Mt3dms() + >>> ssm = flopy.mt3d.Mt3dSsm.load('test.ssm', mt) + + """ + + if model.verbose: + sys.stdout.write('loading ssm package file...\n') + + # Open file, if necessary + openfile = not hasattr(f, 'read') + if openfile: + filename = f + f = open(filename, 'r') + + # Set modflow model and dimensions if necessary + mf = model.mf + if nlay is None: + nlay = model.nlay + if nrow is None: + nrow = model.nrow + if ncol is None: + ncol = model.ncol + if nper is None: + nper = model.nper + if ncomp is None: + ncomp = model.ncomp + + # dtype + dtype = Mt3dSsm.get_default_dtype(ncomp) + + # Dataset 0 -- comment line + while True: + line = f.readline() + if line[0] != '#': + break + + # Item D1: Dummy input line - line already read above + if model.verbose: + print( + ' loading FWEL, FDRN, FRCH, FEVT, FRIV, FGHB, (FNEW(n), n=1,4)...') + fwel = line[0:2] + fdrn = line[2:4] + frch = line[4:6] + fevt = line[6:8] + friv = line[8:10] + fghb = line[10:12] + if len(line) >= 14: + fnew1 = line[12:14] + else: + fnew1 = 'F' + if len(line) >= 16: + fnew2 = line[14:16] + else: + fnew2 = 'F' + if len(line) >= 18: + fnew3 = line[16:18] + else: + fnew3 = 'F' + if len(line) >= 20: + fnew4 = line[18:20] + else: + fnew4 = 'F' + if model.verbose: + print(' FWEL {}'.format(fwel)) + print(' FDRN {}'.format(fdrn)) + print(' FRCH {}'.format(frch)) + print(' FEVT {}'.format(fevt)) + print(' FRIV {}'.format(friv)) + print(' FGHB {}'.format(fghb)) + print(' FNEW1 {}'.format(fnew1)) + print(' FNEW2 {}'.format(fnew2)) + print(' FNEW3 {}'.format(fnew3)) + print(' FNEW4 {}'.format(fnew4)) + + # Override the logical settings at top of ssm file using the + # modflowmodel, if it is attached to parent + if mf is not None: + rchpack = mf.get_package('RCH') + if rchpack is not None: + frch = 't' + evtpack = mf.get_package('EVT') + if evtpack is not None: + fevt = 't' + + # Item D2: MXSS, ISSGOUT + mxss = None + if model.verbose: + print(' loading MXSS, ISSGOUT...') + line = f.readline() + mxss = int(line[0:10]) + try: + issgout = int(line[10:20]) + except: + issgout = 0 + if model.verbose: + print(' MXSS {}'.format(mxss)) + print(' ISSGOUT {}'.format(issgout)) + + # kwargs needed to construct crch2, crch3, etc. for multispecies + kwargs = {} + + crch = None + if 't' in frch.lower(): + t2d = 0. + crch = {0: t2d} + if ncomp > 1: + for icomp in range(2, ncomp + 1): + name = "crch" + str(icomp) + t2d = 0. + kwargs[name] = {0: t2d} + + cevt = None + if 't' in fevt.lower(): + t2d = 0. + cevt = {0: t2d} + if ncomp > 1: + for icomp in range(2, ncomp + 1): + name = "cevt" + str(icomp) + t2d = 0. 
+ kwargs[name] = {0: t2d} + + stress_period_data = {} + + for iper in range(nper): + + if model.verbose: + print(" loading ssm for kper {0:5d}".format(iper + 1)) + + # Item D3: INCRCH + incrch = -1 + if 't' in frch.lower(): + if model.verbose: + print(' loading INCRCH...') + line = f.readline() + incrch = int(line[0:10]) + + # Item D4: CRCH + if incrch >= 0: + if model.verbose: + print(' loading CRCH...') + t = Util2d.load(f, model, (nrow, ncol), np.float32, 'crch', + ext_unit_dict, array_format="mt3d") + crch[iper] = t + # Load each multispecies array + if ncomp > 1: + for icomp in range(2, ncomp + 1): + name = "crch" + str(icomp) + if model.verbose: + print(' loading {}...'.format(name)) + t = Util2d.load(f, model, (nrow, ncol), + np.float32, name, ext_unit_dict, + array_format="mt3d") + crchicomp = kwargs[name] + crchicomp[iper] = t + + # Item D5: INCEVT + incevt = -1 + if 't' in fevt.lower(): + if model.verbose: + print(' loading INCEVT...') + line = f.readline() + incevt = int(line[0:10]) + + # Item D6: CEVT + if incevt >= 0: + if model.verbose: + print(' loading CEVT...') + t = Util2d.load(f, model, (nrow, ncol), np.float32, 'cevt', + ext_unit_dict, array_format="mt3d") + cevt[iper] = t + # Load each multispecies array + if ncomp > 1: + for icomp in range(2, ncomp + 1): + name = "cevt" + str(icomp) + if model.verbose: + print(' loading {}...'.format(name)) + t = Util2d.load(f, model, (nrow, ncol), + np.float32, name, ext_unit_dict, + array_format="mt3d") + cevticomp = kwargs[name] + cevticomp[iper] = t + + # Item D7: NSS + if model.verbose: + print(' loading NSS...') + line = f.readline() + nss = int(line[0:10]) + if model.verbose: + print(' NSS {}'.format(nss)) + + # Item D8: KSS, ISS, JSS, CSS, ITYPE, (CSSMS(n),n=1,NCOMP) + if model.verbose: + print(' loading KSS, ISS, JSS, CSS, ITYPE, ' + '(CSSMS(n),n=1,NCOMP)...') + if nss > 0: + current = np.empty((nss), dtype=dtype) + for ibnd in range(nss): + line = f.readline() + t = [] + for ivar in range(5): + istart = ivar * 10 + istop = istart + 10 + t.append(line[istart:istop]) + ncssms = len(current.dtype.names) - 5 + if ncssms > 0: + tt = line[istop:].strip().split() + for ivar in range(ncssms): + t.append(tt[ivar]) + current[ibnd] = tuple(t[:len(current.dtype.names)]) + # convert indices to zero-based + current['k'] -= 1 + current['i'] -= 1 + current['j'] -= 1 + current = current.view(np.recarray) + stress_period_data[iper] = current + elif nss == 0: + stress_period_data[iper] = nss + + + if openfile: + f.close() + + # set package unit number + unitnumber = None + filenames = [None] + if ext_unit_dict is not None: + unitnumber, filenames[0] = \ + model.get_ext_dict_attr(ext_unit_dict, + filetype=Mt3dSsm.ftype()) + + # Construct and return ssm package + ssm = Mt3dSsm(model, crch=crch, cevt=cevt, mxss=mxss, + stress_period_data=stress_period_data, + unitnumber=unitnumber, filenames=filenames, **kwargs) + return ssm + + @staticmethod + def ftype(): + return 'SSM' + + @staticmethod + def defaultunit(): + return 34 + + @staticmethod + def reservedunit(): + return 4 diff --git a/flopy/mt3d/mttob.py b/flopy/mt3d/mttob.py index 68059c0c60..155f50966c 100644 --- a/flopy/mt3d/mttob.py +++ b/flopy/mt3d/mttob.py @@ -1,112 +1,112 @@ -from ..pakbase import Package - - -class Mt3dTob(Package): - """ - Transport Observation package class - """ - - def __init__(self, model, outnam='tob_output', CScale=1.0, FluxGroups=[], - FScale=1.0, iOutFlux=0, extension='tob', unitnumber=None, - filenames=None): - - if unitnumber is None: - unitnumber = 
Mt3dTob.defaultunit() - elif unitnumber == 0: - unitnumber = Mt3dTob.reservedunit() - - # set filenames - if filenames is None: - filenames = [None] - elif isinstance(filenames, str): - filenames = [filenames] - - # Fill namefile items - name = [Mt3dTob.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - self.heading = '# TOB for MT3DMS, generated by Flopy.' - self.outnam = outnam - self.CScale = CScale - self.FluxGroups = FluxGroups - self.FScale = FScale - self.iOutFlux = iOutFlux - self.parent.add_package(self) - return - - def __repr__(self): - return 'Transport Observation package class' - - def write_file(self): - """ - Write the package file - - Returns - ------- - None - - """ - # Open file for writing - f_tob = open(self.fn_path, 'w') - f_tob.write('%s\n' % (self.heading)) - MaxConcObs = 0 - MaxFluxObs = 0 - MaxFluxCells = 0 - inConcObs = 0 - inFluxObs = 88 - inSaveObs = 89 - if (inFluxObs): - for FluxGroup in self.FluxGroups: - MaxFluxCells = MaxFluxCells + len(FluxGroup[1]) - MaxFluxObs = MaxFluxObs + 1 - f_tob.write('%10d%10d%10d\n' % (MaxConcObs, MaxFluxObs, MaxFluxCells)) - f_tob.write('%s%10d%10d%10d\n' % (self.outnam, inConcObs, inFluxObs, - inSaveObs)) - # if (inConcObs): - # - if (inFluxObs): - nFluxGroup = len(self.FluxGroups) - f_tob.write('%10d%10f%10d\n' % (nFluxGroup, self.FScale, - self.iOutFlux)) - for FluxGroup in self.FluxGroups: - nFluxTimeObs, FluxTimeObs = ( - self.assign_layer_row_column_data(FluxGroup[0], 5, - zerobase=False)) # misuse of function - zerobase set to False - nCells, Cells = self.assign_layer_row_column_data(FluxGroup[1], - 4, - zerobase=False) # misuse of function - zerobase set to False - nCells = 4 - iSSType = FluxGroup[2] - f_tob.write('%10d%10d%10d\n' % (nFluxTimeObs, nCells, iSSType)) - for fto in FluxTimeObs: - fto = fto[0] # Still to fix this! - f_tob.write('%12s%10s%10s%10s%10s\n' % (fto[0], fto[1], - fto[2], fto[3], - fto[4])) - for c in Cells: - c = c[0] # Still to fix this! - f_tob.write('%10d%10d%10d%10f\n' % (c[0], c[1], c[2], - c[3])) - - f_tob.close() - return - - @staticmethod - def ftype(): - return 'TOB' - - @staticmethod - def defaultunit(): - return 37 - - @staticmethod - def reservedunit(): - return 12 +from ..pakbase import Package + + +class Mt3dTob(Package): + """ + Transport Observation package class + """ + + def __init__(self, model, outnam='tob_output', CScale=1.0, FluxGroups=[], + FScale=1.0, iOutFlux=0, extension='tob', unitnumber=None, + filenames=None): + + if unitnumber is None: + unitnumber = Mt3dTob.defaultunit() + elif unitnumber == 0: + unitnumber = Mt3dTob.reservedunit() + + # set filenames + if filenames is None: + filenames = [None] + elif isinstance(filenames, str): + filenames = [filenames] + + # Fill namefile items + name = [Mt3dTob.ftype()] + units = [unitnumber] + extra = [''] + + # set package name + fname = [filenames[0]] + + # Call ancestor's init to set self.parent, extension, name and unit number + Package.__init__(self, model, extension=extension, name=name, + unit_number=units, extra=extra, filenames=fname) + + self.heading = '# TOB for MT3DMS, generated by Flopy.' 
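# A minimal usage sketch for the Mt3dSsm helpers shown in the mtssm.py hunk
# above (itype_dict, get_default_dtype, stress_period_data, write_file).
# This is illustrative only and not part of the patch; the model names and
# dimensions below are hypothetical.
import flopy

mf = flopy.modflow.Modflow()
dis = flopy.modflow.ModflowDis(mf, nlay=1, nrow=10, ncol=10, nper=1)
mt = flopy.mt3d.Mt3dms(modflowmodel=mf)
btn = flopy.mt3d.Mt3dBtn(mt)

itype = flopy.mt3d.Mt3dSsm.itype_dict()         # e.g. itype['WEL'] == 2
dtype = flopy.mt3d.Mt3dSsm.get_default_dtype()  # fields: k, i, j, css, itype

# one source cell: layer 0, row 4, column 4, concentration 1.0, WEL itype
spd = {0: [(0, 4, 4, 1.0, itype['WEL'])]}
ssm = flopy.mt3d.Mt3dSsm(mt, stress_period_data=spd, dtype=dtype)
ssm.write_file()  # writes the SSM input file (flags, MXSS, per-period data)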
+ self.outnam = outnam + self.CScale = CScale + self.FluxGroups = FluxGroups + self.FScale = FScale + self.iOutFlux = iOutFlux + self.parent.add_package(self) + return + + def __repr__(self): + return 'Transport Observation package class' + + def write_file(self): + """ + Write the package file + + Returns + ------- + None + + """ + # Open file for writing + f_tob = open(self.fn_path, 'w') + f_tob.write('%s\n' % (self.heading)) + MaxConcObs = 0 + MaxFluxObs = 0 + MaxFluxCells = 0 + inConcObs = 0 + inFluxObs = 88 + inSaveObs = 89 + if (inFluxObs): + for FluxGroup in self.FluxGroups: + MaxFluxCells = MaxFluxCells + len(FluxGroup[1]) + MaxFluxObs = MaxFluxObs + 1 + f_tob.write('%10d%10d%10d\n' % (MaxConcObs, MaxFluxObs, MaxFluxCells)) + f_tob.write('%s%10d%10d%10d\n' % (self.outnam, inConcObs, inFluxObs, + inSaveObs)) + # if (inConcObs): + # + if (inFluxObs): + nFluxGroup = len(self.FluxGroups) + f_tob.write('%10d%10f%10d\n' % (nFluxGroup, self.FScale, + self.iOutFlux)) + for FluxGroup in self.FluxGroups: + nFluxTimeObs, FluxTimeObs = ( + self.assign_layer_row_column_data(FluxGroup[0], 5, + zerobase=False)) # misuse of function - zerobase set to False + nCells, Cells = self.assign_layer_row_column_data(FluxGroup[1], + 4, + zerobase=False) # misuse of function - zerobase set to False + nCells = 4 + iSSType = FluxGroup[2] + f_tob.write('%10d%10d%10d\n' % (nFluxTimeObs, nCells, iSSType)) + for fto in FluxTimeObs: + fto = fto[0] # Still to fix this! + f_tob.write('%12s%10s%10s%10s%10s\n' % (fto[0], fto[1], + fto[2], fto[3], + fto[4])) + for c in Cells: + c = c[0] # Still to fix this! + f_tob.write('%10d%10d%10d%10f\n' % (c[0], c[1], c[2], + c[3])) + + f_tob.close() + return + + @staticmethod + def ftype(): + return 'TOB' + + @staticmethod + def defaultunit(): + return 37 + + @staticmethod + def reservedunit(): + return 12 diff --git a/flopy/plot/__init__.py b/flopy/plot/__init__.py index 4553d81556..20bbfe47d3 100644 --- a/flopy/plot/__init__.py +++ b/flopy/plot/__init__.py @@ -1,26 +1,26 @@ -""" - the main entry point of utils - - Parameters - ---------- - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - - Examples - -------- - - """ -from .plotutil import SwiConcentration, plot_shapefile, shapefile_extents, PlotUtilities -from .map import ModelMap, PlotMapView -from .crosssection import ModelCrossSection -from .plotbase import PlotCrossSection +""" + the main entry point of utils + + Parameters + ---------- + + Attributes + ---------- + + Methods + ------- + + See Also + -------- + + Notes + ----- + + Examples + -------- + + """ +from .plotutil import SwiConcentration, plot_shapefile, shapefile_extents, PlotUtilities +from .map import ModelMap, PlotMapView +from .crosssection import ModelCrossSection +from .plotbase import PlotCrossSection diff --git a/flopy/plot/crosssection.py b/flopy/plot/crosssection.py index e0c1cbc688..f2775a33bb 100644 --- a/flopy/plot/crosssection.py +++ b/flopy/plot/crosssection.py @@ -1,883 +1,883 @@ -import numpy as np - -try: - import matplotlib.pyplot as plt -except: - plt = None -from flopy.plot import plotutil -from flopy.utils import geometry -import warnings -warnings.simplefilter('always', PendingDeprecationWarning) - - -class _CrossSection(object): - """ - Base class for CrossSection plotting. Handles the model grid - transforms and searching for modelgrid and dis file information. - - This class must be general with absolutely no code specific to - a single model grid type. 
The user should not directly instantiate this - class - - Parameters - ---------- - ax : matplotlib.pyplot.axes object - model : flopy.mf6.Modflow or flopy.modflow.Modflow object - modelgrid : flopy.discretization.grid object - geographic_coords : bool - boolean flag to allow the user to plot cross section lines in - geographic coordinates. If False (default), cross section is plotted - as the distance along the cross section line. - - """ - def __init__(self, ax=None, model=None, modelgrid=None, - geographic_coords=False): - - self.ax = ax - self.geographic_coords = geographic_coords - if plt is None: - s = 'Could not import matplotlib. Must install matplotlib ' + \ - ' in order to use ModelCrossSection method' - raise ImportError(s) - - self.model = model - - if model is not None: - self.mg = model.modelgrid - - elif modelgrid is not None: - self.mg = modelgrid - if self.mg is None: - raise AssertionError("Cannot find model grid ") - - else: - raise Exception("Cannot find model grid") - - if self.mg.top is None or self.mg.botm is None: - raise AssertionError("modelgrid top and botm must be defined") - - -class _StructuredCrossSection(_CrossSection): - """ - Class to create a cross section of the model using - Structured discretization. - - Class is not to be instantiated by the user. - - Parameters - ---------- - ax : matplotlib.pyplot axis - The plot axis. If not provided it, plt.gca() will be used. - model : flopy.modflow object - flopy model object. (Default is None) - modelgrid : flopy.discretization.StructuredGrid - Structured model grid object - line : dict - Dictionary with either "row", "column", or "line" key. If key - is "row" or "column" key value should be the zero-based row or - column index for cross-section. If key is "line" value should - be an array of (x, y) tuples with vertices of cross-section. - Vertices should be in map coordinates consistent with xul, - yul, and rotation. - extent : tuple of floats - (xmin, xmax, ymin, ymax) will be used to specify axes limits. If None - then these will be calculated based on grid, coordinates, and rotation - geographic_coords : bool - boolean flag to allow the user to plot cross section lines in - geographic coordinates. If False (default), cross section is plotted - as the distance along the cross section line. - - """ - - def __init__(self, ax=None, model=None, modelgrid=None, - line=None, extent=None, geographic_coords=False): - super(_StructuredCrossSection, self).__init__(ax=ax, model=model, - modelgrid=modelgrid, - geographic_coords= - geographic_coords) - - if line is None: - s = 'line must be specified.' 
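# An illustrative sketch of the `line` dictionary and geographic_coords
# options documented above, used through the public PlotCrossSection wrapper
# (the private classes here are not meant to be instantiated directly).
# Not part of the patch; 'model.nam' is a hypothetical name file, and
# geographic_coords is assumed to be passed through by PlotCrossSection as
# it is in this module.
import matplotlib.pyplot as plt
import flopy

ml = flopy.modflow.Modflow.load('model.nam')

# the three accepted forms of the line dictionary
xs_row = flopy.plot.PlotCrossSection(model=ml, line={'row': 4})
xs_col = flopy.plot.PlotCrossSection(model=ml, line={'column': 7})
xs_arb = flopy.plot.PlotCrossSection(
    model=ml, line={'line': [(0., 0.), (1500., 1250.)]})

# plot in map coordinates instead of distance along the section line
xs_geo = flopy.plot.PlotCrossSection(model=ml, line={'row': 4},
                                     geographic_coords=True)

xs_row.plot_grid()
plt.show()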
- raise Exception(s) - - linekeys = [linekeys.lower() for linekeys in list(line.keys())] - - if len(linekeys) != 1: - s = 'only row, column, or line can be specified in line dictionary.\n' - s += 'keys specified: ' - for k in linekeys: - s += '{} '.format(k) - raise AssertionError(s) - - if ax is None: - self.ax = plt.gca() - else: - self.ax = ax - - onkey = list(line.keys())[0] - eps = 1.e-4 - xedge, yedge = self.mg.xyedges - self.__geographic_xpts = None - - # un-translate model grid into model coordinates - self.xcellcenters, self.ycellcenters = \ - geometry.transform(self.mg.xcellcenters, - self.mg.ycellcenters, - self.mg.xoffset, self.mg.yoffset, - self.mg.angrot_radians, inverse=True) - - if 'row' in linekeys: - self.direction = 'x' - ycenter = self.ycellcenters.T[0] - pts = [(xedge[0] + eps, - ycenter[int(line[onkey])] - eps), - (xedge[-1] - eps, - ycenter[int(line[onkey])] + eps)] - elif 'column' in linekeys: - self.direction = 'y' - xcenter = self.xcellcenters[0, :] - pts = [(xcenter[int(line[onkey])] + eps, - yedge[0] - eps), - (xcenter[int(line[onkey])] - eps, - yedge[-1] + eps)] - else: - self.direction = 'xy' - verts = line[onkey] - xp = [] - yp = [] - for [v1, v2] in verts: - xp.append(v1) - yp.append(v2) - - xp, yp = self.mg.get_local_coords(xp, yp) - pts = [(xt, yt) for xt, yt in zip(xp, yp)] - # for now set offset to zero, since we do not have - # information on projection from the user - - # convert pts list to numpy array - self.pts = np.array(pts) - - # get points along the line - self.xpts = plotutil.line_intersect_grid(self.pts, self.mg.xyedges[0], - self.mg.xyedges[1]) - if len(self.xpts) < 2: - s = 'cross-section cannot be created\n.' - s += ' less than 2 points intersect the model grid\n' - s += ' {} points intersect the grid.'.format(len(self.xpts)) - raise Exception(s) - - # set horizontal distance - d = [] - for v in self.xpts: - d.append(v[2]) - self.d = np.array(d) - - self.idomain = self.mg.idomain - if self.mg.idomain is None: - self.idomain = np.ones((self.mg.nlay, self.mg.nrow, - self.mg.ncol), dtype=int) - - self.ncb = 0 - self.laycbd = [] - - if self.model is not None: - if self.model.laycbd is not None: - self.laycbd = self.model.laycbd - - for l in self.laycbd: - if l > 0: - self.ncb += 1 - - self.active = np.ones((self.mg.nlay + self.ncb), dtype=np.int) - kon = 0 - - if len(self.laycbd) > 0: - for k in range(self.mg.nlay): - if self.laycbd[k] > 0: - kon += 1 - self.active[kon] = 0 - kon += 1 - - top = self.mg.top - botm = self.mg.botm - elev = [top.copy()] - for k in range(self.mg.nlay + self.ncb): - elev.append(botm[k, :, :]) - - self.elev = np.array(elev) - self.layer0 = 0 - self.layer1 = self.mg.nlay + self.ncb + 1 - - zpts = [] - for k in range(self.layer0, self.layer1): - zpts.append(plotutil.cell_value_points(self.xpts, self.mg.xyedges[0], - self.mg.xyedges[1], - self.elev[k, :, :])) - self.zpts = np.array(zpts) - - xcentergrid, zcentergrid = self.get_centergrids(self.xpts, self.zpts) - self.xcentergrid = xcentergrid - self.zcentergrid = zcentergrid - - geo_xcentergrid, _ = self.get_centergrids(self.geographic_xpts, - self.zpts) - self.geographic_xcentergrid = geo_xcentergrid - - # Create cross-section extent - if extent is None: - self.extent = self.get_extent() - else: - self.extent = extent - - # Set axis limits - self.ax.set_xlim(self.extent[0], self.extent[1]) - self.ax.set_ylim(self.extent[2], self.extent[3]) - - return - - @property - def geographic_xpts(self): - """ - Method to retranslate model coordinates to geometric - coordinates for 
plotting - - Returns: - - """ - if self.__geographic_xpts is None: - xypts = self.xpts.T - xypts = geometry.transform(xypts[0], xypts[1], - self.mg.xoffset, - self.mg.yoffset, - self.mg.angrot_radians) - - if self.direction == "xy": - xdist = np.max(xypts[0]) - np.min(xypts[0]) - ydist = np.max(xypts[1]) - np.min(xypts[1]) - if xdist >= ydist: - xypts = np.append(xypts, np.array([xypts[0]]), axis=0) - else: - xypts = np.append(xypts, np.array([xypts[1]]), axis=0) - else: - xypts = np.append(xypts, np.array([xypts[0]]), axis=0) - - self.__geographic_xpts = xypts.T - - return self.__geographic_xpts - - def get_centergrids(self, xpts, zpts): - """ - Method to calculate the centergrid information for plotting - - Parameters - ---------- - xpts : np.ndarray - array of x, y, distance along the cross section - zpts : np.ndarray - array of elevation values along the cross section - - Returns - ------- - tuple : (xcentergrid, zcentergrid) - """ - xcentergrid = [] - zcentergrid = [] - nz = 0 - if self.mg.nlay == 1: - for k in range(0, zpts.shape[0]): - nz += 1 - nx = 0 - for i in range(0, xpts.shape[0], 2): - try: - xp = 0.5 * (xpts[i][2] + xpts[i + 1][2]) - zp = zpts[k, i] - xcentergrid.append(xp) - zcentergrid.append(zp) - nx += 1 - except: - break - else: - for k in range(0, zpts.shape[0] - 1): - if not self.active[k]: - continue - nz += 1 - nx = 0 - for i in range(0, xpts.shape[0], 2): - try: - xp = 0.5 * (xpts[i][2] + xpts[i + 1][2]) - zp = 0.5 * (zpts[k, i] + zpts[k + 1, i + 1]) - xcentergrid.append(xp) - zcentergrid.append(zp) - nx += 1 - except: - break - - xcentergrid = np.array(xcentergrid).reshape((nz, nx)) - zcentergrid = np.array(zcentergrid).reshape((nz, nx)) - return xcentergrid, zcentergrid - - def plot_array(self, a, masked_values=None, head=None, **kwargs): - """ - Plot a three-dimensional array as a patch collection. - - Parameters - ---------- - a : numpy.ndarray - Three-dimensional array to plot. - masked_values : iterable of floats, ints - Values to mask. - head : numpy.ndarray - Three-dimensional array to set top of patches to the minimum - of the top of a layer or the head value. Used to create - patches that conform to water-level elevations. - **kwargs : dictionary - keyword arguments passed to matplotlib.collections.PatchCollection - - Returns - ------- - patches : matplotlib.collections.PatchCollection - - """ - if 'ax' in kwargs: - ax = kwargs.pop('ax') - else: - ax = self.ax - - xedge, yedge = self.mg.xyedges - vpts = [] - for k in range(self.mg.nlay): - vpts.append(plotutil.cell_value_points(self.xpts, xedge, - yedge, a[k, :, :])) - if len(self.laycbd) > 0: - if self.laycbd[k] > 0: - ta = np.empty((self.mg.nrow, self.mg.ncol), dtype=np.float) - ta[:, :] = -1e9 - vpts.append(plotutil.cell_value_points(self.xpts, - xedge, yedge, ta)) - vpts = np.array(vpts) - if masked_values is not None: - for mval in masked_values: - vpts = np.ma.masked_values(vpts, mval) - - if isinstance(head, np.ndarray): - zpts = self.set_zpts(head) - else: - zpts = self.zpts - - if self.ncb > 0: - vpts = np.ma.masked_values(vpts, -1e9) - - pc = self.get_grid_patch_collection(zpts, vpts, **kwargs) - if pc != None: - ax.add_collection(pc) - return pc - - def plot_surface(self, a, masked_values=None, **kwargs): - """ - Plot a two- or three-dimensional array as line(s). - - Parameters - ---------- - a : numpy.ndarray - Two- or three-dimensional array to plot. - masked_values : iterable of floats, ints - Values to mask. 
- **kwargs : dictionary - keyword arguments passed to matplotlib.pyplot.plot - - Returns - ------- - plot : list containing matplotlib.plot objects - - """ - if 'ax' in kwargs: - ax = kwargs.pop('ax') - else: - ax = self.ax - - plotarray = a - - vpts = [] - if len(plotarray.shape) == 2: - nlay = 1 - plotarray = np.reshape(plotarray, - (1, plotarray.shape[0], plotarray.shape[1])) - elif len(plotarray.shape) == 3: - nlay = plotarray.shape[0] - else: - raise Exception('plot_array array must be a 2D or 3D array') - - xedge, yedge = self.mg.xyedges - for k in range(nlay): - vpts.append(plotutil.cell_value_points(self.xpts, xedge, - yedge, - plotarray[k, :, :])) - vpts = np.array(vpts) - - if masked_values is not None: - for mval in masked_values: - vpts = np.ma.masked_values(vpts, mval) - - plot = [] - # adust distance array for modelgrid offset - if self.geographic_coords: - d = self.geographic_xpts.T[-1] - else: - d = self.d - for k in range(vpts.shape[0]): - plot.append(ax.plot(d, vpts[k, :], **kwargs)) - - return plot - - def plot_fill_between(self, a, colors=('blue', 'red'), - masked_values=None, head=None, **kwargs): - """ - Plot a three-dimensional array as lines. - - Parameters - ---------- - a : numpy.ndarray - Three-dimensional array to plot. - colors : list - matplotlib fill colors, two required - masked_values : iterable of floats, ints - Values to mask. - head : numpy.ndarray - Three-dimensional array to set top of patches to the minimum - of the top of a layer or the head value. Used to create - patches that conform to water-level elevations. - **kwargs : dictionary - keyword arguments passed to matplotlib.pyplot.plot - - Returns - ------- - plot : list containing matplotlib.fillbetween objects - - """ - if 'ax' in kwargs: - ax = kwargs.pop('ax') - else: - ax = self.ax - - plotarray = a - - vpts = [] - for k in range(self.mg.nlay): - # print('k', k, self.laycbd[k]) - vpts.append(plotutil.cell_value_points(self.xpts, self.mg.xyedges[0], - self.mg.xyedges[1], - plotarray[k, :, :])) - if len(self.laycbd) > 0: - if self.laycbd[k] > 0: - ta = np.empty((self.mg.nrow, self.mg.ncol), dtype=np.float) - ta[:, :] = self.mg.botm.array[k, :, :] - vpts.append(plotutil.cell_value_points(self.xpts, - self.mg.xyedges[0], - self.mg.xyedges[1], ta)) - - vpts = np.ma.array(vpts, mask=False) - - if isinstance(head, np.ndarray): - zpts = self.set_zpts(head) - else: - zpts = self.zpts - - if masked_values is not None: - for mval in masked_values: - vpts = np.ma.masked_values(vpts, mval) - if self.ncb > 0: - vpts = np.ma.masked_values(vpts, -1e9) - idxm = np.ma.getmask(vpts) - - plot = [] - # print(zpts.shape) - for k in range(self.mg.nlay + self.ncb): - if self.active[k] == 0: - continue - idxmk = idxm[k, :] - v = vpts[k, :] - y1 = zpts[k, :] - y2 = zpts[k + 1, :] - # make sure y1 is not below y2 - idx = y1 < y2 - y1[idx] = y2[idx] - # make sure v is not below y2 - idx = v < y2 - v[idx] = y2[idx] - # make sure v is not above y1 - idx = v > y1 - v[idx] = y1[idx] - # set y2 to v - y2 = v - # mask cells - y1[idxmk] = np.nan - y2[idxmk] = np.nan - # adjust distance array for modelgrid offset - if self.geographic_coords: - d = self.geographic_xpts.T[-1] - else: - d = self.d - plot.append(ax.fill_between(d, y1=y1, y2=y2, - color=colors[0], **kwargs)) - y1 = y2 - y2 = self.zpts[k + 1, :] - y2[idxmk] = np.nan - plot.append(ax.fill_between(d, y1=y1, y2=y2, - color=colors[1], **kwargs)) - return plot - - def contour_array(self, a, masked_values=None, head=None, **kwargs): - """ - Contour a three-dimensional 
array. - - Parameters - ---------- - a : numpy.ndarray - Three-dimensional array to plot. - masked_values : iterable of floats, ints - Values to mask. - head : numpy.ndarray - Three-dimensional array to set top of patches to the minimum - of the top of a layer or the head value. Used to create - patches that conform to water-level elevations. - **kwargs : dictionary - keyword arguments passed to matplotlib.pyplot.contour - - Returns - ------- - contour_set : matplotlib.pyplot.contour - - """ - plotarray = a - - vpts = [] - xedge, yedge = self.mg.xyedges - for k in range(self.mg.nlay): - vpts.append(plotutil.cell_value_points(self.xpts, xedge, - yedge, - plotarray[k, :, :])) - vpts = np.array(vpts) - vpts = vpts[:, ::2] - if self.mg.nlay == 1: - vpts = np.vstack((vpts, vpts)) - - if masked_values is not None: - for mval in masked_values: - vpts = np.ma.masked_values(vpts, mval) - - if isinstance(head, np.ndarray): - zcentergrid = self.set_zcentergrid(head) - else: - zcentergrid = self.zcentergrid - - if self.geographic_coords: - xcentergrid = self.geographic_xcentergrid - else: - xcentergrid = self.xcentergrid - contour_set = self.ax.contour(xcentergrid, zcentergrid, - vpts, **kwargs) - return contour_set - - def plot_inactive(self): - raise NotImplementedError("Function must be called in PlotCrossSection") - - def plot_ibound(self): - raise NotImplementedError("Function must be called in PlotCrossSection") - - def plot_grid(self): - raise NotImplementedError("Function must be called in PlotCrossSection") - - def plot_bc(self): - raise NotImplementedError("Function must be called in PlotCrossSection") - - def plot_specific_discharge(self): - raise NotImplementedError("Function must be called in PlotCrossSection") - - def plot_discharge(self): - raise NotImplementedError("Function must be called in PlotCrossSection") - - def get_grid_patch_collection(self, zpts, plotarray, **kwargs): - """ - Get a PatchCollection of plotarray in unmasked cells - - Parameters - ---------- - zpts : numpy.ndarray - array of z elevations that correspond to the x, y, and horizontal - distance along the cross-section (self.xpts). Constructed using - plotutil.cell_value_points(). - plotarray : numpy.ndarray - Three-dimensional array to attach to the Patch Collection. 
- **kwargs : dictionary - keyword arguments passed to matplotlib.collections.PatchCollection - - Returns - ------- - patches : matplotlib.collections.PatchCollection - - """ - from matplotlib.patches import Polygon - from matplotlib.collections import PatchCollection - rectcol = [] - - if 'vmin' in kwargs: - vmin = kwargs.pop('vmin') - else: - vmin = None - if 'vmax' in kwargs: - vmax = kwargs.pop('vmax') - else: - vmax = None - - colors = [] - if self.geographic_coords: - xpts = self.geographic_xpts - else: - xpts = self.xpts - for k in range(zpts.shape[0] - 1): - for idx in range(0, len(xpts) - 1, 2): - try: - ll = ((xpts[idx][2], zpts[k + 1, idx])) - try: - dx = xpts[idx + 2][2] - xpts[idx][2] - except: - dx = xpts[idx + 1][2] - xpts[idx][2] - dz = zpts[k, idx] - zpts[k + 1, idx] - pts = (ll, - (ll[0], ll[1] + dz), (ll[0] + dx, ll[1] + dz), - (ll[0] + dx, ll[1])) # , ll) - if np.isnan(plotarray[k, idx]): - continue - if plotarray[k, idx] is np.ma.masked: - continue - rectcol.append(Polygon(pts, closed=True)) - colors.append(plotarray[k, idx]) - except: - pass - - if len(rectcol) > 0: - patches = PatchCollection(rectcol, **kwargs) - patches.set_array(np.array(colors)) - patches.set_clim(vmin, vmax) - else: - patches = None - return patches - - def get_grid_line_collection(self, **kwargs): - """ - Get a LineCollection of the grid - - Parameters - ---------- - **kwargs : dictionary - keyword arguments passed to matplotlib.collections.LineCollection - - Returns - ------- - linecollection : matplotlib.collections.LineCollection - """ - from matplotlib.collections import LineCollection - - color = "grey" - if "color" in kwargs: - color = kwargs.pop('color') - - linecol = [] - if self.geographic_coords: - xpts = self.geographic_xpts - else: - xpts = self.xpts - for k in range(self.zpts.shape[0] - 1): - for idx in range(0, len(xpts) - 1, 2): - try: - ll = ((xpts[idx][2], self.zpts[k + 1, idx])) - try: - dx = xpts[idx + 2][2] - xpts[idx][2] - except (IndexError, ValueError): - dx = xpts[idx + 1][2] - xpts[idx][2] - dz = self.zpts[k, idx] - self.zpts[k + 1, idx] - # horizontal lines - linecol.append(((ll), (ll[0] + dx, ll[1]))) - linecol.append( - ((ll[0], ll[1] + dz), (ll[0] + dx, ll[1] + dz))) - # vertical lines - linecol.append(((ll), (ll[0], ll[1] + dz))) - linecol.append( - ((ll[0] + dx, ll[1]), (ll[0] + dx, ll[1] + dz))) - except (IndexError, AttributeError, ValueError): - pass - - linecollection = LineCollection(linecol, color=color, **kwargs) - return linecollection - - def set_zpts(self, vs): - """ - Get an array of z elevations based on minimum of cell elevation - (self.elev) or passed vs numpy.ndarray - - Parameters - ---------- - vs : numpy.ndarray - Three-dimensional array to plot. - - Returns - ------- - zpts : numpy.ndarray - - """ - zpts = [] - xedge, yedge = self.mg.xyedges - for k in range(self.layer0, self.layer1): - e = self.elev[k, :, :] - if k < self.mg.nlay: - v = vs[k, :, :] - idx = v < e - e[idx] = v[idx] - zpts.append(plotutil.cell_value_points(self.xpts, xedge, - yedge, e)) - return np.array(zpts) - - def set_zcentergrid(self, vs): - """ - Get an array of z elevations at the center of a cell that is based - on minimum of cell top elevation (self.elev) or passed vs numpy.ndarray - - Parameters - ---------- - vs : numpy.ndarray - Three-dimensional array to plot. 
- - Returns - ------- - zcentergrid : numpy.ndarray - - """ - vpts = [] - xedge, yedge = self.mg.xyedges - for k in range(self.layer0, self.layer1): - if k < self.mg.nlay: - e = vs[k, :, :] - else: - e = self.elev[k, :, :] - vpts.append(plotutil.cell_value_points(self.xpts, xedge, - yedge, e)) - vpts = np.array(vpts) - - zcentergrid = [] - nz = 0 - if self.mg.nlay == 1: - for k in range(0, self.zpts.shape[0]): - nz += 1 - nx = 0 - for i in range(0, self.xpts.shape[0], 2): - nx += 1 - vp = vpts[k, i] - zp = self.zpts[k, i] - if k == 0: - if vp < zp: - zp = vp - zcentergrid.append(zp) - else: - for k in range(0, self.zpts.shape[0] - 1): - if not self.active[k]==1: - continue - nz += 1 - nx = 0 - for i in range(0, self.xpts.shape[0], 2): - nx += 1 - vp = vpts[k, i] - ep = self.zpts[k, i] - if vp < ep: - ep = vp - zp = 0.5 * (ep + self.zpts[k + 1, i + 1]) - zcentergrid.append(zp) - return np.array(zcentergrid).reshape((nz, nx)) - - def get_extent(self): - """ - Get the extent of the rotated and offset grid - - Returns - ------- - tuple : (xmin, xmax, ymin, ymax) - - """ - if self.geographic_coords: - xpts = self.geographic_xpts - else: - xpts = self.xpts - - xmin = xpts[0][2] - xmax = xpts[-1][2] - - ymin = self.zpts.min() - ymax = self.zpts.max() - - return (xmin, xmax, ymin, ymax) - - -class ModelCrossSection(object): - """ - Class to create a cross section of the model. - - Parameters - ---------- - ax : matplotlib.pyplot axis - The plot axis. If not provided it, plt.gca() will be used. - model : flopy.modflow object - flopy model object. (Default is None) - dis : flopy.modflow.ModflowDis object - flopy discretization object. (Default is None) - line : dict - Dictionary with either "row", "column", or "line" key. If key - is "row" or "column" key value should be the zero-based row or - column index for cross-section. If key is "line" value should - be an array of (x, y) tuples with vertices of cross-section. - Vertices should be in map coordinates consistent with xul, - yul, and rotation. - xul : float - x coordinate for upper left corner - yul : float - y coordinate for upper left corner. The default is the sum of the - delc array. - rotation : float - Angle of grid rotation around the upper left corner. A positive value - indicates clockwise rotation. Angles are in degrees. Default is None - extent : tuple of floats - (xmin, xmax, ymin, ymax) will be used to specify axes limits. If None - then these will be calculated based on grid, coordinates, and rotation. 
- - """ - def __new__(cls, ax=None, model=None, dis=None, line=None, - xul=None, yul=None, rotation=None, extent=None): - - from flopy.plot.plotbase import DeprecatedCrossSection - from flopy.discretization import StructuredGrid - - err_msg = "ModelCrossSection will be replaced by " +\ - "PlotCrossSection(), Calling PlotCrossSection()" - warnings.warn(err_msg, PendingDeprecationWarning) - - modelgrid = None - if model is not None: - if (xul, yul, rotation) != (None, None, None): - modelgrid = plotutil._set_coord_info(model.modelgrid, - xul, yul, None, None, - rotation) - - elif dis is not None: - modelgrid = StructuredGrid(delr=dis.delr.array, - delc=dis.delc.array, - top=dis.top.array, - botm=dis.botm.array) - - if (xul, yul, rotation) != (None, None, None): - modelgrid = plotutil._set_coord_info(modelgrid, - xul, yul, None, None, - rotation) - - - return DeprecatedCrossSection(ax=ax, model=model, - modelgrid=modelgrid, - line=line, extent=extent) - +import numpy as np + +try: + import matplotlib.pyplot as plt +except: + plt = None +from flopy.plot import plotutil +from flopy.utils import geometry +import warnings +warnings.simplefilter('always', PendingDeprecationWarning) + + +class _CrossSection(object): + """ + Base class for CrossSection plotting. Handles the model grid + transforms and searching for modelgrid and dis file information. + + This class must be general with absolutely no code specific to + a single model grid type. The user should not directly instantiate this + class + + Parameters + ---------- + ax : matplotlib.pyplot.axes object + model : flopy.mf6.Modflow or flopy.modflow.Modflow object + modelgrid : flopy.discretization.grid object + geographic_coords : bool + boolean flag to allow the user to plot cross section lines in + geographic coordinates. If False (default), cross section is plotted + as the distance along the cross section line. + + """ + def __init__(self, ax=None, model=None, modelgrid=None, + geographic_coords=False): + + self.ax = ax + self.geographic_coords = geographic_coords + if plt is None: + s = 'Could not import matplotlib. Must install matplotlib ' + \ + ' in order to use ModelCrossSection method' + raise ImportError(s) + + self.model = model + + if model is not None: + self.mg = model.modelgrid + + elif modelgrid is not None: + self.mg = modelgrid + if self.mg is None: + raise AssertionError("Cannot find model grid ") + + else: + raise Exception("Cannot find model grid") + + if self.mg.top is None or self.mg.botm is None: + raise AssertionError("modelgrid top and botm must be defined") + + +class _StructuredCrossSection(_CrossSection): + """ + Class to create a cross section of the model using + Structured discretization. + + Class is not to be instantiated by the user. + + Parameters + ---------- + ax : matplotlib.pyplot axis + The plot axis. If not provided it, plt.gca() will be used. + model : flopy.modflow object + flopy model object. (Default is None) + modelgrid : flopy.discretization.StructuredGrid + Structured model grid object + line : dict + Dictionary with either "row", "column", or "line" key. If key + is "row" or "column" key value should be the zero-based row or + column index for cross-section. If key is "line" value should + be an array of (x, y) tuples with vertices of cross-section. + Vertices should be in map coordinates consistent with xul, + yul, and rotation. + extent : tuple of floats + (xmin, xmax, ymin, ymax) will be used to specify axes limits. 
If None
+        then these will be calculated based on grid, coordinates, and rotation
+    geographic_coords : bool
+        boolean flag to allow the user to plot cross section lines in
+        geographic coordinates. If False (default), cross section is plotted
+        as the distance along the cross section line.
+
+    """
+
+    def __init__(self, ax=None, model=None, modelgrid=None,
+                 line=None, extent=None, geographic_coords=False):
+        super(_StructuredCrossSection, self).__init__(ax=ax, model=model,
+                                                      modelgrid=modelgrid,
+                                                      geographic_coords=
+                                                      geographic_coords)
+
+        if line is None:
+            s = 'line must be specified.'
+            raise Exception(s)
+
+        linekeys = [linekeys.lower() for linekeys in list(line.keys())]
+
+        if len(linekeys) != 1:
+            s = 'only row, column, or line can be specified in line dictionary.\n'
+            s += 'keys specified: '
+            for k in linekeys:
+                s += '{} '.format(k)
+            raise AssertionError(s)
+
+        if ax is None:
+            self.ax = plt.gca()
+        else:
+            self.ax = ax
+
+        onkey = list(line.keys())[0]
+        eps = 1.e-4
+        xedge, yedge = self.mg.xyedges
+        self.__geographic_xpts = None
+
+        # un-translate model grid into model coordinates
+        self.xcellcenters, self.ycellcenters = \
+            geometry.transform(self.mg.xcellcenters,
+                               self.mg.ycellcenters,
+                               self.mg.xoffset, self.mg.yoffset,
+                               self.mg.angrot_radians, inverse=True)
+
+        if 'row' in linekeys:
+            self.direction = 'x'
+            ycenter = self.ycellcenters.T[0]
+            pts = [(xedge[0] + eps,
+                    ycenter[int(line[onkey])] - eps),
+                   (xedge[-1] - eps,
+                    ycenter[int(line[onkey])] + eps)]
+        elif 'column' in linekeys:
+            self.direction = 'y'
+            xcenter = self.xcellcenters[0, :]
+            pts = [(xcenter[int(line[onkey])] + eps,
+                    yedge[0] - eps),
+                   (xcenter[int(line[onkey])] - eps,
+                    yedge[-1] + eps)]
+        else:
+            self.direction = 'xy'
+            verts = line[onkey]
+            xp = []
+            yp = []
+            for [v1, v2] in verts:
+                xp.append(v1)
+                yp.append(v2)
+
+            xp, yp = self.mg.get_local_coords(xp, yp)
+            pts = [(xt, yt) for xt, yt in zip(xp, yp)]
+            # for now set offset to zero, since we do not have
+            # information on projection from the user
+
+        # convert pts list to numpy array
+        self.pts = np.array(pts)
+
+        # get points along the line
+        self.xpts = plotutil.line_intersect_grid(self.pts, self.mg.xyedges[0],
+                                                 self.mg.xyedges[1])
+        if len(self.xpts) < 2:
+            s = 'cross-section cannot be created.\n'
+ s += ' less than 2 points intersect the model grid\n' + s += ' {} points intersect the grid.'.format(len(self.xpts)) + raise Exception(s) + + # set horizontal distance + d = [] + for v in self.xpts: + d.append(v[2]) + self.d = np.array(d) + + self.idomain = self.mg.idomain + if self.mg.idomain is None: + self.idomain = np.ones((self.mg.nlay, self.mg.nrow, + self.mg.ncol), dtype=int) + + self.ncb = 0 + self.laycbd = [] + + if self.model is not None: + if self.model.laycbd is not None: + self.laycbd = self.model.laycbd + + for l in self.laycbd: + if l > 0: + self.ncb += 1 + + self.active = np.ones((self.mg.nlay + self.ncb), dtype=np.int) + kon = 0 + + if len(self.laycbd) > 0: + for k in range(self.mg.nlay): + if self.laycbd[k] > 0: + kon += 1 + self.active[kon] = 0 + kon += 1 + + top = self.mg.top + botm = self.mg.botm + elev = [top.copy()] + for k in range(self.mg.nlay + self.ncb): + elev.append(botm[k, :, :]) + + self.elev = np.array(elev) + self.layer0 = 0 + self.layer1 = self.mg.nlay + self.ncb + 1 + + zpts = [] + for k in range(self.layer0, self.layer1): + zpts.append(plotutil.cell_value_points(self.xpts, self.mg.xyedges[0], + self.mg.xyedges[1], + self.elev[k, :, :])) + self.zpts = np.array(zpts) + + xcentergrid, zcentergrid = self.get_centergrids(self.xpts, self.zpts) + self.xcentergrid = xcentergrid + self.zcentergrid = zcentergrid + + geo_xcentergrid, _ = self.get_centergrids(self.geographic_xpts, + self.zpts) + self.geographic_xcentergrid = geo_xcentergrid + + # Create cross-section extent + if extent is None: + self.extent = self.get_extent() + else: + self.extent = extent + + # Set axis limits + self.ax.set_xlim(self.extent[0], self.extent[1]) + self.ax.set_ylim(self.extent[2], self.extent[3]) + + return + + @property + def geographic_xpts(self): + """ + Method to retranslate model coordinates to geometric + coordinates for plotting + + Returns: + + """ + if self.__geographic_xpts is None: + xypts = self.xpts.T + xypts = geometry.transform(xypts[0], xypts[1], + self.mg.xoffset, + self.mg.yoffset, + self.mg.angrot_radians) + + if self.direction == "xy": + xdist = np.max(xypts[0]) - np.min(xypts[0]) + ydist = np.max(xypts[1]) - np.min(xypts[1]) + if xdist >= ydist: + xypts = np.append(xypts, np.array([xypts[0]]), axis=0) + else: + xypts = np.append(xypts, np.array([xypts[1]]), axis=0) + else: + xypts = np.append(xypts, np.array([xypts[0]]), axis=0) + + self.__geographic_xpts = xypts.T + + return self.__geographic_xpts + + def get_centergrids(self, xpts, zpts): + """ + Method to calculate the centergrid information for plotting + + Parameters + ---------- + xpts : np.ndarray + array of x, y, distance along the cross section + zpts : np.ndarray + array of elevation values along the cross section + + Returns + ------- + tuple : (xcentergrid, zcentergrid) + """ + xcentergrid = [] + zcentergrid = [] + nz = 0 + if self.mg.nlay == 1: + for k in range(0, zpts.shape[0]): + nz += 1 + nx = 0 + for i in range(0, xpts.shape[0], 2): + try: + xp = 0.5 * (xpts[i][2] + xpts[i + 1][2]) + zp = zpts[k, i] + xcentergrid.append(xp) + zcentergrid.append(zp) + nx += 1 + except: + break + else: + for k in range(0, zpts.shape[0] - 1): + if not self.active[k]: + continue + nz += 1 + nx = 0 + for i in range(0, xpts.shape[0], 2): + try: + xp = 0.5 * (xpts[i][2] + xpts[i + 1][2]) + zp = 0.5 * (zpts[k, i] + zpts[k + 1, i + 1]) + xcentergrid.append(xp) + zcentergrid.append(zp) + nx += 1 + except: + break + + xcentergrid = np.array(xcentergrid).reshape((nz, nx)) + zcentergrid = np.array(zcentergrid).reshape((nz, 
nx))
+        return xcentergrid, zcentergrid
+
+    def plot_array(self, a, masked_values=None, head=None, **kwargs):
+        """
+        Plot a three-dimensional array as a patch collection.
+
+        Parameters
+        ----------
+        a : numpy.ndarray
+            Three-dimensional array to plot.
+        masked_values : iterable of floats, ints
+            Values to mask.
+        head : numpy.ndarray
+            Three-dimensional array to set top of patches to the minimum
+            of the top of a layer or the head value. Used to create
+            patches that conform to water-level elevations.
+        **kwargs : dictionary
+            keyword arguments passed to matplotlib.collections.PatchCollection
+
+        Returns
+        -------
+        patches : matplotlib.collections.PatchCollection
+
+        """
+        if 'ax' in kwargs:
+            ax = kwargs.pop('ax')
+        else:
+            ax = self.ax
+
+        xedge, yedge = self.mg.xyedges
+        vpts = []
+        for k in range(self.mg.nlay):
+            vpts.append(plotutil.cell_value_points(self.xpts, xedge,
+                                                   yedge, a[k, :, :]))
+            if len(self.laycbd) > 0:
+                if self.laycbd[k] > 0:
+                    ta = np.empty((self.mg.nrow, self.mg.ncol), dtype=np.float)
+                    ta[:, :] = -1e9
+                    vpts.append(plotutil.cell_value_points(self.xpts,
+                                                           xedge, yedge, ta))
+        vpts = np.array(vpts)
+        if masked_values is not None:
+            for mval in masked_values:
+                vpts = np.ma.masked_values(vpts, mval)
+
+        if isinstance(head, np.ndarray):
+            zpts = self.set_zpts(head)
+        else:
+            zpts = self.zpts
+
+        if self.ncb > 0:
+            vpts = np.ma.masked_values(vpts, -1e9)
+
+        pc = self.get_grid_patch_collection(zpts, vpts, **kwargs)
+        if pc is not None:
+            ax.add_collection(pc)
+        return pc
+
+    def plot_surface(self, a, masked_values=None, **kwargs):
+        """
+        Plot a two- or three-dimensional array as line(s).
+
+        Parameters
+        ----------
+        a : numpy.ndarray
+            Two- or three-dimensional array to plot.
+        masked_values : iterable of floats, ints
+            Values to mask.
+        **kwargs : dictionary
+            keyword arguments passed to matplotlib.pyplot.plot
+
+        Returns
+        -------
+        plot : list containing matplotlib.plot objects
+
+        """
+        if 'ax' in kwargs:
+            ax = kwargs.pop('ax')
+        else:
+            ax = self.ax
+
+        plotarray = a
+
+        vpts = []
+        if len(plotarray.shape) == 2:
+            nlay = 1
+            plotarray = np.reshape(plotarray,
+                                   (1, plotarray.shape[0], plotarray.shape[1]))
+        elif len(plotarray.shape) == 3:
+            nlay = plotarray.shape[0]
+        else:
+            raise Exception('plot_surface array must be a 2D or 3D array')
+
+        xedge, yedge = self.mg.xyedges
+        for k in range(nlay):
+            vpts.append(plotutil.cell_value_points(self.xpts, xedge,
+                                                   yedge,
+                                                   plotarray[k, :, :]))
+        vpts = np.array(vpts)
+
+        if masked_values is not None:
+            for mval in masked_values:
+                vpts = np.ma.masked_values(vpts, mval)
+
+        plot = []
+        # adjust distance array for modelgrid offset
+        if self.geographic_coords:
+            d = self.geographic_xpts.T[-1]
+        else:
+            d = self.d
+        for k in range(vpts.shape[0]):
+            plot.append(ax.plot(d, vpts[k, :], **kwargs))
+
+        return plot
+
+    def plot_fill_between(self, a, colors=('blue', 'red'),
+                          masked_values=None, head=None, **kwargs):
+        """
+        Plot a three-dimensional array as lines.
+
+        Parameters
+        ----------
+        a : numpy.ndarray
+            Three-dimensional array to plot.
+        colors : list
+            matplotlib fill colors, two required
+        masked_values : iterable of floats, ints
+            Values to mask.
+        head : numpy.ndarray
+            Three-dimensional array to set top of patches to the minimum
+            of the top of a layer or the head value. Used to create
+            patches that conform to water-level elevations.
+ **kwargs : dictionary + keyword arguments passed to matplotlib.pyplot.plot + + Returns + ------- + plot : list containing matplotlib.fillbetween objects + + """ + if 'ax' in kwargs: + ax = kwargs.pop('ax') + else: + ax = self.ax + + plotarray = a + + vpts = [] + for k in range(self.mg.nlay): + # print('k', k, self.laycbd[k]) + vpts.append(plotutil.cell_value_points(self.xpts, self.mg.xyedges[0], + self.mg.xyedges[1], + plotarray[k, :, :])) + if len(self.laycbd) > 0: + if self.laycbd[k] > 0: + ta = np.empty((self.mg.nrow, self.mg.ncol), dtype=np.float) + ta[:, :] = self.mg.botm.array[k, :, :] + vpts.append(plotutil.cell_value_points(self.xpts, + self.mg.xyedges[0], + self.mg.xyedges[1], ta)) + + vpts = np.ma.array(vpts, mask=False) + + if isinstance(head, np.ndarray): + zpts = self.set_zpts(head) + else: + zpts = self.zpts + + if masked_values is not None: + for mval in masked_values: + vpts = np.ma.masked_values(vpts, mval) + if self.ncb > 0: + vpts = np.ma.masked_values(vpts, -1e9) + idxm = np.ma.getmask(vpts) + + plot = [] + # print(zpts.shape) + for k in range(self.mg.nlay + self.ncb): + if self.active[k] == 0: + continue + idxmk = idxm[k, :] + v = vpts[k, :] + y1 = zpts[k, :] + y2 = zpts[k + 1, :] + # make sure y1 is not below y2 + idx = y1 < y2 + y1[idx] = y2[idx] + # make sure v is not below y2 + idx = v < y2 + v[idx] = y2[idx] + # make sure v is not above y1 + idx = v > y1 + v[idx] = y1[idx] + # set y2 to v + y2 = v + # mask cells + y1[idxmk] = np.nan + y2[idxmk] = np.nan + # adjust distance array for modelgrid offset + if self.geographic_coords: + d = self.geographic_xpts.T[-1] + else: + d = self.d + plot.append(ax.fill_between(d, y1=y1, y2=y2, + color=colors[0], **kwargs)) + y1 = y2 + y2 = self.zpts[k + 1, :] + y2[idxmk] = np.nan + plot.append(ax.fill_between(d, y1=y1, y2=y2, + color=colors[1], **kwargs)) + return plot + + def contour_array(self, a, masked_values=None, head=None, **kwargs): + """ + Contour a three-dimensional array. + + Parameters + ---------- + a : numpy.ndarray + Three-dimensional array to plot. + masked_values : iterable of floats, ints + Values to mask. + head : numpy.ndarray + Three-dimensional array to set top of patches to the minimum + of the top of a layer or the head value. Used to create + patches that conform to water-level elevations. 
+ **kwargs : dictionary + keyword arguments passed to matplotlib.pyplot.contour + + Returns + ------- + contour_set : matplotlib.pyplot.contour + + """ + plotarray = a + + vpts = [] + xedge, yedge = self.mg.xyedges + for k in range(self.mg.nlay): + vpts.append(plotutil.cell_value_points(self.xpts, xedge, + yedge, + plotarray[k, :, :])) + vpts = np.array(vpts) + vpts = vpts[:, ::2] + if self.mg.nlay == 1: + vpts = np.vstack((vpts, vpts)) + + if masked_values is not None: + for mval in masked_values: + vpts = np.ma.masked_values(vpts, mval) + + if isinstance(head, np.ndarray): + zcentergrid = self.set_zcentergrid(head) + else: + zcentergrid = self.zcentergrid + + if self.geographic_coords: + xcentergrid = self.geographic_xcentergrid + else: + xcentergrid = self.xcentergrid + contour_set = self.ax.contour(xcentergrid, zcentergrid, + vpts, **kwargs) + return contour_set + + def plot_inactive(self): + raise NotImplementedError("Function must be called in PlotCrossSection") + + def plot_ibound(self): + raise NotImplementedError("Function must be called in PlotCrossSection") + + def plot_grid(self): + raise NotImplementedError("Function must be called in PlotCrossSection") + + def plot_bc(self): + raise NotImplementedError("Function must be called in PlotCrossSection") + + def plot_specific_discharge(self): + raise NotImplementedError("Function must be called in PlotCrossSection") + + def plot_discharge(self): + raise NotImplementedError("Function must be called in PlotCrossSection") + + def get_grid_patch_collection(self, zpts, plotarray, **kwargs): + """ + Get a PatchCollection of plotarray in unmasked cells + + Parameters + ---------- + zpts : numpy.ndarray + array of z elevations that correspond to the x, y, and horizontal + distance along the cross-section (self.xpts). Constructed using + plotutil.cell_value_points(). + plotarray : numpy.ndarray + Three-dimensional array to attach to the Patch Collection. 
+ **kwargs : dictionary + keyword arguments passed to matplotlib.collections.PatchCollection + + Returns + ------- + patches : matplotlib.collections.PatchCollection + + """ + from matplotlib.patches import Polygon + from matplotlib.collections import PatchCollection + rectcol = [] + + if 'vmin' in kwargs: + vmin = kwargs.pop('vmin') + else: + vmin = None + if 'vmax' in kwargs: + vmax = kwargs.pop('vmax') + else: + vmax = None + + colors = [] + if self.geographic_coords: + xpts = self.geographic_xpts + else: + xpts = self.xpts + for k in range(zpts.shape[0] - 1): + for idx in range(0, len(xpts) - 1, 2): + try: + ll = ((xpts[idx][2], zpts[k + 1, idx])) + try: + dx = xpts[idx + 2][2] - xpts[idx][2] + except: + dx = xpts[idx + 1][2] - xpts[idx][2] + dz = zpts[k, idx] - zpts[k + 1, idx] + pts = (ll, + (ll[0], ll[1] + dz), (ll[0] + dx, ll[1] + dz), + (ll[0] + dx, ll[1])) # , ll) + if np.isnan(plotarray[k, idx]): + continue + if plotarray[k, idx] is np.ma.masked: + continue + rectcol.append(Polygon(pts, closed=True)) + colors.append(plotarray[k, idx]) + except: + pass + + if len(rectcol) > 0: + patches = PatchCollection(rectcol, **kwargs) + patches.set_array(np.array(colors)) + patches.set_clim(vmin, vmax) + else: + patches = None + return patches + + def get_grid_line_collection(self, **kwargs): + """ + Get a LineCollection of the grid + + Parameters + ---------- + **kwargs : dictionary + keyword arguments passed to matplotlib.collections.LineCollection + + Returns + ------- + linecollection : matplotlib.collections.LineCollection + """ + from matplotlib.collections import LineCollection + + color = "grey" + if "color" in kwargs: + color = kwargs.pop('color') + + linecol = [] + if self.geographic_coords: + xpts = self.geographic_xpts + else: + xpts = self.xpts + for k in range(self.zpts.shape[0] - 1): + for idx in range(0, len(xpts) - 1, 2): + try: + ll = ((xpts[idx][2], self.zpts[k + 1, idx])) + try: + dx = xpts[idx + 2][2] - xpts[idx][2] + except (IndexError, ValueError): + dx = xpts[idx + 1][2] - xpts[idx][2] + dz = self.zpts[k, idx] - self.zpts[k + 1, idx] + # horizontal lines + linecol.append(((ll), (ll[0] + dx, ll[1]))) + linecol.append( + ((ll[0], ll[1] + dz), (ll[0] + dx, ll[1] + dz))) + # vertical lines + linecol.append(((ll), (ll[0], ll[1] + dz))) + linecol.append( + ((ll[0] + dx, ll[1]), (ll[0] + dx, ll[1] + dz))) + except (IndexError, AttributeError, ValueError): + pass + + linecollection = LineCollection(linecol, color=color, **kwargs) + return linecollection + + def set_zpts(self, vs): + """ + Get an array of z elevations based on minimum of cell elevation + (self.elev) or passed vs numpy.ndarray + + Parameters + ---------- + vs : numpy.ndarray + Three-dimensional array to plot. + + Returns + ------- + zpts : numpy.ndarray + + """ + zpts = [] + xedge, yedge = self.mg.xyedges + for k in range(self.layer0, self.layer1): + e = self.elev[k, :, :] + if k < self.mg.nlay: + v = vs[k, :, :] + idx = v < e + e[idx] = v[idx] + zpts.append(plotutil.cell_value_points(self.xpts, xedge, + yedge, e)) + return np.array(zpts) + + def set_zcentergrid(self, vs): + """ + Get an array of z elevations at the center of a cell that is based + on minimum of cell top elevation (self.elev) or passed vs numpy.ndarray + + Parameters + ---------- + vs : numpy.ndarray + Three-dimensional array to plot. 
+ + Returns + ------- + zcentergrid : numpy.ndarray + + """ + vpts = [] + xedge, yedge = self.mg.xyedges + for k in range(self.layer0, self.layer1): + if k < self.mg.nlay: + e = vs[k, :, :] + else: + e = self.elev[k, :, :] + vpts.append(plotutil.cell_value_points(self.xpts, xedge, + yedge, e)) + vpts = np.array(vpts) + + zcentergrid = [] + nz = 0 + if self.mg.nlay == 1: + for k in range(0, self.zpts.shape[0]): + nz += 1 + nx = 0 + for i in range(0, self.xpts.shape[0], 2): + nx += 1 + vp = vpts[k, i] + zp = self.zpts[k, i] + if k == 0: + if vp < zp: + zp = vp + zcentergrid.append(zp) + else: + for k in range(0, self.zpts.shape[0] - 1): + if not self.active[k]==1: + continue + nz += 1 + nx = 0 + for i in range(0, self.xpts.shape[0], 2): + nx += 1 + vp = vpts[k, i] + ep = self.zpts[k, i] + if vp < ep: + ep = vp + zp = 0.5 * (ep + self.zpts[k + 1, i + 1]) + zcentergrid.append(zp) + return np.array(zcentergrid).reshape((nz, nx)) + + def get_extent(self): + """ + Get the extent of the rotated and offset grid + + Returns + ------- + tuple : (xmin, xmax, ymin, ymax) + + """ + if self.geographic_coords: + xpts = self.geographic_xpts + else: + xpts = self.xpts + + xmin = xpts[0][2] + xmax = xpts[-1][2] + + ymin = self.zpts.min() + ymax = self.zpts.max() + + return (xmin, xmax, ymin, ymax) + + +class ModelCrossSection(object): + """ + Class to create a cross section of the model. + + Parameters + ---------- + ax : matplotlib.pyplot axis + The plot axis. If not provided it, plt.gca() will be used. + model : flopy.modflow object + flopy model object. (Default is None) + dis : flopy.modflow.ModflowDis object + flopy discretization object. (Default is None) + line : dict + Dictionary with either "row", "column", or "line" key. If key + is "row" or "column" key value should be the zero-based row or + column index for cross-section. If key is "line" value should + be an array of (x, y) tuples with vertices of cross-section. + Vertices should be in map coordinates consistent with xul, + yul, and rotation. + xul : float + x coordinate for upper left corner + yul : float + y coordinate for upper left corner. The default is the sum of the + delc array. + rotation : float + Angle of grid rotation around the upper left corner. A positive value + indicates clockwise rotation. Angles are in degrees. Default is None + extent : tuple of floats + (xmin, xmax, ymin, ymax) will be used to specify axes limits. If None + then these will be calculated based on grid, coordinates, and rotation. 
+ + """ + def __new__(cls, ax=None, model=None, dis=None, line=None, + xul=None, yul=None, rotation=None, extent=None): + + from flopy.plot.plotbase import DeprecatedCrossSection + from flopy.discretization import StructuredGrid + + err_msg = "ModelCrossSection will be replaced by " +\ + "PlotCrossSection(), Calling PlotCrossSection()" + warnings.warn(err_msg, PendingDeprecationWarning) + + modelgrid = None + if model is not None: + if (xul, yul, rotation) != (None, None, None): + modelgrid = plotutil._set_coord_info(model.modelgrid, + xul, yul, None, None, + rotation) + + elif dis is not None: + modelgrid = StructuredGrid(delr=dis.delr.array, + delc=dis.delc.array, + top=dis.top.array, + botm=dis.botm.array) + + if (xul, yul, rotation) != (None, None, None): + modelgrid = plotutil._set_coord_info(modelgrid, + xul, yul, None, None, + rotation) + + + return DeprecatedCrossSection(ax=ax, model=model, + modelgrid=modelgrid, + line=line, extent=extent) + diff --git a/flopy/plot/map.py b/flopy/plot/map.py index c05babf3bd..dc448012a3 100644 --- a/flopy/plot/map.py +++ b/flopy/plot/map.py @@ -1,1610 +1,1610 @@ -import numpy as np -from ..discretization import StructuredGrid, UnstructuredGrid -from ..utils import geometry - -try: - import matplotlib.pyplot as plt - import matplotlib.colors - from matplotlib.collections import PatchCollection - from matplotlib.patches import Polygon -except ImportError: - plt = None - -from . import plotutil -import warnings - -warnings.simplefilter('always', PendingDeprecationWarning) - - -class PlotMapView(object): - """ - Class to create a map of the model. Delegates plotting - functionality based on model grid type. - - Parameters - ---------- - modelgrid : flopy.discretiztion.Grid - The modelgrid class can be StructuredGrid, VertexGrid, - or UnstructuredGrid (Default is None) - ax : matplotlib.pyplot axis - The plot axis. If not provided it, plt.gca() will be used. - If there is not a current axis then a new one will be created. - model : flopy.modflow object - flopy model object. (Default is None) - layer : int - Layer to plot. Default is 0. Must be between 0 and nlay - 1. - extent : tuple of floats - (xmin, xmax, ymin, ymax) will be used to specify axes limits. If None - then these will be calculated based on grid, coordinates, and rotation. - - Notes - ----- - - - """ - - def __init__(self, model=None, modelgrid=None, ax=None, - layer=0, extent=None): - - if plt is None: - s = 'Could not import matplotlib. Must install matplotlib ' + \ - ' in order to use ModelMap method' - raise ImportError(s) - - self.model = model - self.layer = layer - self.mg = None - - if model is not None: - self.mg = model.modelgrid - - elif modelgrid is not None: - self.mg = modelgrid - - else: - err_msg = "A model grid instance must be provided to PlotMapView" - raise AssertionError(err_msg) - - if self.mg.grid_type not in ("structured", "vertex", - "unstructured"): - err_msg = "Unrecognized modelgrid type {}" - raise TypeError(err_msg.format(self.mg.grid_type)) - - if ax is None: - try: - self.ax = plt.gca() - self.ax.set_aspect('equal') - except: - self.ax = plt.subplot(1, 1, 1, aspect='equal', axisbg="white") - else: - self.ax = ax - - if extent is not None: - self._extent = extent - else: - self._extent = None - - @property - def extent(self): - if self._extent is None: - self._extent = self.mg.extent - return self._extent - - def plot_array(self, a, masked_values=None, **kwargs): - """ - Plot an array. 
If the array is three-dimensional, then the method - will plot the layer tied to this class (self.layer). - - Parameters - ---------- - a : numpy.ndarray - Array to plot. - masked_values : iterable of floats, ints - Values to mask. - **kwargs : dictionary - keyword arguments passed to matplotlib.pyplot.pcolormesh - - Returns - ------- - quadmesh : matplotlib.collections.QuadMesh or - matplotlib.collections.PatchCollection - - """ - if not isinstance(a, np.ndarray): - a = np.array(a) - - if self.mg.grid_type == "structured": - if a.ndim == 3: - plotarray = a[self.layer, :, :] - elif a.ndim == 2: - plotarray = a - elif a.ndim == 1: - plotarray = a - else: - raise Exception('Array must be of dimension 1, 2, or 3') - - elif self.mg.grid_type == "vertex": - if a.ndim == 3: - if a.shape[0] == 1: - a = np.squeeze(a, axis=0) - plotarray = a[self.layer, :] - elif a.shape[1] == 1: - a = np.squeeze(a, axis=1) - plotarray = a[self.layer, :] - else: - raise Exception("Array must be of dimension 1 or 2") - elif a.ndim == 2: - plotarray = a[self.layer, :] - elif a.ndim == 1: - plotarray = a - else: - raise Exception('Array must be of dimension 1 or 2') - - elif self.mg.grid_type == "unstructured": - plotarray = a - - else: - raise TypeError( - "Unrecognized grid type {}".format(self.mg.grid_type)) - - if masked_values is not None: - for mval in masked_values: - plotarray = np.ma.masked_values(plotarray, mval) - - # add NaN values to mask - plotarray = np.ma.masked_where(np.isnan(plotarray), plotarray) - - if 'ax' in kwargs: - ax = kwargs.pop('ax') - else: - ax = self.ax - - if self.mg.grid_type in ("structured", "vertex"): - xgrid = np.array(self.mg.xvertices) - ygrid = np.array(self.mg.yvertices) - - if self.mg.grid_type == "structured": - quadmesh = ax.pcolormesh(xgrid, ygrid, plotarray) - - else: - patches = [Polygon(list(zip(xgrid[i], ygrid[i])), closed=True) - for i in range(xgrid.shape[0])] - - quadmesh = PatchCollection(patches) - quadmesh.set_array(plotarray) - - else: - quadmesh = plotutil.plot_cvfd(self.mg._vertices, self.mg._iverts, - a=plotarray, ax=ax) - - # set max and min - if 'vmin' in kwargs: - vmin = kwargs.pop('vmin') - else: - vmin = None - - if 'vmax' in kwargs: - vmax = kwargs.pop('vmax') - else: - vmax = None - - quadmesh.set_clim(vmin=vmin, vmax=vmax) - - # send rest of kwargs to quadmesh - quadmesh.set(**kwargs) - - # add collection to axis - ax.add_collection(quadmesh) - - # set limits - ax.set_xlim(self.extent[0], self.extent[1]) - ax.set_ylim(self.extent[2], self.extent[3]) - return quadmesh - - def contour_array(self, a, masked_values=None, **kwargs): - """ - Contour an array. If the array is three-dimensional, then the method - will contour the layer tied to this class (self.layer). - - Parameters - ---------- - a : numpy.ndarray - Array to plot. - masked_values : iterable of floats, ints - Values to mask. 
- **kwargs : dictionary - keyword arguments passed to matplotlib.pyplot.pcolormesh - - Returns - ------- - contour_set : matplotlib.pyplot.contour - - """ - try: - import matplotlib.tri as tri - except ImportError: - err_msg = "Matplotlib must be updated to use contour_array" - raise ImportError(err_msg) - - a = np.copy(a) - if not isinstance(a, np.ndarray): - a = np.array(a) - - xcentergrid = np.array(self.mg.xcellcenters) - ycentergrid = np.array(self.mg.ycellcenters) - - if self.mg.grid_type == "structured": - if a.ndim == 3: - plotarray = a[self.layer, :, :] - elif a.ndim == 2: - plotarray = a - elif a.ndim == 1: - plotarray = a - else: - raise Exception('Array must be of dimension 1, 2 or 3') - - elif self.mg.grid_type == "vertex": - if a.ndim == 3: - if a.shape[0] == 1: - a = np.squeeze(a, axis=0) - plotarray = a[self.layer, :] - elif a.shape[1] == 1: - a = np.squeeze(a, axis=1) - plotarray = a[self.layer, :] - else: - raise Exception("Array must be of dimension 1 or 2") - elif a.ndim == 2: - plotarray = a[self.layer, :] - elif a.ndim == 1: - plotarray = a - else: - raise Exception('Array must be of dimension 1, 2 or 3') - - else: - plotarray = a - - # work around for tri-contour ignore vmin & vmax - # necessary block for tri-contour NaN issue - if "levels" not in kwargs: - if "vmin" not in kwargs: - vmin = np.nanmin(plotarray) - else: - vmin = kwargs.pop("vmin") - if "vmax" not in kwargs: - vmax = np.nanmax(plotarray) - else: - vmax = kwargs.pop('vmax') - - levels = np.linspace(vmin, vmax, 7) - kwargs['levels'] = levels - - # workaround for tri-contour nan issue - # use -2**31 to allow for 32 bit int arrays - plotarray[np.isnan(plotarray)] = -2**31 - if masked_values is None: - masked_values = [-2**31] - else: - masked_values = list(masked_values) - if -2**31 not in masked_values: - masked_values.append(-2**31) - - ismasked = None - if masked_values is not None: - for mval in masked_values: - if ismasked is None: - ismasked = np.isclose(plotarray, mval) - else: - t = np.isclose(plotarray, mval) - ismasked += t - - if 'ax' in kwargs: - ax = kwargs.pop('ax') - else: - ax = self.ax - - if 'colors' in kwargs.keys(): - if 'cmap' in kwargs.keys(): - kwargs.pop('cmap') - - plot_triplot = False - if 'plot_triplot' in kwargs: - plot_triplot = kwargs.pop('plot_triplot') - - if 'extent' in kwargs: - extent = kwargs.pop('extent') - - if self.mg.grid_type in ('structured', 'vertex'): - idx = (xcentergrid >= extent[0]) & ( - xcentergrid <= extent[1]) & ( - ycentergrid >= extent[2]) & ( - ycentergrid <= extent[3]) - plotarray = plotarray[idx] - xcentergrid = xcentergrid[idx] - ycentergrid = ycentergrid[idx] - - plotarray = plotarray.flatten() - xcentergrid = xcentergrid.flatten() - ycentergrid = ycentergrid.flatten() - triang = tri.Triangulation(xcentergrid, ycentergrid) - - if ismasked is not None: - ismasked = ismasked.flatten() - mask = np.any(np.where(ismasked[triang.triangles], - True, False), axis=1) - triang.set_mask(mask) - - contour_set = ax.tricontour(triang, plotarray, **kwargs) - - if plot_triplot: - ax.triplot(triang, color='black', marker='o', lw=0.75) - - ax.set_xlim(self.extent[0], self.extent[1]) - ax.set_ylim(self.extent[2], self.extent[3]) - - return contour_set - - def plot_inactive(self, ibound=None, color_noflow='black', **kwargs): - """ - Make a plot of inactive cells. If not specified, then pull ibound - from the self.ml - - Parameters - ---------- - ibound : numpy.ndarray - ibound array to plot. (Default is ibound in 'BAS6' package.) 
- - color_noflow : string - (Default is 'black') - - Returns - ------- - quadmesh : matplotlib.collections.QuadMesh - - """ - if ibound is None: - if self.mg.idomain is None: - raise AssertionError("Ibound/Idomain array must be provided") - - ibound = self.mg.idomain - - plotarray = np.zeros(ibound.shape, dtype=np.int) - idx1 = (ibound == 0) - plotarray[idx1] = 1 - plotarray = np.ma.masked_equal(plotarray, 0) - cmap = matplotlib.colors.ListedColormap(['0', color_noflow]) - bounds = [0, 1, 2] - norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N) - quadmesh = self.plot_array(plotarray, cmap=cmap, norm=norm, **kwargs) - return quadmesh - - def plot_ibound(self, ibound=None, color_noflow='black', color_ch='blue', - color_vpt='red', **kwargs): - """ - Make a plot of ibound. If not specified, then pull ibound from the - self.ml - - Parameters - ---------- - ibound : numpy.ndarray - ibound array to plot. (Default is ibound in the modelgrid) - color_noflow : string - (Default is 'black') - color_ch : string - Color for constant heads (Default is 'blue'.) - color_vpt: string - Color for vertical pass through cells (Default is 'red') - - Returns - ------- - quadmesh : matplotlib.collections.QuadMesh - - """ - import matplotlib.colors - - if ibound is None: - if self.model is not None: - if self.model.version == "mf6": - color_ch = color_vpt - - if self.mg.idomain is None: - raise AssertionError("Ibound/Idomain array must be provided") - - ibound = self.mg.idomain - - plotarray = np.zeros(ibound.shape, dtype=np.int) - idx1 = (ibound == 0) - idx2 = (ibound < 0) - plotarray[idx1] = 1 - plotarray[idx2] = 2 - plotarray = np.ma.masked_equal(plotarray, 0) - cmap = matplotlib.colors.ListedColormap(['0', color_noflow, color_ch]) - bounds = [0, 1, 2, 3] - norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N) - quadmesh = self.plot_array(plotarray, cmap=cmap, norm=norm, **kwargs) - return quadmesh - - def plot_grid(self, **kwargs): - """ - Plot the grid lines. - - Parameters - ---------- - kwargs : ax, colors. The remaining kwargs are passed into the - the LineCollection constructor. - - Returns - ------- - lc : matplotlib.collections.LineCollection - - """ - from matplotlib.collections import LineCollection - - if 'ax' in kwargs: - ax = kwargs.pop('ax') - else: - ax = self.ax - - if 'colors' not in kwargs: - kwargs['colors'] = '0.5' - - lc = LineCollection(self.mg.grid_lines, **kwargs) - - ax.add_collection(lc) - ax.set_xlim(self.extent[0], self.extent[1]) - ax.set_ylim(self.extent[2], self.extent[3]) - - return lc - - def plot_bc(self, name=None, package=None, kper=0, color=None, - plotAll=False, **kwargs): - """ - Plot boundary conditions locations for a specific boundary - type from a flopy model - - Parameters - ---------- - name : string - Package name string ('WEL', 'GHB', etc.). (Default is None) - package : flopy.modflow.Modflow package class instance - flopy package class instance. (Default is None) - kper : int - Stress period to plot - color : string - matplotlib color string. (Default is None) - plotAll : bool - Boolean used to specify that boundary condition locations for all - layers will be plotted on the current ModelMap layer. 
- (Default is False) - **kwargs : dictionary - keyword arguments passed to matplotlib.collections.PatchCollection - - Returns - ------- - quadmesh : matplotlib.collections.QuadMesh - - """ - if 'ftype' in kwargs and name is None: - name = kwargs.pop('ftype') - - # Find package to plot - if package is not None: - p = package - name = p.name[0] - - elif self.model is not None: - if name is None: - raise Exception('ftype not specified') - name = name.upper() - p = self.model.get_package(name) - - else: - raise Exception('Cannot find package to plot') - - # trap for mf6 'cellid' vs mf2005 'k', 'i', 'j' convention - if isinstance(p, list) or p.parent.version == "mf6": - if not isinstance(p, list): - p = [p] - - idx = np.array([]) - for pp in p: - if pp.package_type in ('lak', 'sfr', 'maw', 'uzf'): - t = plotutil.advanced_package_bc_helper(pp, self.mg, - kper) - else: - try: - mflist = pp.stress_period_data.array[kper] - except Exception as e: - raise Exception("Not a list-style boundary package: " - + str(e)) - if mflist is None: - return - - t = np.array([list(i) for i in mflist['cellid']], - dtype=int).T - - if len(idx) == 0: - idx = np.copy(t) - else: - idx = np.append(idx, t, axis=1) - - else: - # modflow-2005 structured and unstructured grid - if p.package_type in ('uzf', 'lak'): - idx = plotutil.advanced_package_bc_helper(p, self.mg, kper) - else: - try: - mflist = p.stress_period_data[kper] - except Exception as e: - raise Exception("Not a list-style boundary package: " - + str(e)) - if mflist is None: - return - if len(self.mg.shape) == 3: - idx = [mflist['k'], mflist['i'], mflist['j']] - else: - idx = mflist['node'] - - nlay = self.mg.nlay - - # Plot the list locations - plotarray = np.zeros(self.mg.shape, dtype=np.int) - if plotAll and self.mg.grid_type != "unstructured": - pa = np.zeros(self.mg.shape[1:], dtype=np.int) - pa[list(idx[1:])] = 1 - for k in range(nlay): - plotarray[k] = pa.copy() - else: - plotarray[tuple(idx)] = 1 - - # mask the plot array - plotarray = np.ma.masked_equal(plotarray, 0) - - # set the colormap - if color is None: - # modflow 6 ftype fix, since multiple packages append _0, _1, etc: - key = name[:3].upper() - if key in plotutil.bc_color_dict: - c = plotutil.bc_color_dict[key] - else: - c = plotutil.bc_color_dict['default'] - else: - c = color - - cmap = matplotlib.colors.ListedColormap(['0', c]) - bounds = [0, 1, 2] - norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N) - - # create normalized quadmesh or patch object depending on grid type - quadmesh = self.plot_array(plotarray, cmap=cmap, norm=norm, **kwargs) - - return quadmesh - - def plot_shapefile(self, shp, **kwargs): - """ - Plot a shapefile. The shapefile must be in the same coordinates as - the rotated and offset grid. - - Parameters - ---------- - shp : string or pyshp shapefile object - Name of the shapefile to plot - - kwargs : dictionary - Keyword arguments passed to plotutil.plot_shapefile() - - """ - if 'ax' in kwargs: - ax = kwargs.pop('ax') - else: - ax = self.ax - patch_collection = plotutil.plot_shapefile(shp, ax, **kwargs) - - return patch_collection - - def plot_cvfd(self, verts, iverts, **kwargs): - """ - Plot a cvfd grid. The vertices must be in the same coordinates as - the rotated and offset grid. - - Parameters - ---------- - verts : ndarray - 2d array of x and y points. 
- iverts : list of lists - should be of len(ncells) with a list of vertex number for each cell - - kwargs : dictionary - Keyword arguments passed to plotutil.plot_cvfd() - - """ - if 'ax' in kwargs: - ax = kwargs.pop('ax') - else: - ax = self.ax - patch_collection = plotutil.plot_cvfd(verts, iverts, ax, self.layer, - **kwargs) - return patch_collection - - def contour_array_cvfd(self, vertc, a, masked_values=None, **kwargs): - """ - Contour a cvfd array. If the array is three-dimensional, then the method - will contour the layer tied to this class (self.layer). The vertices - must be in the same coordinates as the rotated and offset grid. - - Parameters - ---------- - vertc : np.ndarray - Array with of size (nc, 2) with centroid location of cvfd - a : numpy.ndarray - Array to plot. - masked_values : iterable of floats, ints - Values to mask. - **kwargs : dictionary - keyword arguments passed to matplotlib.pyplot.pcolormesh - - Returns - ------- - contour_set : matplotlib.pyplot.contour - - """ - try: - import matplotlib.tri as tri - except ImportError: - err_msg = "Matplotlib must be updated to use contour_array" - raise ImportError(err_msg) - - if 'ncpl' in kwargs: - nlay = self.layer + 1 - ncpl = kwargs.pop('ncpl') - if isinstance(ncpl, int): - i = int(ncpl) - ncpl = np.ones((nlay,), dtype=np.int) * i - elif isinstance(ncpl, list) or isinstance(ncpl, tuple): - ncpl = np.array(ncpl) - i0 = 0 - i1 = 0 - for k in range(nlay): - i0 = i1 - i1 = i0 + ncpl[k] - # retain vertc in selected layer - vertc = vertc[i0:i1, :] - else: - i0 = 0 - i1 = vertc.shape[0] - - plotarray = a[i0:i1] - - ismasked = None - if masked_values is not None: - for mval in masked_values: - if ismasked is None: - ismasked = np.isclose(plotarray, mval) - else: - t = np.isclose(plotarray, mval) - ismasked += t - - # add NaN values to mask - if ismasked is None: - ismasked = np.isnan(plotarray) - else: - ismasked += np.isnan(plotarray) - - if 'ax' in kwargs: - ax = kwargs.pop('ax') - else: - ax = self.ax - - if 'colors' in kwargs.keys(): - if 'cmap' in kwargs.keys(): - kwargs.pop('cmap') - - triang = tri.Triangulation(vertc[:, 0], vertc[:, 1]) - - if ismasked is not None: - ismasked = ismasked.flatten() - mask = np.any(np.where(ismasked[triang.triangles], - True, False), axis=1) - triang.set_mask(mask) - - contour_set = ax.tricontour(triang, plotarray, **kwargs) - - return contour_set - - def plot_vector(self, vx, vy, istep=1, jstep=1, normalize=False, - masked_values=None, **kwargs): - """ - Plot a vector. 
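-
-        Vector components are given in model (non-rotated) coordinates; the
-        method rotates them by the grid's angrot_radians before calling
-        matplotlib's quiver, so only positions carry the grid offsets.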
- - Parameters - ---------- - vx : np.ndarray - x component of the vector to be plotted (non-rotated) - array shape must be (nlay, nrow, ncol) for a structured grid - array shape must be (nlay, ncpl) for a unstructured grid - vy : np.ndarray - y component of the vector to be plotted (non-rotated) - array shape must be (nlay, nrow, ncol) for a structured grid - array shape must be (nlay, ncpl) for a unstructured grid - istep : int - row frequency to plot (default is 1) - jstep : int - column frequency to plot (default is 1) - normalize : bool - boolean flag used to determine if vectors should be normalized - using the vector magnitude in each cell (default is False) - masked_values : iterable of floats - values to mask - kwargs : matplotlib.pyplot keyword arguments for the - plt.quiver method - - Returns - ------- - quiver : matplotlib.pyplot.quiver - result of the quiver function - - """ - if 'pivot' in kwargs: - pivot = kwargs.pop('pivot') - else: - pivot = 'middle' - - if 'ax' in kwargs: - ax = kwargs.pop('ax') - else: - ax = self.ax - - # get actual values to plot - if self.mg.grid_type == "structured": - x = self.mg.xcellcenters[::istep, ::jstep] - y = self.mg.ycellcenters[::istep, ::jstep] - u = vx[self.layer, ::istep, ::jstep] - v = vy[self.layer, ::istep, ::jstep] - else: - x = self.mg.xcellcenters[::istep] - y = self.mg.ycellcenters[::istep] - u = vx[self.layer, ::istep] - v = vy[self.layer, ::istep] - - # if necessary, copy to avoid changing the passed values - if masked_values is not None or normalize: - import copy - u = copy.copy(u) - v = copy.copy(v) - - # mask values - if masked_values is not None: - for mval in masked_values: - to_mask = np.logical_or(u==mval, v==mval) - u[to_mask] = np.nan - v[to_mask] = np.nan - - # normalize - if normalize: - vmag = np.sqrt(u ** 2. + v ** 2.) - idx = vmag > 0. - u[idx] /= vmag[idx] - v[idx] /= vmag[idx] - - # rotate and plot, offsets must be zero since - # these are vectors not locations - urot, vrot = geometry.rotate(u, v, 0., 0., self.mg.angrot_radians) - - # plot with quiver - quiver = ax.quiver(x, y, urot, vrot, pivot=pivot, **kwargs) - - return quiver - - def plot_specific_discharge(self, spdis, istep=1, - jstep=1, normalize=False, **kwargs): - """ - DEPRECATED. Use plot_vector() instead, which should follow after - postprocessing.get_specific_discharge(). - - Method to plot specific discharge from discharge vectors - provided by the cell by cell flow output file. In MODFLOW-6 - this option is controled in the NPF options block. This method - uses matplotlib quiver to create a matplotlib plot of the output. - - Parameters - ---------- - spdis : np.recarray - specific discharge recarray from cbc file - istep : int - row frequency to plot. (Default is 1.) - jstep : int - column frequency to plot. (Default is 1.) - kwargs : matplotlib.pyplot keyword arguments for the - plt.quiver method. - - Returns - ------- - quiver : matplotlib.pyplot.quiver - quiver plot of discharge vectors - - """ - warnings.warn('plot_specific_discharge() has been deprecated. 
Use ' - 'plot_vector() instead, which should follow after ' - 'postprocessing.get_specific_discharge()', - DeprecationWarning) - - if 'pivot' in kwargs: - pivot = kwargs.pop('pivot') - else: - pivot = 'middle' - - if 'ax' in kwargs: - ax = kwargs.pop('ax') - else: - ax = self.ax - - if isinstance(spdis, list): - print("Warning: Selecting the final stress period from Specific" - " Discharge list") - spdis = spdis[-1] - - if self.mg.grid_type == "structured": - ncpl = self.mg.nrow * self.mg.ncol - - else: - ncpl = self.mg.ncpl - - nlay = self.mg.nlay - - qx = np.zeros((nlay * ncpl)) - qy = np.zeros((nlay * ncpl)) - - idx = np.array(spdis['node']) - 1 - qx[idx] = spdis['qx'] - qy[idx] = spdis["qy"] - - if self.mg.grid_type == "structured": - qx.shape = (self.mg.nlay, self.mg.nrow, self.mg.ncol) - qy.shape = (self.mg.nlay, self.mg.nrow, self.mg.ncol) - x = self.mg.xcellcenters[::istep, ::jstep] - y = self.mg.ycellcenters[::istep, ::jstep] - u = qx[:, ::istep, ::jstep] - v = qy[:, ::istep, ::jstep] - else: - qx.shape = (self.mg.nlay, self.mg.ncpl) - qy.shape = (self.mg.nlay, self.mg.ncpl) - x = self.mg.xcellcenters[::istep] - y = self.mg.ycellcenters[::istep] - u = qx[:, ::istep] - v = qy[:, ::istep] - - # normalize - if normalize: - vmag = np.sqrt(u ** 2. + v ** 2.) - idx = vmag > 0. - u[idx] /= vmag[idx] - v[idx] /= vmag[idx] - - u[u == 0] = np.nan - v[v == 0] = np.nan - - u = u[self.layer, :] - v = v[self.layer, :] - # Rotate and plot, offsets must be zero since - # these are vectors not locations - urot, vrot = geometry.rotate(u, v, 0., 0., - self.mg.angrot_radians) - quiver = ax.quiver(x, y, urot, vrot, pivot=pivot, **kwargs) - return quiver - - def plot_discharge(self, frf=None, fff=None, - flf=None, head=None, istep=1, jstep=1, - normalize=False, **kwargs): - """ - DEPRECATED. Use plot_vector() instead, which should follow after - postprocessing.get_specific_discharge(). - - Use quiver to plot vectors. - - Parameters - ---------- - frf : numpy.ndarray - MODFLOW's 'flow right face' - fff : numpy.ndarray - MODFLOW's 'flow front face' - flf : numpy.ndarray - MODFLOW's 'flow lower face' (Default is None.) - head : numpy.ndarray - MODFLOW's head array. If not provided, then will assume confined - conditions in order to calculated saturated thickness. - istep : int - row frequency to plot. (Default is 1.) - jstep : int - column frequency to plot. (Default is 1.) - normalize : bool - boolean flag used to determine if discharge vectors should - be normalized using the magnitude of the specific discharge in each - cell. (default is False) - kwargs : dictionary - Keyword arguments passed to plt.quiver() - - Returns - ------- - quiver : matplotlib.pyplot.quiver - Vectors of specific discharge. - - """ - warnings.warn('plot_discharge() has been deprecated. Use ' - 'plot_vector() instead, which should follow after ' - 'postprocessing.get_specific_discharge()', - DeprecationWarning) - - if self.mg.grid_type != "structured": - err_msg = "Use plot_specific_discharge for " \ - "{} grids".format(self.mg.grid_type) - raise NotImplementedError(err_msg) - - else: - if self.mg.top is None: - err = "StructuredModelGrid must have top and " \ - "botm defined to use plot_discharge()" - raise AssertionError(err) - - ib = np.ones((self.mg.nlay, self.mg.nrow, self.mg.ncol)) - if self.mg.idomain is not None: - ib = self.mg.idomain - - delr = self.mg.delr - delc = self.mg.delc - top = np.copy(self.mg.top) - botm = np.copy(self.mg.botm) - laytyp = None - hnoflo = 999. - hdry = 999. 
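-            # hnoflo and hdry start as 999. placeholder flags; the model's
-            # own values replace them below when available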
- laycbd = None - - if self.model is not None: - if self.model.laytyp is not None: - laytyp = self.model.laytyp - - if self.model.hnoflo is not None: - hnoflo = self.model.hnoflo - - if self.model.hdry is not None: - hdry = self.model.hdry - - if self.model.laycbd is not None: - laycbd = self.model.laycbd - - if laycbd is not None and 1 in laycbd: - active = np.ones((botm.shape[0],), dtype=np.int) - kon = 0 - for cbd in laycbd: - if cbd > 0: - kon += 1 - active[kon] = 0 - botm = botm[active==1] - - # If no access to head or laytyp, then calculate confined saturated - # thickness by setting laytyp to zeros - if head is None or laytyp is None: - head = np.zeros(botm.shape, np.float32) - laytyp = np.zeros((botm.shape[0],), dtype=np.int) - - # calculate the saturated thickness - sat_thk = plotutil.PlotUtilities. \ - saturated_thickness(head, top, botm, laytyp, - [hnoflo, hdry]) - - # Calculate specific discharge - qx, qy, qz = plotutil.PlotUtilities. \ - centered_specific_discharge(frf, fff, flf, delr, - delc, sat_thk) - ib = ib.ravel() - qx = qx.ravel() - qy = qy.ravel() - del qz - - temp = [] - for ix, val in enumerate(ib): - if val != 0: - temp.append((ix + 1, qx[ix], qy[ix])) - - spdis = np.recarray((len(temp),), dtype=[('node', np.int), - ("qx", np.float), - ("qy", np.float)]) - for ix, tup in enumerate(temp): - spdis[ix] = tup - - return self.plot_specific_discharge(spdis, istep=istep, - jstep=jstep, - normalize=normalize, **kwargs) - - def plot_pathline(self, pl, travel_time=None, **kwargs): - """ - Plot the MODPATH pathlines. - - Parameters - ---------- - pl : list of rec arrays or a single rec array - rec array or list of rec arrays is data returned from - modpathfile PathlineFile get_data() or get_alldata() - methods. Data in rec array is 'x', 'y', 'z', 'time', - 'k', and 'particleid'. - travel_time : float or str - travel_time is a travel time selection for the displayed - pathlines. If a float is passed then pathlines with times - less than or equal to the passed time are plotted. If a - string is passed a variety logical constraints can be added - in front of a time value to select pathlines for a select - period of time. Valid logical constraints are <=, <, >=, and - >. For example, to select all pathlines less than 10000 days - travel_time='< 10000' would be passed to plot_pathline. - (default is None) - kwargs : layer, ax, colors. The remaining kwargs are passed - into the LineCollection constructor. 
If layer='all',
-            pathlines are output for all layers
-
-        Returns
-        -------
-        lc : matplotlib.collections.LineCollection
-
-        """
-        from matplotlib.collections import LineCollection
-        # make sure pathlines is a list
-        if not isinstance(pl, list):
-            pl = [pl]
-
-        if 'layer' in kwargs:
-            kon = kwargs.pop('layer')
-            if isinstance(kon, bytes):
-                kon = kon.decode()
-            if isinstance(kon, str):
-                if kon.lower() == 'all':
-                    # kon < 0 is the flag for plotting every layer
-                    kon = -1
-                else:
-                    kon = self.layer
-        else:
-            kon = self.layer
-
-        if 'marker' in kwargs:
-            marker = kwargs.pop('marker')
-        else:
-            marker = None
-
-        if 'markersize' in kwargs:
-            markersize = kwargs.pop('markersize')
-        elif 'ms' in kwargs:
-            markersize = kwargs.pop('ms')
-        else:
-            markersize = None
-
-        if 'markercolor' in kwargs:
-            markercolor = kwargs.pop('markercolor')
-        else:
-            markercolor = None
-
-        if 'markerevery' in kwargs:
-            markerevery = kwargs.pop('markerevery')
-        else:
-            markerevery = 1
-
-        if 'ax' in kwargs:
-            ax = kwargs.pop('ax')
-        else:
-            ax = self.ax
-
-        if 'colors' not in kwargs:
-            kwargs['colors'] = '0.5'
-
-        linecol = []
-        markers = []
-        for p in pl:
-            if travel_time is None:
-                tp = p.copy()
-            else:
-                if isinstance(travel_time, str):
-                    if '<=' in travel_time:
-                        time = float(travel_time.replace('<=', ''))
-                        idx = (p['time'] <= time)
-                    elif '<' in travel_time:
-                        time = float(travel_time.replace('<', ''))
-                        idx = (p['time'] < time)
-                    elif '>=' in travel_time:
-                        time = float(travel_time.replace('>=', ''))
-                        idx = (p['time'] >= time)
-                    elif '>' in travel_time:
-                        time = float(travel_time.replace('>', ''))
-                        idx = (p['time'] > time)
-                    else:
-                        try:
-                            time = float(travel_time)
-                            idx = (p['time'] <= time)
-                        except (TypeError, ValueError):
-                            errmsg = 'flopy.map.plot_pathline travel_time ' + \
-                                     'variable cannot be parsed. ' + \
-                                     'Acceptable logical operators are ' + \
-                                     '<=, <, >=, and >. ' + \
-                                     'You passed {}'.format(travel_time)
-                            raise Exception(errmsg)
-                else:
-                    time = float(travel_time)
-                    idx = (p['time'] <= time)
-                tp = p[idx]
-
-            # transform data!
-            x0r, y0r = geometry.transform(tp['x'], tp['y'],
-                                          self.mg.xoffset,
-                                          self.mg.yoffset,
-                                          self.mg.angrot_radians)
-            # build polyline array
-            arr = np.vstack((x0r, y0r)).T
-            # select based on layer; use the time-filtered records so the
-            # layer array matches the polyline array shape
-            if kon >= 0:
-                kk = tp['k'].copy().reshape(tp.shape[0], 1)
-                kk = np.repeat(kk, 2, axis=1)
-                arr = np.ma.masked_where((kk != kon), arr)
-            else:
-                arr = np.ma.asarray(arr)
-            # append line to linecol if there is some unmasked segment
-            if not arr.mask.all():
-                linecol.append(arr)
-                if marker is not None:
-                    for xy in arr[::markerevery]:
-                        if not np.any(xy.mask):
-                            markers.append(xy)
-        # create line collection
-        lc = None
-        if len(linecol) > 0:
-            lc = LineCollection(linecol, **kwargs)
-            ax.add_collection(lc)
-            if marker is not None and len(markers) > 0:
-                markers = np.array(markers)
-                ax.plot(markers[:, 0], markers[:, 1], lw=0, marker=marker,
-                        color=markercolor, ms=markersize)
-        return lc
-
-    def plot_timeseries(self, ts, travel_time=None, **kwargs):
-        """
-        Plot the MODPATH timeseries.
-
-        Parameters
-        ----------
-        ts : list of rec arrays or a single rec array
-            rec array or list of rec arrays is data returned from
-            modpathfile TimeseriesFile get_data() or get_alldata()
-            methods. Data in rec array is 'x', 'y', 'z', 'time',
-            'k', and 'particleid'.
-        travel_time : float or str
-            travel_time is a travel time selection for the displayed
-            timeseries. If a float is passed then points with times
-            less than or equal to the passed time are plotted.
-            If a string is passed, a variety of logical constraints can be
-            added in front of a time value to select points for a select
-            period of time. Valid logical constraints are <=, <, >=, and
-            >. For example, to select all points with times less than
-            10000 days, travel_time='< 10000' would be passed to
-            plot_timeseries. (default is None)
-        kwargs : layer, ax, color. The remaining kwargs are passed
-            into the matplotlib line plot call. If layer='all',
-            timeseries are plotted for all layers
-
-        Returns
-        -------
-        lo : list of Line2D objects
-        """
-
-        # make sure timeseries is a list
-        if not isinstance(ts, list):
-            ts = [ts]
-
-        if 'layer' in kwargs:
-            kon = kwargs.pop('layer')
-
-            if isinstance(kon, bytes):
-                kon = kon.decode()
-
-            if isinstance(kon, str):
-                if kon.lower() == 'all':
-                    kon = -1
-                else:
-                    kon = self.layer
-        else:
-            kon = self.layer
-
-        if 'ax' in kwargs:
-            ax = kwargs.pop('ax')
-        else:
-            ax = self.ax
-
-        if 'color' not in kwargs:
-            kwargs['color'] = 'red'
-
-        linecol = []
-        for t in ts:
-            if travel_time is None:
-                tp = t.copy()
-            else:
-                if isinstance(travel_time, str):
-                    if '<=' in travel_time:
-                        time = float(travel_time.replace('<=', ''))
-                        idx = (t['time'] <= time)
-                    elif '<' in travel_time:
-                        time = float(travel_time.replace('<', ''))
-                        idx = (t['time'] < time)
-                    elif '>=' in travel_time:
-                        time = float(travel_time.replace('>=', ''))
-                        idx = (t['time'] >= time)
-                    elif '>' in travel_time:
-                        time = float(travel_time.replace('>', ''))
-                        idx = (t['time'] > time)
-                    else:
-                        try:
-                            time = float(travel_time)
-                            idx = (t['time'] <= time)
-                        except (TypeError, ValueError):
-                            errmsg = 'flopy.map.plot_timeseries ' + \
-                                     'travel_time variable cannot be ' + \
-                                     'parsed. Acceptable logical ' + \
-                                     'operators are <=, <, >=, and >. ' + \
-                                     'You passed {}'.format(travel_time)
-                            raise Exception(errmsg)
-                else:
-                    time = float(travel_time)
-                    idx = (t['time'] <= time)
-                # index the current rec array, not the enclosing list
-                tp = t[idx]
-
-            x0r, y0r = geometry.transform(tp['x'], tp['y'],
-                                          self.mg.xoffset,
-                                          self.mg.yoffset,
-                                          self.mg.angrot_radians)
-
-            # build polyline array
-            arr = np.vstack((x0r, y0r)).T
-            # select based on layer; use the time-filtered records so the
-            # layer array matches the polyline array shape
-            if kon >= 0:
-                kk = tp['k'].copy().reshape(tp.shape[0], 1)
-                kk = np.repeat(kk, 2, axis=1)
-                arr = np.ma.masked_where((kk != kon), arr)
-            else:
-                arr = np.ma.asarray(arr)
-
-            # append line to linecol if there is some unmasked segment
-            if not arr.mask.all():
-                linecol.append(arr)
-
-        # plot timeseries data
-        lo = []
-        for lc in linecol:
-            if not lc.mask.all():
-                lo += ax.plot(lc[:, 0], lc[:, 1], **kwargs)
-
-        return lo
-
-    def plot_endpoint(self, ep, direction='ending',
-                      selection=None, selection_direction=None, **kwargs):
-        """
-        Plot the MODPATH endpoints.
-
-        Parameters
-        ----------
-        ep : rec array
-            A numpy recarray with the endpoint particle data from the
-            MODPATH 6 endpoint file
-        direction : str
-            String defining if starting or ending particle locations should be
-            considered. (default is 'ending')
-        selection : tuple
-            tuple that defines the zero-based layer, row, column location
-            (l, r, c) to use to make a selection of particle endpoints.
-            The selection could be a well location to determine capture zone
-            for the well. If selection is None, all particle endpoints for
-            the user-specified direction will be plotted. (default is None)
-        selection_direction : str
-            String defining if a selection should be made on starting or
-            ending particle locations. If selection is not None and
-            selection_direction is None, the selection direction will be set
-            to the opposite of direction.
(default is None) - - kwargs : ax, c, s or size, colorbar, colorbar_label, shrink. The - remaining kwargs are passed into the matplotlib scatter - method. If colorbar is True a colorbar will be added to the plot. - If colorbar_label is passed in and colorbar is True then - colorbar_label will be passed to the colorbar set_label() - method. If shrink is passed in and colorbar is True then - the colorbar size will be set using shrink. - - Returns - ------- - sp : matplotlib.pyplot.scatter - - """ - ep = ep.copy() - direction = direction.lower() - if direction == 'starting': - xp, yp = 'x0', 'y0' - - elif direction == 'ending': - xp, yp = 'x', 'y' - - else: - errmsg = 'flopy.map.plot_endpoint direction must be "ending" ' + \ - 'or "starting".' - raise Exception(errmsg) - - if selection_direction is not None: - if selection_direction.lower() != 'starting' and \ - selection_direction.lower() != 'ending': - errmsg = 'flopy.map.plot_endpoint selection_direction ' + \ - 'must be "ending" or "starting".' - raise Exception(errmsg) - else: - if direction.lower() == 'starting': - selection_direction = 'ending' - elif direction.lower() == 'ending': - selection_direction = 'starting' - - # selection of endpoints - if selection is not None: - if isinstance(selection, int): - selection = tuple((selection,)) - try: - if len(selection) == 1: - node = selection[0] - if selection_direction.lower() == 'starting': - nsel = 'node0' - else: - nsel = 'node' - # make selection - idx = (ep[nsel] == node) - tep = ep[idx] - elif len(selection) == 3: - k, i, j = selection[0], selection[1], selection[2] - if selection_direction.lower() == 'starting': - ksel, isel, jsel = 'k0', 'i0', 'j0' - else: - ksel, isel, jsel = 'k', 'i', 'j' - # make selection - idx = (ep[ksel] == k) & (ep[isel] == i) & (ep[jsel] == j) - tep = ep[idx] - else: - errmsg = 'flopy.map.plot_endpoint selection must be ' + \ - 'a zero-based layer, row, column tuple ' + \ - '(l, r, c) or node number (MODPATH 7) of ' + \ - 'the location to evaluate (i.e., well location).' - raise Exception(errmsg) - except: - errmsg = 'flopy.map.plot_endpoint selection must be a ' + \ - 'zero-based layer, row, column tuple (l, r, c) ' + \ - 'or node number (MODPATH 7) of the location ' + \ - 'to evaluate (i.e., well location).' - raise Exception(errmsg) - # all endpoints - else: - tep = ep.copy() - - if 'ax' in kwargs: - ax = kwargs.pop('ax') - else: - ax = self.ax - - # scatter kwargs that users may redefine - if 'c' not in kwargs: - c = tep['time'] - tep['time0'] - else: - c = np.empty((tep.shape[0]), dtype="S30") - c.fill(kwargs.pop('c')) - - s = 50 - if 's' in kwargs: - s = float(kwargs.pop('s')) ** 2. - elif 'size' in kwargs: - s = float(kwargs.pop('size')) ** 2. - - # colorbar kwargs - createcb = False - if 'colorbar' in kwargs: - createcb = kwargs.pop('colorbar') - - colorbar_label = 'Endpoint Time' - if 'colorbar_label' in kwargs: - colorbar_label = kwargs.pop('colorbar_label') - - shrink = 1. - if 'shrink' in kwargs: - shrink = float(kwargs.pop('shrink')) - - # transform data! 
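-        # (shift by the grid's xoffset/yoffset and rotate by angrot_radians
-        # so the endpoint coordinates plot in map coordinates)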
- x0r, y0r = geometry.transform(tep[xp], tep[yp], - self.mg.xoffset, - self.mg.yoffset, - self.mg.angrot_radians) - # build array to plot - arr = np.vstack((x0r, y0r)).T - - # plot the end point data - sp = ax.scatter(arr[:, 0], arr[:, 1], c=c, s=s, **kwargs) - - # add a colorbar for travel times - if createcb: - cb = plt.colorbar(sp, ax=ax, shrink=shrink) - cb.set_label(colorbar_label) - return sp - - -class DeprecatedMapView(PlotMapView): - """ - Deprecation handler for the PlotMapView class - - Parameters - ---------- - model : flopy.modflow.Modflow object - modelgrid : flopy.discretization.Grid object - ax : matplotlib.pyplot.axes object - layer : int - model layer to plot, default is layer 1 - extent : tuple of floats - (xmin, xmax, ymin, ymax) will be used to specify axes limits. If None - then these will be calculated based on grid, coordinates, and rotation. - - """ - - def __init__(self, model=None, modelgrid=None, ax=None, - layer=0, extent=None): - super(DeprecatedMapView, self).__init__(model=model, - modelgrid=modelgrid, - ax=ax, - layer=layer, - extent=extent) - - def plot_discharge(self, frf, fff, dis=None, - flf=None, head=None, istep=1, jstep=1, - normalize=False, **kwargs): - """ - Use quiver to plot vectors. Deprecated method that uses - the old function call to pass the method to PlotMapView - - Parameters - ---------- - frf : numpy.ndarray - MODFLOW's 'flow right face' - fff : numpy.ndarray - MODFLOW's 'flow front face' - dis : flopy.modflow.ModflowDis package - Depricated parameter - flf : numpy.ndarray - MODFLOW's 'flow lower face' (Default is None.) - head : numpy.ndarray - MODFLOW's head array. If not provided, then will assume confined - conditions in order to calculated saturated thickness. - istep : int - row frequency to plot. (Default is 1.) - jstep : int - column frequency to plot. (Default is 1.) - normalize : bool - boolean flag used to determine if discharge vectors should - be normalized using the magnitude of the specific discharge in each - cell. (default is False) - kwargs : dictionary - Keyword arguments passed to plt.quiver() - - Returns - ------- - quiver : matplotlib.pyplot.quiver - Vectors of specific discharge. - - """ - - if dis is not None: - self.mg = plotutil._depreciated_dis_handler(modelgrid=self.mg, - dis=dis) - - super(DeprecatedMapView, self).plot_discharge(frf=frf, fff=fff, - flf=flf, head=head, - istep=1, jstep=1, - normalize=normalize, - **kwargs) - - -class ModelMap(object): - """ - Pending Depreciation: ModelMap acts as a PlotMapView factory - object. Please migrate to PlotMapView for plotting - functionality and future code compatibility - - Parameters - ---------- - sr : flopy.utils.reference.SpatialReference - The spatial reference class (Default is None) - ax : matplotlib.pyplot axis - The plot axis. If not provided it, plt.gca() will be used. - If there is not a current axis then a new one will be created. - model : flopy.modflow object - flopy model object. (Default is None) - dis : flopy.modflow.ModflowDis object - flopy discretization object. (Default is None) - layer : int - Layer to plot. Default is 0. Must be between 0 and nlay - 1. - xul : float - x coordinate for upper left corner - yul : float - y coordinate for upper left corner. The default is the sum of the - delc array. - rotation : float - Angle of grid rotation around the upper left corner. A positive value - indicates clockwise rotation. Angles are in degrees. - extent : tuple of floats - (xmin, xmax, ymin, ymax) will be used to specify axes limits. 
If None - then these will be calculated based on grid, coordinates, and rotation. - length_multiplier : float - scaling factor for conversion from model units to another unit - length base ex. ft to m. - - Notes - ----- - ModelMap must know the position and rotation of the grid in order to make - the plot. This information is contained in the SpatialReference class - (sr), which can be passed. If sr is None, then it looks for sr in dis. - If dis is None, then it looks for sr in model.dis. If all of these - arguments are none, then it uses xul, yul, and rotation. If none of these - arguments are provided, then it puts the lower-left-hand corner of the - grid at (0, 0). - """ - - def __new__(cls, sr=None, ax=None, model=None, dis=None, layer=0, - extent=None, xul=None, yul=None, xll=None, yll=None, - rotation=None, length_multiplier=None): - - from ..utils.reference import SpatialReferenceUnstructured - # from ..plot.plotbase import DeprecatedMapView - - err_msg = "ModelMap will be replaced by " \ - "PlotMapView(); Calling PlotMapView()" - warnings.warn(err_msg, PendingDeprecationWarning) - - modelgrid = None - if model is not None: - if (xul, yul, xll, yll, rotation) != (None, None, - None, None, None): - modelgrid = plotutil._set_coord_info(model.modelgrid, - xul, yul, xll, yll, - rotation) - elif sr is not None: - if length_multiplier is not None: - sr.length_multiplier = length_multiplier - - if (xul, yul, xll, yll, rotation) != (None, None, - None, None, None): - sr.set_spatialreference(xul, yul, xll, yll, rotation) - - if isinstance(sr, SpatialReferenceUnstructured): - if dis is not None: - modelgrid = UnstructuredGrid(vertices=sr.verts, - iverts=sr.iverts, - xcenters=sr.xc, - ycenters=sr.yc, - top=dis.top.array, - botm=dis.botm.array, - ncpl=sr.ncpl) - else: - modelgrid = UnstructuredGrid(vertices=sr.verts, - iverts=sr.iverts, - xcenters=sr.xc, - ycenters=sr.yc, - ncpl=sr.ncpl) - - elif dis is not None: - modelgrid = StructuredGrid(delc=sr.delc, delr=sr.delr, - top=dis.top.array, - botm=dis.botm.array, - xoff=sr.xll, yoff=sr.yll, - angrot=sr.rotation) - else: - modelgrid = StructuredGrid(delc=sr.delc, delr=sr.delr, - xoff=sr.xll, yoff=sr.yll, - angrot=sr.rotation) - - else: - pass - - return DeprecatedMapView(model=model, modelgrid=modelgrid, ax=ax, - layer=layer, extent=extent) +import numpy as np +from ..discretization import StructuredGrid, UnstructuredGrid +from ..utils import geometry + +try: + import matplotlib.pyplot as plt + import matplotlib.colors + from matplotlib.collections import PatchCollection + from matplotlib.patches import Polygon +except ImportError: + plt = None + +from . import plotutil +import warnings + +warnings.simplefilter('always', PendingDeprecationWarning) + + +class PlotMapView(object): + """ + Class to create a map of the model. Delegates plotting + functionality based on model grid type. + + Parameters + ---------- + modelgrid : flopy.discretiztion.Grid + The modelgrid class can be StructuredGrid, VertexGrid, + or UnstructuredGrid (Default is None) + ax : matplotlib.pyplot axis + The plot axis. If not provided it, plt.gca() will be used. + If there is not a current axis then a new one will be created. + model : flopy.modflow object + flopy model object. (Default is None) + layer : int + Layer to plot. Default is 0. Must be between 0 and nlay - 1. + extent : tuple of floats + (xmin, xmax, ymin, ymax) will be used to specify axes limits. If None + then these will be calculated based on grid, coordinates, and rotation. 
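+
+    Examples
+    --------
+    A minimal usage sketch; the model and file names are hypothetical
+    placeholders rather than part of this changeset:
+
+    >>> import flopy
+    >>> ml = flopy.modflow.Modflow.load('model.nam')
+    >>> pmv = flopy.plot.PlotMapView(model=ml, layer=0)
+    >>> quadmesh = pmv.plot_array(ml.dis.top.array)
+    >>> lc = pmv.plot_grid()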
+ + Notes + ----- + + + """ + + def __init__(self, model=None, modelgrid=None, ax=None, + layer=0, extent=None): + + if plt is None: + s = 'Could not import matplotlib. Must install matplotlib ' + \ + ' in order to use ModelMap method' + raise ImportError(s) + + self.model = model + self.layer = layer + self.mg = None + + if model is not None: + self.mg = model.modelgrid + + elif modelgrid is not None: + self.mg = modelgrid + + else: + err_msg = "A model grid instance must be provided to PlotMapView" + raise AssertionError(err_msg) + + if self.mg.grid_type not in ("structured", "vertex", + "unstructured"): + err_msg = "Unrecognized modelgrid type {}" + raise TypeError(err_msg.format(self.mg.grid_type)) + + if ax is None: + try: + self.ax = plt.gca() + self.ax.set_aspect('equal') + except: + self.ax = plt.subplot(1, 1, 1, aspect='equal', axisbg="white") + else: + self.ax = ax + + if extent is not None: + self._extent = extent + else: + self._extent = None + + @property + def extent(self): + if self._extent is None: + self._extent = self.mg.extent + return self._extent + + def plot_array(self, a, masked_values=None, **kwargs): + """ + Plot an array. If the array is three-dimensional, then the method + will plot the layer tied to this class (self.layer). + + Parameters + ---------- + a : numpy.ndarray + Array to plot. + masked_values : iterable of floats, ints + Values to mask. + **kwargs : dictionary + keyword arguments passed to matplotlib.pyplot.pcolormesh + + Returns + ------- + quadmesh : matplotlib.collections.QuadMesh or + matplotlib.collections.PatchCollection + + """ + if not isinstance(a, np.ndarray): + a = np.array(a) + + if self.mg.grid_type == "structured": + if a.ndim == 3: + plotarray = a[self.layer, :, :] + elif a.ndim == 2: + plotarray = a + elif a.ndim == 1: + plotarray = a + else: + raise Exception('Array must be of dimension 1, 2, or 3') + + elif self.mg.grid_type == "vertex": + if a.ndim == 3: + if a.shape[0] == 1: + a = np.squeeze(a, axis=0) + plotarray = a[self.layer, :] + elif a.shape[1] == 1: + a = np.squeeze(a, axis=1) + plotarray = a[self.layer, :] + else: + raise Exception("Array must be of dimension 1 or 2") + elif a.ndim == 2: + plotarray = a[self.layer, :] + elif a.ndim == 1: + plotarray = a + else: + raise Exception('Array must be of dimension 1 or 2') + + elif self.mg.grid_type == "unstructured": + plotarray = a + + else: + raise TypeError( + "Unrecognized grid type {}".format(self.mg.grid_type)) + + if masked_values is not None: + for mval in masked_values: + plotarray = np.ma.masked_values(plotarray, mval) + + # add NaN values to mask + plotarray = np.ma.masked_where(np.isnan(plotarray), plotarray) + + if 'ax' in kwargs: + ax = kwargs.pop('ax') + else: + ax = self.ax + + if self.mg.grid_type in ("structured", "vertex"): + xgrid = np.array(self.mg.xvertices) + ygrid = np.array(self.mg.yvertices) + + if self.mg.grid_type == "structured": + quadmesh = ax.pcolormesh(xgrid, ygrid, plotarray) + + else: + patches = [Polygon(list(zip(xgrid[i], ygrid[i])), closed=True) + for i in range(xgrid.shape[0])] + + quadmesh = PatchCollection(patches) + quadmesh.set_array(plotarray) + + else: + quadmesh = plotutil.plot_cvfd(self.mg._vertices, self.mg._iverts, + a=plotarray, ax=ax) + + # set max and min + if 'vmin' in kwargs: + vmin = kwargs.pop('vmin') + else: + vmin = None + + if 'vmax' in kwargs: + vmax = kwargs.pop('vmax') + else: + vmax = None + + quadmesh.set_clim(vmin=vmin, vmax=vmax) + + # send rest of kwargs to quadmesh + quadmesh.set(**kwargs) + + # add collection to 
axis + ax.add_collection(quadmesh) + + # set limits + ax.set_xlim(self.extent[0], self.extent[1]) + ax.set_ylim(self.extent[2], self.extent[3]) + return quadmesh + + def contour_array(self, a, masked_values=None, **kwargs): + """ + Contour an array. If the array is three-dimensional, then the method + will contour the layer tied to this class (self.layer). + + Parameters + ---------- + a : numpy.ndarray + Array to plot. + masked_values : iterable of floats, ints + Values to mask. + **kwargs : dictionary + keyword arguments passed to matplotlib.pyplot.pcolormesh + + Returns + ------- + contour_set : matplotlib.pyplot.contour + + """ + try: + import matplotlib.tri as tri + except ImportError: + err_msg = "Matplotlib must be updated to use contour_array" + raise ImportError(err_msg) + + a = np.copy(a) + if not isinstance(a, np.ndarray): + a = np.array(a) + + xcentergrid = np.array(self.mg.xcellcenters) + ycentergrid = np.array(self.mg.ycellcenters) + + if self.mg.grid_type == "structured": + if a.ndim == 3: + plotarray = a[self.layer, :, :] + elif a.ndim == 2: + plotarray = a + elif a.ndim == 1: + plotarray = a + else: + raise Exception('Array must be of dimension 1, 2 or 3') + + elif self.mg.grid_type == "vertex": + if a.ndim == 3: + if a.shape[0] == 1: + a = np.squeeze(a, axis=0) + plotarray = a[self.layer, :] + elif a.shape[1] == 1: + a = np.squeeze(a, axis=1) + plotarray = a[self.layer, :] + else: + raise Exception("Array must be of dimension 1 or 2") + elif a.ndim == 2: + plotarray = a[self.layer, :] + elif a.ndim == 1: + plotarray = a + else: + raise Exception('Array must be of dimension 1, 2 or 3') + + else: + plotarray = a + + # work around for tri-contour ignore vmin & vmax + # necessary block for tri-contour NaN issue + if "levels" not in kwargs: + if "vmin" not in kwargs: + vmin = np.nanmin(plotarray) + else: + vmin = kwargs.pop("vmin") + if "vmax" not in kwargs: + vmax = np.nanmax(plotarray) + else: + vmax = kwargs.pop('vmax') + + levels = np.linspace(vmin, vmax, 7) + kwargs['levels'] = levels + + # workaround for tri-contour nan issue + # use -2**31 to allow for 32 bit int arrays + plotarray[np.isnan(plotarray)] = -2**31 + if masked_values is None: + masked_values = [-2**31] + else: + masked_values = list(masked_values) + if -2**31 not in masked_values: + masked_values.append(-2**31) + + ismasked = None + if masked_values is not None: + for mval in masked_values: + if ismasked is None: + ismasked = np.isclose(plotarray, mval) + else: + t = np.isclose(plotarray, mval) + ismasked += t + + if 'ax' in kwargs: + ax = kwargs.pop('ax') + else: + ax = self.ax + + if 'colors' in kwargs.keys(): + if 'cmap' in kwargs.keys(): + kwargs.pop('cmap') + + plot_triplot = False + if 'plot_triplot' in kwargs: + plot_triplot = kwargs.pop('plot_triplot') + + if 'extent' in kwargs: + extent = kwargs.pop('extent') + + if self.mg.grid_type in ('structured', 'vertex'): + idx = (xcentergrid >= extent[0]) & ( + xcentergrid <= extent[1]) & ( + ycentergrid >= extent[2]) & ( + ycentergrid <= extent[3]) + plotarray = plotarray[idx] + xcentergrid = xcentergrid[idx] + ycentergrid = ycentergrid[idx] + + plotarray = plotarray.flatten() + xcentergrid = xcentergrid.flatten() + ycentergrid = ycentergrid.flatten() + triang = tri.Triangulation(xcentergrid, ycentergrid) + + if ismasked is not None: + ismasked = ismasked.flatten() + mask = np.any(np.where(ismasked[triang.triangles], + True, False), axis=1) + triang.set_mask(mask) + + contour_set = ax.tricontour(triang, plotarray, **kwargs) + + if plot_triplot: + 
ax.triplot(triang, color='black', marker='o', lw=0.75) + + ax.set_xlim(self.extent[0], self.extent[1]) + ax.set_ylim(self.extent[2], self.extent[3]) + + return contour_set + + def plot_inactive(self, ibound=None, color_noflow='black', **kwargs): + """ + Make a plot of inactive cells. If not specified, then pull ibound + from the self.ml + + Parameters + ---------- + ibound : numpy.ndarray + ibound array to plot. (Default is ibound in 'BAS6' package.) + + color_noflow : string + (Default is 'black') + + Returns + ------- + quadmesh : matplotlib.collections.QuadMesh + + """ + if ibound is None: + if self.mg.idomain is None: + raise AssertionError("Ibound/Idomain array must be provided") + + ibound = self.mg.idomain + + plotarray = np.zeros(ibound.shape, dtype=np.int) + idx1 = (ibound == 0) + plotarray[idx1] = 1 + plotarray = np.ma.masked_equal(plotarray, 0) + cmap = matplotlib.colors.ListedColormap(['0', color_noflow]) + bounds = [0, 1, 2] + norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N) + quadmesh = self.plot_array(plotarray, cmap=cmap, norm=norm, **kwargs) + return quadmesh + + def plot_ibound(self, ibound=None, color_noflow='black', color_ch='blue', + color_vpt='red', **kwargs): + """ + Make a plot of ibound. If not specified, then pull ibound from the + self.ml + + Parameters + ---------- + ibound : numpy.ndarray + ibound array to plot. (Default is ibound in the modelgrid) + color_noflow : string + (Default is 'black') + color_ch : string + Color for constant heads (Default is 'blue'.) + color_vpt: string + Color for vertical pass through cells (Default is 'red') + + Returns + ------- + quadmesh : matplotlib.collections.QuadMesh + + """ + import matplotlib.colors + + if ibound is None: + if self.model is not None: + if self.model.version == "mf6": + color_ch = color_vpt + + if self.mg.idomain is None: + raise AssertionError("Ibound/Idomain array must be provided") + + ibound = self.mg.idomain + + plotarray = np.zeros(ibound.shape, dtype=np.int) + idx1 = (ibound == 0) + idx2 = (ibound < 0) + plotarray[idx1] = 1 + plotarray[idx2] = 2 + plotarray = np.ma.masked_equal(plotarray, 0) + cmap = matplotlib.colors.ListedColormap(['0', color_noflow, color_ch]) + bounds = [0, 1, 2, 3] + norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N) + quadmesh = self.plot_array(plotarray, cmap=cmap, norm=norm, **kwargs) + return quadmesh + + def plot_grid(self, **kwargs): + """ + Plot the grid lines. + + Parameters + ---------- + kwargs : ax, colors. The remaining kwargs are passed into the + the LineCollection constructor. + + Returns + ------- + lc : matplotlib.collections.LineCollection + + """ + from matplotlib.collections import LineCollection + + if 'ax' in kwargs: + ax = kwargs.pop('ax') + else: + ax = self.ax + + if 'colors' not in kwargs: + kwargs['colors'] = '0.5' + + lc = LineCollection(self.mg.grid_lines, **kwargs) + + ax.add_collection(lc) + ax.set_xlim(self.extent[0], self.extent[1]) + ax.set_ylim(self.extent[2], self.extent[3]) + + return lc + + def plot_bc(self, name=None, package=None, kper=0, color=None, + plotAll=False, **kwargs): + """ + Plot boundary conditions locations for a specific boundary + type from a flopy model + + Parameters + ---------- + name : string + Package name string ('WEL', 'GHB', etc.). (Default is None) + package : flopy.modflow.Modflow package class instance + flopy package class instance. (Default is None) + kper : int + Stress period to plot + color : string + matplotlib color string. 
(Default is None) + plotAll : bool + Boolean used to specify that boundary condition locations for all + layers will be plotted on the current ModelMap layer. + (Default is False) + **kwargs : dictionary + keyword arguments passed to matplotlib.collections.PatchCollection + + Returns + ------- + quadmesh : matplotlib.collections.QuadMesh + + """ + if 'ftype' in kwargs and name is None: + name = kwargs.pop('ftype') + + # Find package to plot + if package is not None: + p = package + name = p.name[0] + + elif self.model is not None: + if name is None: + raise Exception('ftype not specified') + name = name.upper() + p = self.model.get_package(name) + + else: + raise Exception('Cannot find package to plot') + + # trap for mf6 'cellid' vs mf2005 'k', 'i', 'j' convention + if isinstance(p, list) or p.parent.version == "mf6": + if not isinstance(p, list): + p = [p] + + idx = np.array([]) + for pp in p: + if pp.package_type in ('lak', 'sfr', 'maw', 'uzf'): + t = plotutil.advanced_package_bc_helper(pp, self.mg, + kper) + else: + try: + mflist = pp.stress_period_data.array[kper] + except Exception as e: + raise Exception("Not a list-style boundary package: " + + str(e)) + if mflist is None: + return + + t = np.array([list(i) for i in mflist['cellid']], + dtype=int).T + + if len(idx) == 0: + idx = np.copy(t) + else: + idx = np.append(idx, t, axis=1) + + else: + # modflow-2005 structured and unstructured grid + if p.package_type in ('uzf', 'lak'): + idx = plotutil.advanced_package_bc_helper(p, self.mg, kper) + else: + try: + mflist = p.stress_period_data[kper] + except Exception as e: + raise Exception("Not a list-style boundary package: " + + str(e)) + if mflist is None: + return + if len(self.mg.shape) == 3: + idx = [mflist['k'], mflist['i'], mflist['j']] + else: + idx = mflist['node'] + + nlay = self.mg.nlay + + # Plot the list locations + plotarray = np.zeros(self.mg.shape, dtype=np.int) + if plotAll and self.mg.grid_type != "unstructured": + pa = np.zeros(self.mg.shape[1:], dtype=np.int) + pa[list(idx[1:])] = 1 + for k in range(nlay): + plotarray[k] = pa.copy() + else: + plotarray[tuple(idx)] = 1 + + # mask the plot array + plotarray = np.ma.masked_equal(plotarray, 0) + + # set the colormap + if color is None: + # modflow 6 ftype fix, since multiple packages append _0, _1, etc: + key = name[:3].upper() + if key in plotutil.bc_color_dict: + c = plotutil.bc_color_dict[key] + else: + c = plotutil.bc_color_dict['default'] + else: + c = color + + cmap = matplotlib.colors.ListedColormap(['0', c]) + bounds = [0, 1, 2] + norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N) + + # create normalized quadmesh or patch object depending on grid type + quadmesh = self.plot_array(plotarray, cmap=cmap, norm=norm, **kwargs) + + return quadmesh + + def plot_shapefile(self, shp, **kwargs): + """ + Plot a shapefile. The shapefile must be in the same coordinates as + the rotated and offset grid. + + Parameters + ---------- + shp : string or pyshp shapefile object + Name of the shapefile to plot + + kwargs : dictionary + Keyword arguments passed to plotutil.plot_shapefile() + + """ + if 'ax' in kwargs: + ax = kwargs.pop('ax') + else: + ax = self.ax + patch_collection = plotutil.plot_shapefile(shp, ax, **kwargs) + + return patch_collection + + def plot_cvfd(self, verts, iverts, **kwargs): + """ + Plot a cvfd grid. The vertices must be in the same coordinates as + the rotated and offset grid. + + Parameters + ---------- + verts : ndarray + 2d array of x and y points. 
+ iverts : list of lists + should be of len(ncells) with a list of vertex number for each cell + + kwargs : dictionary + Keyword arguments passed to plotutil.plot_cvfd() + + """ + if 'ax' in kwargs: + ax = kwargs.pop('ax') + else: + ax = self.ax + patch_collection = plotutil.plot_cvfd(verts, iverts, ax, self.layer, + **kwargs) + return patch_collection + + def contour_array_cvfd(self, vertc, a, masked_values=None, **kwargs): + """ + Contour a cvfd array. If the array is three-dimensional, then the method + will contour the layer tied to this class (self.layer). The vertices + must be in the same coordinates as the rotated and offset grid. + + Parameters + ---------- + vertc : np.ndarray + Array with of size (nc, 2) with centroid location of cvfd + a : numpy.ndarray + Array to plot. + masked_values : iterable of floats, ints + Values to mask. + **kwargs : dictionary + keyword arguments passed to matplotlib.pyplot.pcolormesh + + Returns + ------- + contour_set : matplotlib.pyplot.contour + + """ + try: + import matplotlib.tri as tri + except ImportError: + err_msg = "Matplotlib must be updated to use contour_array" + raise ImportError(err_msg) + + if 'ncpl' in kwargs: + nlay = self.layer + 1 + ncpl = kwargs.pop('ncpl') + if isinstance(ncpl, int): + i = int(ncpl) + ncpl = np.ones((nlay,), dtype=np.int) * i + elif isinstance(ncpl, list) or isinstance(ncpl, tuple): + ncpl = np.array(ncpl) + i0 = 0 + i1 = 0 + for k in range(nlay): + i0 = i1 + i1 = i0 + ncpl[k] + # retain vertc in selected layer + vertc = vertc[i0:i1, :] + else: + i0 = 0 + i1 = vertc.shape[0] + + plotarray = a[i0:i1] + + ismasked = None + if masked_values is not None: + for mval in masked_values: + if ismasked is None: + ismasked = np.isclose(plotarray, mval) + else: + t = np.isclose(plotarray, mval) + ismasked += t + + # add NaN values to mask + if ismasked is None: + ismasked = np.isnan(plotarray) + else: + ismasked += np.isnan(plotarray) + + if 'ax' in kwargs: + ax = kwargs.pop('ax') + else: + ax = self.ax + + if 'colors' in kwargs.keys(): + if 'cmap' in kwargs.keys(): + kwargs.pop('cmap') + + triang = tri.Triangulation(vertc[:, 0], vertc[:, 1]) + + if ismasked is not None: + ismasked = ismasked.flatten() + mask = np.any(np.where(ismasked[triang.triangles], + True, False), axis=1) + triang.set_mask(mask) + + contour_set = ax.tricontour(triang, plotarray, **kwargs) + + return contour_set + + def plot_vector(self, vx, vy, istep=1, jstep=1, normalize=False, + masked_values=None, **kwargs): + """ + Plot a vector. 
+
+        Parameters
+        ----------
+        vx : np.ndarray
+            x component of the vector to be plotted (non-rotated)
+            array shape must be (nlay, nrow, ncol) for a structured grid
+            array shape must be (nlay, ncpl) for an unstructured grid
+        vy : np.ndarray
+            y component of the vector to be plotted (non-rotated)
+            array shape must be (nlay, nrow, ncol) for a structured grid
+            array shape must be (nlay, ncpl) for an unstructured grid
+        istep : int
+            row frequency to plot (default is 1)
+        jstep : int
+            column frequency to plot (default is 1)
+        normalize : bool
+            boolean flag used to determine if vectors should be normalized
+            using the vector magnitude in each cell (default is False)
+        masked_values : iterable of floats
+            values to mask
+        kwargs : matplotlib.pyplot keyword arguments for the
+            plt.quiver method
+
+        Returns
+        -------
+        quiver : matplotlib.pyplot.quiver
+            result of the quiver function
+
+        """
+        if 'pivot' in kwargs:
+            pivot = kwargs.pop('pivot')
+        else:
+            pivot = 'middle'
+
+        if 'ax' in kwargs:
+            ax = kwargs.pop('ax')
+        else:
+            ax = self.ax
+
+        # get actual values to plot
+        if self.mg.grid_type == "structured":
+            x = self.mg.xcellcenters[::istep, ::jstep]
+            y = self.mg.ycellcenters[::istep, ::jstep]
+            u = vx[self.layer, ::istep, ::jstep]
+            v = vy[self.layer, ::istep, ::jstep]
+        else:
+            x = self.mg.xcellcenters[::istep]
+            y = self.mg.ycellcenters[::istep]
+            u = vx[self.layer, ::istep]
+            v = vy[self.layer, ::istep]
+
+        # if necessary, copy to avoid changing the passed values
+        if masked_values is not None or normalize:
+            import copy
+            u = copy.copy(u)
+            v = copy.copy(v)
+
+        # mask values
+        if masked_values is not None:
+            for mval in masked_values:
+                to_mask = np.logical_or(u == mval, v == mval)
+                u[to_mask] = np.nan
+                v[to_mask] = np.nan
+
+        # normalize
+        if normalize:
+            vmag = np.sqrt(u ** 2. + v ** 2.)
+            idx = vmag > 0.
+            u[idx] /= vmag[idx]
+            v[idx] /= vmag[idx]
+
+        # rotate and plot, offsets must be zero since
+        # these are vectors not locations
+        urot, vrot = geometry.rotate(u, v, 0., 0., self.mg.angrot_radians)
+
+        # plot with quiver
+        quiver = ax.quiver(x, y, urot, vrot, pivot=pivot, **kwargs)
+
+        return quiver
+
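+    # Example (sketch): plot normalized flow vectors on a structured grid.
+    # Here "ml" is a placeholder for a flopy model object and qx, qy are
+    # assumed to be cell-centered x/y flow components with shape
+    # (nlay, nrow, ncol), e.g. from postprocessing.get_specific_discharge().
+    #
+    #     pmv = PlotMapView(model=ml, layer=0)
+    #     pmv.plot_grid()
+    #     quiver = pmv.plot_vector(qx, qy, istep=2, jstep=2, normalize=True)
+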
+    def plot_specific_discharge(self, spdis, istep=1,
+                                jstep=1, normalize=False, **kwargs):
+        """
+        DEPRECATED. Use plot_vector() instead, which should follow after
+        postprocessing.get_specific_discharge().
+
+        Method to plot specific discharge from discharge vectors
+        provided by the cell by cell flow output file. In MODFLOW 6
+        this option is controlled in the NPF options block. This method
+        uses matplotlib quiver to plot the discharge vectors.
+
+        Parameters
+        ----------
+        spdis : np.recarray
+            specific discharge recarray from cbc file
+        istep : int
+            row frequency to plot. (Default is 1.)
+        jstep : int
+            column frequency to plot. (Default is 1.)
+        kwargs : matplotlib.pyplot keyword arguments for the
+            plt.quiver method.
+
+        Returns
+        -------
+        quiver : matplotlib.pyplot.quiver
+            quiver plot of discharge vectors
+
+        """
+        warnings.warn('plot_specific_discharge() has been deprecated. Use '
+                      'plot_vector() instead, which should follow after '
+                      'postprocessing.get_specific_discharge()',
+                      DeprecationWarning)
+
+        if 'pivot' in kwargs:
+            pivot = kwargs.pop('pivot')
+        else:
+            pivot = 'middle'
+
+        if 'ax' in kwargs:
+            ax = kwargs.pop('ax')
+        else:
+            ax = self.ax
+
+        if isinstance(spdis, list):
+            print("Warning: Selecting the final stress period from Specific"
+                  " Discharge list")
+            spdis = spdis[-1]
+
+        if self.mg.grid_type == "structured":
+            ncpl = self.mg.nrow * self.mg.ncol
+
+        else:
+            ncpl = self.mg.ncpl
+
+        nlay = self.mg.nlay
+
+        qx = np.zeros((nlay * ncpl))
+        qy = np.zeros((nlay * ncpl))
+
+        idx = np.array(spdis['node']) - 1
+        qx[idx] = spdis['qx']
+        qy[idx] = spdis['qy']
+
+        if self.mg.grid_type == "structured":
+            qx.shape = (self.mg.nlay, self.mg.nrow, self.mg.ncol)
+            qy.shape = (self.mg.nlay, self.mg.nrow, self.mg.ncol)
+            x = self.mg.xcellcenters[::istep, ::jstep]
+            y = self.mg.ycellcenters[::istep, ::jstep]
+            u = qx[:, ::istep, ::jstep]
+            v = qy[:, ::istep, ::jstep]
+        else:
+            qx.shape = (self.mg.nlay, self.mg.ncpl)
+            qy.shape = (self.mg.nlay, self.mg.ncpl)
+            x = self.mg.xcellcenters[::istep]
+            y = self.mg.ycellcenters[::istep]
+            u = qx[:, ::istep]
+            v = qy[:, ::istep]
+
+        # normalize
+        if normalize:
+            vmag = np.sqrt(u ** 2. + v ** 2.)
+            idx = vmag > 0.
+            u[idx] /= vmag[idx]
+            v[idx] /= vmag[idx]
+
+        u[u == 0] = np.nan
+        v[v == 0] = np.nan
+
+        u = u[self.layer, :]
+        v = v[self.layer, :]
+        # Rotate and plot, offsets must be zero since
+        # these are vectors not locations
+        urot, vrot = geometry.rotate(u, v, 0., 0.,
+                                     self.mg.angrot_radians)
+        quiver = ax.quiver(x, y, urot, vrot, pivot=pivot, **kwargs)
+        return quiver
+
+    def plot_discharge(self, frf=None, fff=None,
+                       flf=None, head=None, istep=1, jstep=1,
+                       normalize=False, **kwargs):
+        """
+        DEPRECATED. Use plot_vector() instead, which should follow after
+        postprocessing.get_specific_discharge().
+
+        Use quiver to plot vectors.
+
+        Parameters
+        ----------
+        frf : numpy.ndarray
+            MODFLOW's 'flow right face'
+        fff : numpy.ndarray
+            MODFLOW's 'flow front face'
+        flf : numpy.ndarray
+            MODFLOW's 'flow lower face' (Default is None.)
+        head : numpy.ndarray
+            MODFLOW's head array. If not provided, then confined
+            conditions will be assumed in order to calculate saturated
+            thickness.
+        istep : int
+            row frequency to plot. (Default is 1.)
+        jstep : int
+            column frequency to plot. (Default is 1.)
+        normalize : bool
+            boolean flag used to determine if discharge vectors should
+            be normalized using the magnitude of the specific discharge in
+            each cell. (default is False)
+        kwargs : dictionary
+            Keyword arguments passed to plt.quiver()
+
+        Returns
+        -------
+        quiver : matplotlib.pyplot.quiver
+            Vectors of specific discharge.
+
+        """
+        warnings.warn('plot_discharge() has been deprecated. Use '
+                      'plot_vector() instead, which should follow after '
+                      'postprocessing.get_specific_discharge()',
+                      DeprecationWarning)
+
+        if self.mg.grid_type != "structured":
+            err_msg = "Use plot_specific_discharge for " \
+                      "{} grids".format(self.mg.grid_type)
+            raise NotImplementedError(err_msg)
+
+        else:
+            if self.mg.top is None:
+                err = "StructuredModelGrid must have top and " \
+                      "botm defined to use plot_discharge()"
+                raise AssertionError(err)
+
+            ib = np.ones((self.mg.nlay, self.mg.nrow, self.mg.ncol))
+            if self.mg.idomain is not None:
+                ib = self.mg.idomain
+
+            delr = self.mg.delr
+            delc = self.mg.delc
+            top = np.copy(self.mg.top)
+            botm = np.copy(self.mg.botm)
+            laytyp = None
+            hnoflo = 999.
+            hdry = 999.
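+            # Quasi-3D confining beds: when laycbd has nonzero entries the
+            # botm array carries one extra bottom surface per confining bed.
+            # The block below drops those extra surfaces so saturated
+            # thickness is computed for model layers only. For example
+            # (illustrative), with laycbd = [1, 0] botm holds
+            # [bot1, cbd1_bot, bot2] and the cbd1_bot row is removed.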
+ laycbd = None + + if self.model is not None: + if self.model.laytyp is not None: + laytyp = self.model.laytyp + + if self.model.hnoflo is not None: + hnoflo = self.model.hnoflo + + if self.model.hdry is not None: + hdry = self.model.hdry + + if self.model.laycbd is not None: + laycbd = self.model.laycbd + + if laycbd is not None and 1 in laycbd: + active = np.ones((botm.shape[0],), dtype=np.int) + kon = 0 + for cbd in laycbd: + if cbd > 0: + kon += 1 + active[kon] = 0 + botm = botm[active==1] + + # If no access to head or laytyp, then calculate confined saturated + # thickness by setting laytyp to zeros + if head is None or laytyp is None: + head = np.zeros(botm.shape, np.float32) + laytyp = np.zeros((botm.shape[0],), dtype=np.int) + + # calculate the saturated thickness + sat_thk = plotutil.PlotUtilities. \ + saturated_thickness(head, top, botm, laytyp, + [hnoflo, hdry]) + + # Calculate specific discharge + qx, qy, qz = plotutil.PlotUtilities. \ + centered_specific_discharge(frf, fff, flf, delr, + delc, sat_thk) + ib = ib.ravel() + qx = qx.ravel() + qy = qy.ravel() + del qz + + temp = [] + for ix, val in enumerate(ib): + if val != 0: + temp.append((ix + 1, qx[ix], qy[ix])) + + spdis = np.recarray((len(temp),), dtype=[('node', np.int), + ("qx", np.float), + ("qy", np.float)]) + for ix, tup in enumerate(temp): + spdis[ix] = tup + + return self.plot_specific_discharge(spdis, istep=istep, + jstep=jstep, + normalize=normalize, **kwargs) + + def plot_pathline(self, pl, travel_time=None, **kwargs): + """ + Plot the MODPATH pathlines. + + Parameters + ---------- + pl : list of rec arrays or a single rec array + rec array or list of rec arrays is data returned from + modpathfile PathlineFile get_data() or get_alldata() + methods. Data in rec array is 'x', 'y', 'z', 'time', + 'k', and 'particleid'. + travel_time : float or str + travel_time is a travel time selection for the displayed + pathlines. If a float is passed then pathlines with times + less than or equal to the passed time are plotted. If a + string is passed a variety logical constraints can be added + in front of a time value to select pathlines for a select + period of time. Valid logical constraints are <=, <, >=, and + >. For example, to select all pathlines less than 10000 days + travel_time='< 10000' would be passed to plot_pathline. + (default is None) + kwargs : layer, ax, colors. The remaining kwargs are passed + into the LineCollection constructor. 
If layer='all',
+            pathlines are output for all layers
+
+        Returns
+        -------
+        lc : matplotlib.collections.LineCollection
+
+        """
+        from matplotlib.collections import LineCollection
+        # make sure pathlines is a list
+        if not isinstance(pl, list):
+            pl = [pl]
+
+        if 'layer' in kwargs:
+            kon = kwargs.pop('layer')
+            if isinstance(kon, bytes):
+                kon = kon.decode()
+            if isinstance(kon, str):
+                if kon.lower() == 'all':
+                    kon = -1
+                else:
+                    kon = self.layer
+        else:
+            kon = self.layer
+
+        if 'marker' in kwargs:
+            marker = kwargs.pop('marker')
+        else:
+            marker = None
+
+        if 'markersize' in kwargs:
+            markersize = kwargs.pop('markersize')
+        elif 'ms' in kwargs:
+            markersize = kwargs.pop('ms')
+        else:
+            markersize = None
+
+        if 'markercolor' in kwargs:
+            markercolor = kwargs.pop('markercolor')
+        else:
+            markercolor = None
+
+        if 'markerevery' in kwargs:
+            markerevery = kwargs.pop('markerevery')
+        else:
+            markerevery = 1
+
+        if 'ax' in kwargs:
+            ax = kwargs.pop('ax')
+        else:
+            ax = self.ax
+
+        if 'colors' not in kwargs:
+            kwargs['colors'] = '0.5'
+
+        linecol = []
+        markers = []
+        for p in pl:
+            if travel_time is None:
+                tp = p.copy()
+            else:
+                if isinstance(travel_time, str):
+                    if '<=' in travel_time:
+                        time = float(travel_time.replace('<=', ''))
+                        idx = (p['time'] <= time)
+                    elif '<' in travel_time:
+                        time = float(travel_time.replace('<', ''))
+                        idx = (p['time'] < time)
+                    elif '>=' in travel_time:
+                        time = float(travel_time.replace('>=', ''))
+                        idx = (p['time'] >= time)
+                    elif '>' in travel_time:
+                        time = float(travel_time.replace('>', ''))
+                        idx = (p['time'] > time)
+                    else:
+                        try:
+                            time = float(travel_time)
+                            idx = (p['time'] <= time)
+                        except:
+                            errmsg = 'flopy.map.plot_pathline travel_time ' + \
+                                     'variable cannot be parsed. ' + \
+                                     'Acceptable logical operators are ' + \
+                                     '<=, <, >=, and >. ' + \
+                                     'You passed {}'.format(travel_time)
+                            raise Exception(errmsg)
+                else:
+                    time = float(travel_time)
+                    idx = (p['time'] <= time)
+                tp = p[idx]
+
+            # transform data!
+            x0r, y0r = geometry.transform(tp['x'], tp['y'],
+                                          self.mg.xoffset,
+                                          self.mg.yoffset,
+                                          self.mg.angrot_radians)
+            # build polyline array
+            arr = np.vstack((x0r, y0r)).T
+            # select based on layer
+            if kon >= 0:
+                kk = tp['k'].copy().reshape(tp.shape[0], 1)
+                kk = np.repeat(kk, 2, axis=1)
+                arr = np.ma.masked_where((kk != kon), arr)
+            else:
+                arr = np.ma.asarray(arr)
+            # append line to linecol if there is some unmasked segment
+            if not arr.mask.all():
+                linecol.append(arr)
+                if marker is not None:
+                    for xy in arr[::markerevery]:
+                        if not xy.mask:
+                            markers.append(xy)
+        # create line collection
+        lc = None
+        if len(linecol) > 0:
+            lc = LineCollection(linecol, **kwargs)
+            ax.add_collection(lc)
+            if marker is not None:
+                markers = np.array(markers)
+                ax.plot(markers[:, 0], markers[:, 1], lw=0, marker=marker,
+                        color=markercolor, ms=markersize)
+        return lc
+
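+    # Example (sketch): read MODPATH pathlines and overlay them on the grid.
+    # "mp.mppth" and "ml" are placeholders for a MODPATH pathline file and a
+    # flopy model object.
+    #
+    #     pth = flopy.utils.PathlineFile("mp.mppth")
+    #     pl = pth.get_alldata()
+    #     pmv = PlotMapView(model=ml)
+    #     pmv.plot_grid()
+    #     lc = pmv.plot_pathline(pl, layer="all", travel_time="<= 10000.")
+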
+    def plot_timeseries(self, ts, travel_time=None, **kwargs):
+        """
+        Plot the MODPATH timeseries.
+
+        Parameters
+        ----------
+        ts : list of rec arrays or a single rec array
+            rec array or list of rec arrays is data returned from
+            modpathfile TimeseriesFile get_data() or get_alldata()
+            methods. Data in rec array is 'x', 'y', 'z', 'time',
+            'k', and 'particleid'.
+        travel_time : float or str
+            travel_time is a travel time selection for the displayed
+            pathlines. If a float is passed then pathlines with times
+            less than or equal to the passed time are plotted. If a
+            string is passed, a variety of logical constraints can be
+            added in front of a time value to select pathlines for a
+            select period of time. Valid logical constraints are <=, <,
+            >=, and >. For example, to select all pathlines less than
+            10000 days travel_time='< 10000' would be passed to
+            plot_timeseries. (default is None)
+        kwargs : layer, ax, color. The remaining kwargs are passed
+            into the matplotlib plot method. If layer='all',
+            timeseries are output for all layers
+
+        Returns
+        -------
+        lo : list of Line2D objects
+        """
+
+        # make sure timeseries is a list
+        if not isinstance(ts, list):
+            ts = [ts]
+
+        if 'layer' in kwargs:
+            kon = kwargs.pop('layer')
+
+            if isinstance(kon, bytes):
+                kon = kon.decode()
+
+            if isinstance(kon, str):
+                if kon.lower() == 'all':
+                    kon = -1
+                else:
+                    kon = self.layer
+        else:
+            kon = self.layer
+
+        if 'ax' in kwargs:
+            ax = kwargs.pop('ax')
+
+        else:
+            ax = self.ax
+
+        if 'color' not in kwargs:
+            kwargs['color'] = 'red'
+
+        linecol = []
+        for t in ts:
+            if travel_time is None:
+                tp = t.copy()
+
+            else:
+                if isinstance(travel_time, str):
+                    if '<=' in travel_time:
+                        time = float(travel_time.replace('<=', ''))
+                        idx = (t['time'] <= time)
+                    elif '<' in travel_time:
+                        time = float(travel_time.replace('<', ''))
+                        idx = (t['time'] < time)
+                    elif '>=' in travel_time:
+                        time = float(travel_time.replace('>=', ''))
+                        idx = (t['time'] >= time)
+                    elif '>' in travel_time:
+                        time = float(travel_time.replace('>', ''))
+                        idx = (t['time'] > time)
+                    else:
+                        try:
+                            time = float(travel_time)
+                            idx = (t['time'] <= time)
+                        except:
+                            errmsg = 'flopy.map.plot_timeseries travel_time ' + \
+                                     'variable cannot be parsed. ' + \
+                                     'Acceptable logical operators are ' + \
+                                     '<=, <, >=, and >. ' + \
+                                     'You passed {}'.format(travel_time)
+                            raise Exception(errmsg)
+                else:
+                    time = float(travel_time)
+                    idx = (t['time'] <= time)
+                tp = t[idx]
+
+            x0r, y0r = geometry.transform(tp['x'], tp['y'],
+                                          self.mg.xoffset,
+                                          self.mg.yoffset,
+                                          self.mg.angrot_radians)
+
+            # build polyline array
+            arr = np.vstack((x0r, y0r)).T
+            # select based on layer
+            if kon >= 0:
+                kk = tp['k'].copy().reshape(tp.shape[0], 1)
+                kk = np.repeat(kk, 2, axis=1)
+                arr = np.ma.masked_where((kk != kon), arr)
+
+            else:
+                arr = np.ma.asarray(arr)
+
+            # append line to linecol if there is some unmasked segment
+            if not arr.mask.all():
+                linecol.append(arr)
+
+        # plot timeseries data
+        lo = []
+        for lc in linecol:
+            if not lc.mask.all():
+                lo += ax.plot(lc[:, 0], lc[:, 1], **kwargs)
+
+        return lo
+
+    def plot_endpoint(self, ep, direction='ending',
+                      selection=None, selection_direction=None, **kwargs):
+        """
+        Plot the MODPATH endpoints.
+
+        Parameters
+        ----------
+        ep : rec array
+            A numpy recarray with the endpoint particle data from the
+            MODPATH 6 endpoint file
+        direction : str
+            String defining if starting or ending particle locations should be
+            considered. (default is 'ending')
+        selection : tuple
+            tuple that defines the zero-based layer, row, column location
+            (l, r, c) to use to make a selection of particle endpoints.
+            The selection could be a well location to determine capture zone
+            for the well. If selection is None, all particle endpoints for
+            the user-specified direction will be plotted. (default is None)
+        selection_direction : str
+            String defining if a selection should be made on starting or
+            ending particle locations. If selection is not None and
+            selection_direction is None, the selection direction will be set
+            to the opposite of direction. 
(default is None) + + kwargs : ax, c, s or size, colorbar, colorbar_label, shrink. The + remaining kwargs are passed into the matplotlib scatter + method. If colorbar is True a colorbar will be added to the plot. + If colorbar_label is passed in and colorbar is True then + colorbar_label will be passed to the colorbar set_label() + method. If shrink is passed in and colorbar is True then + the colorbar size will be set using shrink. + + Returns + ------- + sp : matplotlib.pyplot.scatter + + """ + ep = ep.copy() + direction = direction.lower() + if direction == 'starting': + xp, yp = 'x0', 'y0' + + elif direction == 'ending': + xp, yp = 'x', 'y' + + else: + errmsg = 'flopy.map.plot_endpoint direction must be "ending" ' + \ + 'or "starting".' + raise Exception(errmsg) + + if selection_direction is not None: + if selection_direction.lower() != 'starting' and \ + selection_direction.lower() != 'ending': + errmsg = 'flopy.map.plot_endpoint selection_direction ' + \ + 'must be "ending" or "starting".' + raise Exception(errmsg) + else: + if direction.lower() == 'starting': + selection_direction = 'ending' + elif direction.lower() == 'ending': + selection_direction = 'starting' + + # selection of endpoints + if selection is not None: + if isinstance(selection, int): + selection = tuple((selection,)) + try: + if len(selection) == 1: + node = selection[0] + if selection_direction.lower() == 'starting': + nsel = 'node0' + else: + nsel = 'node' + # make selection + idx = (ep[nsel] == node) + tep = ep[idx] + elif len(selection) == 3: + k, i, j = selection[0], selection[1], selection[2] + if selection_direction.lower() == 'starting': + ksel, isel, jsel = 'k0', 'i0', 'j0' + else: + ksel, isel, jsel = 'k', 'i', 'j' + # make selection + idx = (ep[ksel] == k) & (ep[isel] == i) & (ep[jsel] == j) + tep = ep[idx] + else: + errmsg = 'flopy.map.plot_endpoint selection must be ' + \ + 'a zero-based layer, row, column tuple ' + \ + '(l, r, c) or node number (MODPATH 7) of ' + \ + 'the location to evaluate (i.e., well location).' + raise Exception(errmsg) + except: + errmsg = 'flopy.map.plot_endpoint selection must be a ' + \ + 'zero-based layer, row, column tuple (l, r, c) ' + \ + 'or node number (MODPATH 7) of the location ' + \ + 'to evaluate (i.e., well location).' + raise Exception(errmsg) + # all endpoints + else: + tep = ep.copy() + + if 'ax' in kwargs: + ax = kwargs.pop('ax') + else: + ax = self.ax + + # scatter kwargs that users may redefine + if 'c' not in kwargs: + c = tep['time'] - tep['time0'] + else: + c = np.empty((tep.shape[0]), dtype="S30") + c.fill(kwargs.pop('c')) + + s = 50 + if 's' in kwargs: + s = float(kwargs.pop('s')) ** 2. + elif 'size' in kwargs: + s = float(kwargs.pop('size')) ** 2. + + # colorbar kwargs + createcb = False + if 'colorbar' in kwargs: + createcb = kwargs.pop('colorbar') + + colorbar_label = 'Endpoint Time' + if 'colorbar_label' in kwargs: + colorbar_label = kwargs.pop('colorbar_label') + + shrink = 1. + if 'shrink' in kwargs: + shrink = float(kwargs.pop('shrink')) + + # transform data! 
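+        # geometry.transform maps model coordinates to real-world
+        # coordinates: a rotation by angrot about the grid origin followed
+        # by the xoff/yoff translation (sketch of the underlying math):
+        #     x' = xoff + x * cos(angrot) - y * sin(angrot)
+        #     y' = yoff + x * sin(angrot) + y * cos(angrot)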
+        x0r, y0r = geometry.transform(tep[xp], tep[yp],
+                                      self.mg.xoffset,
+                                      self.mg.yoffset,
+                                      self.mg.angrot_radians)
+        # build array to plot
+        arr = np.vstack((x0r, y0r)).T
+
+        # plot the end point data
+        sp = ax.scatter(arr[:, 0], arr[:, 1], c=c, s=s, **kwargs)
+
+        # add a colorbar for travel times
+        if createcb:
+            cb = plt.colorbar(sp, ax=ax, shrink=shrink)
+            cb.set_label(colorbar_label)
+        return sp
+
+
+class DeprecatedMapView(PlotMapView):
+    """
+    Deprecation handler for the PlotMapView class
+
+    Parameters
+    ----------
+    model : flopy.modflow.Modflow object
+    modelgrid : flopy.discretization.Grid object
+    ax : matplotlib.pyplot.axes object
+    layer : int
+        zero-based model layer to plot (default is 0, the first layer)
+    extent : tuple of floats
+        (xmin, xmax, ymin, ymax) will be used to specify axes limits. If None
+        then these will be calculated based on grid, coordinates, and rotation.
+
+    """
+
+    def __init__(self, model=None, modelgrid=None, ax=None,
+                 layer=0, extent=None):
+        super(DeprecatedMapView, self).__init__(model=model,
+                                                modelgrid=modelgrid,
+                                                ax=ax,
+                                                layer=layer,
+                                                extent=extent)
+
+    def plot_discharge(self, frf, fff, dis=None,
+                       flf=None, head=None, istep=1, jstep=1,
+                       normalize=False, **kwargs):
+        """
+        Use quiver to plot vectors. Deprecated method that uses
+        the old function call to pass the method to PlotMapView
+
+        Parameters
+        ----------
+        frf : numpy.ndarray
+            MODFLOW's 'flow right face'
+        fff : numpy.ndarray
+            MODFLOW's 'flow front face'
+        dis : flopy.modflow.ModflowDis package
+            Deprecated parameter
+        flf : numpy.ndarray
+            MODFLOW's 'flow lower face' (Default is None.)
+        head : numpy.ndarray
+            MODFLOW's head array. If not provided, then confined
+            conditions will be assumed in order to calculate saturated
+            thickness.
+        istep : int
+            row frequency to plot. (Default is 1.)
+        jstep : int
+            column frequency to plot. (Default is 1.)
+        normalize : bool
+            boolean flag used to determine if discharge vectors should
+            be normalized using the magnitude of the specific discharge in
+            each cell. (default is False)
+        kwargs : dictionary
+            Keyword arguments passed to plt.quiver()
+
+        Returns
+        -------
+        quiver : matplotlib.pyplot.quiver
+            Vectors of specific discharge.
+
+        """
+
+        if dis is not None:
+            self.mg = plotutil._depreciated_dis_handler(modelgrid=self.mg,
+                                                        dis=dis)
+
+        return super(DeprecatedMapView, self).plot_discharge(frf=frf,
+                                                             fff=fff,
+                                                             flf=flf,
+                                                             head=head,
+                                                             istep=istep,
+                                                             jstep=jstep,
+                                                             normalize=normalize,
+                                                             **kwargs)
+
+
+class ModelMap(object):
+    """
+    Pending Deprecation: ModelMap acts as a PlotMapView factory
+    object. Please migrate to PlotMapView for plotting
+    functionality and future code compatibility
+
+    Parameters
+    ----------
+    sr : flopy.utils.reference.SpatialReference
+        The spatial reference class (Default is None)
+    ax : matplotlib.pyplot axis
+        The plot axis. If not provided, plt.gca() will be used.
+        If there is not a current axis then a new one will be created.
+    model : flopy.modflow object
+        flopy model object. (Default is None)
+    dis : flopy.modflow.ModflowDis object
+        flopy discretization object. (Default is None)
+    layer : int
+        Layer to plot. Default is 0. Must be between 0 and nlay - 1.
+    xul : float
+        x coordinate for upper left corner
+    yul : float
+        y coordinate for upper left corner. The default is the sum of the
+        delc array.
+    rotation : float
+        Angle of grid rotation around the upper left corner. A positive value
+        indicates clockwise rotation. Angles are in degrees.
+    extent : tuple of floats
+        (xmin, xmax, ymin, ymax) will be used to specify axes limits. 
If None + then these will be calculated based on grid, coordinates, and rotation. + length_multiplier : float + scaling factor for conversion from model units to another unit + length base ex. ft to m. + + Notes + ----- + ModelMap must know the position and rotation of the grid in order to make + the plot. This information is contained in the SpatialReference class + (sr), which can be passed. If sr is None, then it looks for sr in dis. + If dis is None, then it looks for sr in model.dis. If all of these + arguments are none, then it uses xul, yul, and rotation. If none of these + arguments are provided, then it puts the lower-left-hand corner of the + grid at (0, 0). + """ + + def __new__(cls, sr=None, ax=None, model=None, dis=None, layer=0, + extent=None, xul=None, yul=None, xll=None, yll=None, + rotation=None, length_multiplier=None): + + from ..utils.reference import SpatialReferenceUnstructured + # from ..plot.plotbase import DeprecatedMapView + + err_msg = "ModelMap will be replaced by " \ + "PlotMapView(); Calling PlotMapView()" + warnings.warn(err_msg, PendingDeprecationWarning) + + modelgrid = None + if model is not None: + if (xul, yul, xll, yll, rotation) != (None, None, + None, None, None): + modelgrid = plotutil._set_coord_info(model.modelgrid, + xul, yul, xll, yll, + rotation) + elif sr is not None: + if length_multiplier is not None: + sr.length_multiplier = length_multiplier + + if (xul, yul, xll, yll, rotation) != (None, None, + None, None, None): + sr.set_spatialreference(xul, yul, xll, yll, rotation) + + if isinstance(sr, SpatialReferenceUnstructured): + if dis is not None: + modelgrid = UnstructuredGrid(vertices=sr.verts, + iverts=sr.iverts, + xcenters=sr.xc, + ycenters=sr.yc, + top=dis.top.array, + botm=dis.botm.array, + ncpl=sr.ncpl) + else: + modelgrid = UnstructuredGrid(vertices=sr.verts, + iverts=sr.iverts, + xcenters=sr.xc, + ycenters=sr.yc, + ncpl=sr.ncpl) + + elif dis is not None: + modelgrid = StructuredGrid(delc=sr.delc, delr=sr.delr, + top=dis.top.array, + botm=dis.botm.array, + xoff=sr.xll, yoff=sr.yll, + angrot=sr.rotation) + else: + modelgrid = StructuredGrid(delc=sr.delc, delr=sr.delr, + xoff=sr.xll, yoff=sr.yll, + angrot=sr.rotation) + + else: + pass + + return DeprecatedMapView(model=model, modelgrid=modelgrid, ax=ax, + layer=layer, extent=extent) diff --git a/flopy/plot/plotbase.py b/flopy/plot/plotbase.py index 3e23b7e616..0eed5ec219 100644 --- a/flopy/plot/plotbase.py +++ b/flopy/plot/plotbase.py @@ -1,966 +1,966 @@ -import numpy as np -from ..plot.crosssection import _StructuredCrossSection -from ..plot.vcrosssection import _VertexCrossSection -from ..plot import plotutil - -try: - import matplotlib.pyplot as plt - import matplotlib.colors -except ImportError: - plt = None - - -class PlotCrossSection(object): - """ - Class to create a cross section of the model. - - Parameters - ---------- - ax : matplotlib.pyplot axis - The plot axis. If not provided it, plt.gca() will be used. - model : flopy.modflow object - flopy model object. (Default is None) - modelgrid : flopy.discretization.Grid object - can be a StructuredGrid, VertexGrid, or UnstructuredGrid object - line : dict - Dictionary with either "row", "column", or "line" key. If key - is "row" or "column" key value should be the zero-based row or - column index for cross-section. If key is "line" value should - be an array of (x, y) tuples with vertices of cross-section. - Vertices should be in map coordinates consistent with xul, - yul, and rotation. 
- extent : tuple of floats - (xmin, xmax, ymin, ymax) will be used to specify axes limits. If None - then these will be calculated based on grid, coordinates, and rotation. - geographic_coords : bool - boolean flag to allow the user to plot cross section lines in - geographic coordinates. If False (default), cross section is plotted - as the distance along the cross section line. - - """ - - def __init__(self, model=None, modelgrid=None, ax=None, - line=None, extent=None, geographic_coords=False): - if plt is None: - s = 'Could not import matplotlib. Must install matplotlib ' + \ - ' in order to use ModelMap method' - raise ImportError(s) - - if modelgrid is None and model is not None: - modelgrid = model.modelgrid - - # update this after unstructured grid is finished! - tmp = modelgrid.grid_type - - if tmp == "structured": - self.__cls = _StructuredCrossSection(ax=ax, model=model, - modelgrid=modelgrid, - line=line, extent=extent, - geographic_coords= - geographic_coords) - - elif tmp == "unstructured": - raise NotImplementedError("Unstructured xc not yet implemented") - - elif tmp == "vertex": - self.__cls = _VertexCrossSection(ax=ax, model=model, - modelgrid=modelgrid, - line=line, extent=extent, - geographic_coords= - geographic_coords) - - else: - raise ValueError("Unknown modelgrid type {}".format(tmp)) - - self.model = self.__cls.model - self.mg = self.__cls.mg - self.ax = self.__cls.ax - self.direction = self.__cls.direction - self.pts = self.__cls.pts - self.xpts = self.__cls.xpts - self.d = self.__cls.d - self.ncb = self.__cls.ncb - self.laycbd = self.__cls.laycbd - self.active = self.__cls.active - self.elev = self.__cls.elev - self.layer0 = self.__cls.layer0 - self.layer1 = self.__cls.layer1 - self.zpts = self.__cls.zpts - self.xcentergrid = self.__cls.xcentergrid - self.zcentergrid = self.__cls.zcentergrid - self.geographic_coords = self.__cls.geographic_coords - self.extent = self.__cls.extent - - def plot_array(self, a, masked_values=None, head=None, **kwargs): - """ - Plot a three-dimensional array as a patch collection. - - Parameters - ---------- - a : numpy.ndarray - Three-dimensional array to plot. - masked_values : iterable of floats, ints - Values to mask. - head : numpy.ndarray - Three-dimensional array to set top of patches to the minimum - of the top of a layer or the head value. Used to create - patches that conform to water-level elevations. - **kwargs : dictionary - keyword arguments passed to matplotlib.collections.PatchCollection - - Returns - ------- - patches : matplotlib.collections.PatchCollection - - """ - return self.__cls.plot_array(a=a, masked_values=masked_values, - head=head, **kwargs) - - def plot_surface(self, a, masked_values=None, **kwargs): - """ - Plot a two- or three-dimensional array as line(s). - - Parameters - ---------- - a : numpy.ndarray - Two- or three-dimensional array to plot. - masked_values : iterable of floats, ints - Values to mask. - **kwargs : dictionary - keyword arguments passed to matplotlib.pyplot.plot - - Returns - ------- - plot : list containing matplotlib.plot objects - - """ - return self.__cls.plot_surface(a=a, masked_values=masked_values, - **kwargs) - - def plot_fill_between(self, a, colors=('blue', 'red'), - masked_values=None, head=None, **kwargs): - """ - Plot a three-dimensional array as lines. - - Parameters - ---------- - a : numpy.ndarray - Three-dimensional array to plot. - colors: list - matplotlib fill colors, two required - masked_values : iterable of floats, ints - Values to mask. 
- head : numpy.ndarray - Three-dimensional array to set top of patches to the minimum - of the top of a layer or the head value. Used to create - patches that conform to water-level elevations. - **kwargs : dictionary - keyword arguments passed to matplotlib.pyplot.plot - - Returns - ------- - plot : list containing matplotlib.fillbetween objects - - """ - return self.__cls.plot_fill_between(a=a, colors=colors, - masked_values=masked_values, - head=head, **kwargs) - - def contour_array(self, a, masked_values=None, head=None, **kwargs): - """ - Contour a three-dimensional array. - - Parameters - ---------- - a : numpy.ndarray - Three-dimensional array to plot. - masked_values : iterable of floats, ints - Values to mask. - head : numpy.ndarray - Three-dimensional array to set top of patches to the minimum - of the top of a layer or the head value. Used to create - patches that conform to water-level elevations. - **kwargs : dictionary - keyword arguments passed to matplotlib.pyplot.contour - - Returns - ------- - contour_set : matplotlib.pyplot.contour - - """ - return self.__cls.contour_array(a=a, masked_values=masked_values, - head=head, **kwargs) - - def plot_inactive(self, ibound=None, color_noflow='black', **kwargs): - """ - Make a plot of inactive cells. If not specified, then pull ibound - from the self.ml - - Parameters - ---------- - ibound : numpy.ndarray - ibound array to plot. (Default is ibound in 'BAS6' package.) - - color_noflow : string - (Default is 'black') - - Returns - ------- - quadmesh : matplotlib.collections.QuadMesh - - """ - if ibound is None: - if self.mg.idomain is None: - raise AssertionError("An idomain array must be provided") - else: - ibound = self.mg.idomain - - plotarray = np.zeros(ibound.shape, dtype=np.int) - idx1 = (ibound == 0) - plotarray[idx1] = 1 - plotarray = np.ma.masked_equal(plotarray, 0) - cmap = matplotlib.colors.ListedColormap(['0', color_noflow]) - bounds = [0, 1, 2] - norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N) - patches = self.plot_array(plotarray, cmap=cmap, norm=norm, **kwargs) - - return patches - - def plot_ibound(self, ibound=None, color_noflow='black', color_ch='blue', - color_vpt="red", head=None, **kwargs): - """ - Make a plot of ibound. If not specified, then pull ibound from the - self.model - - Parameters - ---------- - ibound : numpy.ndarray - ibound array to plot. (Default is ibound in 'BAS6' package.) - color_noflow : string - (Default is 'black') - color_ch : string - Color for constant heads (Default is 'blue'.) - head : numpy.ndarray - Three-dimensional array to set top of patches to the minimum - of the top of a layer or the head value. Used to create - patches that conform to water-level elevations. 
- **kwargs : dictionary - keyword arguments passed to matplotlib.collections.PatchCollection - - Returns - ------- - patches : matplotlib.collections.PatchCollection - - """ - if ibound is None: - if self.model is not None: - if self.model.version == "mf6": - color_ch = color_vpt - - if self.mg.idomain is None: - raise AssertionError("Ibound/Idomain array must be provided") - - ibound = self.mg.idomain - - plotarray = np.zeros(ibound.shape, dtype=np.int) - idx1 = (ibound == 0) - idx2 = (ibound < 0) - plotarray[idx1] = 1 - plotarray[idx2] = 2 - plotarray = np.ma.masked_equal(plotarray, 0) - cmap = matplotlib.colors.ListedColormap(['none', color_noflow, - color_ch]) - bounds = [0, 1, 2, 3] - norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N) - # mask active cells - patches = self.plot_array(plotarray, masked_values=[0], head=head, - cmap=cmap, norm=norm, **kwargs) - return patches - - def plot_grid(self, **kwargs): - """ - Plot the grid lines. - - Parameters - ---------- - kwargs : ax, colors. The remaining kwargs are passed into the - the LineCollection constructor. - - Returns - ------- - lc : matplotlib.collections.LineCollection - - """ - if 'ax' in kwargs: - ax = kwargs.pop('ax') - else: - ax = self.ax - - col = self.get_grid_line_collection(**kwargs) - if col is not None: - ax.add_collection(col) - ax.set_xlim(self.extent[0], self.extent[1]) - ax.set_ylim(self.extent[2], self.extent[3]) - - return col - - def plot_bc(self, name=None, package=None, kper=0, color=None, - head=None, **kwargs): - """ - Plot boundary conditions locations for a specific boundary - type from a flopy model - - Parameters - ---------- - name : string - Package name string ('WEL', 'GHB', etc.). (Default is None) - package : flopy.modflow.Modflow package class instance - flopy package class instance. (Default is None) - kper : int - Stress period to plot - color : string - matplotlib color string. (Default is None) - head : numpy.ndarray - Three-dimensional array (structured grid) or - Two-dimensional array (vertex grid) - to set top of patches to the minimum of the top of a\ - layer or the head value. Used to create - patches that conform to water-level elevations. 
- **kwargs : dictionary - keyword arguments passed to matplotlib.collections.PatchCollection - - Returns - ------- - patches : matplotlib.collections.PatchCollection - - """ - if 'ftype' in kwargs and name is None: - name = kwargs.pop('ftype') - - # Find package to plot - if package is not None: - p = package - ftype = p.name[0] - elif self.model is not None: - if name is None: - raise Exception('ftype not specified') - name = name.upper() - p = self.model.get_package(name) - else: - raise Exception('Cannot find package to plot') - - # trap for mf6 'cellid' vs mf2005 'k', 'i', 'j' convention - if isinstance(p, list) or p.parent.version == "mf6": - if not isinstance(p, list): - p = [p] - - idx = np.array([]) - for pp in p: - if pp.package_type in ('lak', 'sfr', 'maw', 'uzf'): - t = plotutil.advanced_package_bc_helper(pp, self.mg, - kper) - else: - try: - mflist = pp.stress_period_data.array[kper] - except Exception as e: - raise Exception("Not a list-style boundary package: " - + str(e)) - if mflist is None: - return - - t = np.array([list(i) for i in mflist['cellid']], - dtype=int).T - - if len(idx) == 0: - idx = np.copy(t) - else: - idx = np.append(idx, t, axis=1) - - else: - # modflow-2005 structured and unstructured grid - if p.package_type in ('uzf', 'lak'): - idx = plotutil.advanced_package_bc_helper(p, self.mg, kper) - else: - try: - mflist = p.stress_period_data[kper] - except Exception as e: - raise Exception("Not a list-style boundary package: " - + str(e)) - if mflist is None: - return - if len(self.mg.shape) == 3: - idx = [mflist['k'], mflist['i'], mflist['j']] - else: - idx = mflist['node'] - - # Plot the list locations, change this to self.mg.shape - if len(self.mg.shape) != 3: - plotarray = np.zeros((self.mg.nlay, self.mg.ncpl), dtype=np.int) - plotarray[tuple(idx)] = 1 - else: - plotarray = np.zeros((self.mg.nlay, self.mg.nrow, self.mg.ncol), dtype=np.int) - plotarray[idx[0], idx[1], idx[2]] = 1 - - plotarray = np.ma.masked_equal(plotarray, 0) - if color is None: - key = name[:3].upper() - if key in plotutil.bc_color_dict: - c = plotutil.bc_color_dict[key] - else: - c = plotutil.bc_color_dict['default'] - else: - c = color - cmap = matplotlib.colors.ListedColormap(['none', c]) - bounds = [0, 1, 2] - norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N) - patches = self.plot_array(plotarray, masked_values=[0], - head=head, cmap=cmap, norm=norm, **kwargs) - - return patches - - def plot_vector(self, vx, vy, vz, head=None, kstep=1, hstep=1, - normalize=False, masked_values=None, **kwargs): - """ - Plot a vector. - - Parameters - ---------- - vx : np.ndarray - x component of the vector to be plotted (non-rotated) - array shape must be (nlay, nrow, ncol) for a structured grid - array shape must be (nlay, ncpl) for a unstructured grid - vy : np.ndarray - y component of the vector to be plotted (non-rotated) - array shape must be (nlay, nrow, ncol) for a structured grid - array shape must be (nlay, ncpl) for a unstructured grid - vz : np.ndarray - y component of the vector to be plotted (non-rotated) - array shape must be (nlay, nrow, ncol) for a structured grid - array shape must be (nlay, ncpl) for a unstructured grid - head : numpy.ndarray - MODFLOW's head array. If not provided, then the quivers will be - plotted in the cell center. 
- kstep : int - layer frequency to plot (default is 1) - hstep : int - horizontal frequency to plot (default is 1) - normalize : bool - boolean flag used to determine if vectors should be normalized - using the vector magnitude in each cell (default is False) - masked_values : iterable of floats - values to mask - kwargs : matplotlib.pyplot keyword arguments for the - plt.quiver method - - Returns - ------- - quiver : matplotlib.pyplot.quiver - result of the quiver function - - """ - if 'pivot' in kwargs: - pivot = kwargs.pop('pivot') - else: - pivot = 'middle' - - if 'ax' in kwargs: - ax = kwargs.pop('ax') - else: - ax = self.ax - - # this function does not support arbitrary cross-sections, so check it - arbitrary = False - if self.mg.grid_type == 'structured': - if not (self.direction == 'x' or self.direction == 'y'): - arbitrary = True - else: - # check within a tolerance - pts = self.pts - xuniform = [True if abs(pts.T[0, 0] - i) < 1 - else False for i in pts.T[0]] - yuniform = [True if abs(pts.T[1, 0] - i) < 1 - else False for i in pts.T[1]] - if not np.all(xuniform) and not np.all(yuniform): - arbitrary = True - if arbitrary: - err_msg = "plot_specific_discharge() does not " \ - "support arbitrary cross-sections" - raise AssertionError(err_msg) - - # get the actual values to plot - if self.direction == 'x': - u_tmp = vx - elif self.direction == 'y': - u_tmp = -1. * vy - v_tmp = vz - if self.mg.grid_type == "structured": - if isinstance(head, np.ndarray): - zcentergrid = self.__cls.set_zcentergrid(head) - else: - zcentergrid = self.zcentergrid - - if self.geographic_coords: - xcentergrid = self.__cls.geographic_xcentergrid - else: - xcentergrid = self.xcentergrid - - if self.mg.nlay == 1: - x = [] - z = [] - for k in range(self.mg.nlay): - for i in range(xcentergrid.shape[1]): - x.append(xcentergrid[k, i]) - z.append(0.5 * (zcentergrid[k, i] + zcentergrid[k + 1, i])) - x = np.array(x).reshape((1, xcentergrid.shape[1])) - z = np.array(z).reshape((1, xcentergrid.shape[1])) - else: - x = xcentergrid - z = zcentergrid - - u = [] - v = [] - xedge, yedge = self.mg.xyedges - for k in range(self.mg.nlay): - u.append(plotutil.cell_value_points(self.xpts, xedge, - yedge, u_tmp[k, :, :])) - v.append(plotutil.cell_value_points(self.xpts, xedge, - yedge, v_tmp[k, :, :])) - u = np.array(u) - v = np.array(v) - x = x[::kstep, ::hstep] - z = z[::kstep, ::hstep] - u = u[::kstep, ::hstep] - v = v[::kstep, ::hstep] - - # upts and vpts has a value for the left and right - # sides of a cell. 
Sample every other value for quiver - u = u[:, ::2] - v = v[:, ::2] - - else: - # kstep implementation for vertex grid - projpts = {key: value for key, value in self.__cls.projpts.items() - if (key // self.mg.ncpl) % kstep == 0} - - # set x and z centers - if isinstance(head, np.ndarray): - # pipe kstep to set_zcentergrid to assure consistent array size - zcenters = self.__cls.set_zcentergrid(np.ravel(head), kstep=kstep) - else: - zcenters = [np.mean(np.array(v).T[1]) for i, v - in sorted(projpts.items())] - - u = np.array([u_tmp.ravel()[cell] for cell in sorted(projpts)]) - - x = np.array([np.mean(np.array(v).T[0]) for i, v - in sorted(projpts.items())]) - - z = np.ravel(zcenters) - v = np.array([v_tmp.ravel()[cell] for cell in sorted(projpts)]) - - x = x[::hstep] - z = z[::hstep] - u = u[::hstep] - v = v[::hstep] - - # mask values - if masked_values is not None: - for mval in masked_values: - to_mask = np.logical_or(u==mval, v==mval) - u[to_mask] = np.nan - v[to_mask] = np.nan - - # normalize - if normalize: - vmag = np.sqrt(u ** 2. + v ** 2.) - idx = vmag > 0. - u[idx] /= vmag[idx] - v[idx] /= vmag[idx] - - # plot with quiver - quiver = ax.quiver(x, z, u, v, pivot=pivot, **kwargs) - - return quiver - - def plot_specific_discharge(self, spdis, head=None, kstep=1, - hstep=1, normalize=False, **kwargs): - """ - DEPRECATED. Use plot_vector() instead, which should follow after - postprocessing.get_specific_discharge(). - - Use quiver to plot vectors. - - Parameters - ---------- - spdis : np.recarray - numpy recarray of specific discharge information. This - can be grabbed directly from the CBC file if SAVE_SPECIFIC_DISCHARGE - is used in the MF6 NPF file. - head : numpy.ndarray - MODFLOW's head array. If not provided, then the quivers will be plotted - in the cell center. - kstep : int - layer frequency to plot. (Default is 1.) - hstep : int - horizontal frequency to plot. (Default is 1.) - normalize : bool - boolean flag used to determine if discharge vectors should - be normalized using the magnitude of the specific discharge in each - cell. (default is False) - kwargs : dictionary - Keyword arguments passed to plt.quiver() - - Returns - ------- - quiver : matplotlib.pyplot.quiver - Vectors - - """ - import warnings - warnings.warn('plot_specific_discharge() has been deprecated. Use ' - 'plot_vector() instead, which should follow after ' - 'postprocessing.get_specific_discharge()', - DeprecationWarning) - - if 'pivot' in kwargs: - pivot = kwargs.pop('pivot') - else: - pivot = 'middle' - - if 'ax' in kwargs: - ax = kwargs.pop('ax') - else: - ax = self.ax - - if isinstance(spdis, list): - print("Warning: Selecting the final stress period from Specific" - " Discharge list") - spdis = spdis[-1] - - if self.mg.grid_type == "structured": - ncpl = self.mg.nrow * self.mg.ncol - - else: - ncpl = self.mg.ncpl - - nlay = self.mg.nlay - - qx = np.zeros((nlay * ncpl)) - qz = np.zeros((nlay * ncpl)) - ib = np.zeros((nlay * ncpl), dtype=bool) - - idx = np.array(spdis['node']) - 1 - - # check that vertex grid cross sections are not arbitrary - # within a tolerance! 
- if self.mg.grid_type != 'structured': - pts = self.pts - xuniform = [True if abs(pts.T[0, 0] - i) < 1 - else False for i in pts.T[0]] - yuniform = [True if abs(pts.T[1, 0] - i) < 1 - else False for i in pts.T[1]] - if not np.all(xuniform): - if not np.all(yuniform): - err_msg = "plot_specific_discharge does not " \ - "support aribtrary cross sections" - raise AssertionError(err_msg) - - if self.direction == 'x': - qx[idx] = spdis['qx'] - elif self.direction == 'y': - qx[idx] = spdis['qy'] * -1 - else: - err_msg = 'plot_specific_discharge does not ' \ - 'support arbitrary cross-sections' - raise AssertionError(err_msg) - - qz[idx] = spdis["qz"] - ib[idx] = True - - if self.mg.grid_type == "structured": - qx.shape = (self.mg.nlay, self.mg.nrow, self.mg.ncol) - qz.shape = (self.mg.nlay, self.mg.nrow, self.mg.ncol) - ib.shape = (self.mg.nlay, self.mg.nrow, self.mg.ncol) - - if isinstance(head, np.ndarray): - zcentergrid = self.__cls.set_zcentergrid(head) - else: - zcentergrid = self.zcentergrid - - if self.geographic_coords: - xcentergrid = self.__cls.geographic_xcentergrid - else: - xcentergrid = self.xcentergrid - - if nlay == 1: - x = [] - z = [] - for k in range(nlay): - for i in range(xcentergrid.shape[1]): - x.append(xcentergrid[k, i]) - z.append(0.5 * (zcentergrid[k, i] + zcentergrid[k + 1, i])) - x = np.array(x).reshape((1, xcentergrid.shape[1])) - z = np.array(z).reshape((1, xcentergrid.shape[1])) - else: - x = xcentergrid - z = zcentergrid - - u = [] - v = [] - ibx = [] - xedge, yedge = self.mg.xyedges - for k in range(self.mg.nlay): - u.append(plotutil.cell_value_points(self.xpts, xedge, - yedge, qx[k, :, :])) - v.append(plotutil.cell_value_points(self.xpts, xedge, - yedge, qz[k, :, :])) - ibx.append(plotutil.cell_value_points(self.xpts, xedge, - yedge, ib[k, :, :])) - u = np.array(u) - v = np.array(v) - ibx = np.array(ibx) - x = x[::kstep, ::hstep] - z = z[::kstep, ::hstep] - u = u[::kstep, ::hstep] - v = v[::kstep, ::hstep] - ib = ibx[::kstep, ::hstep] - - # upts and vpts has a value for the left and right - # sides of a cell. Sample every other value for quiver - u = u[:, ::2] - v = v[:, ::2] - ib = ib[:, ::2] - - else: - # kstep implementation for vertex grid - projpts = {key: value for key, value in self.__cls.projpts.items() - if (key // ncpl) % kstep == 0} - - # set x and z centers - if isinstance(head, np.ndarray): - # pipe kstep to set_zcentergrid to assure consistent array size - zcenters = self.__cls.set_zcentergrid(np.ravel(head), kstep=kstep) - else: - zcenters = [np.mean(np.array(v).T[1]) for i, v - in sorted(projpts.items())] - - u = np.array([qx[cell] for cell in sorted(projpts)]) - - x = np.array([np.mean(np.array(v).T[0]) for i, v - in sorted(projpts.items())]) - - z = np.ravel(zcenters) - v = np.array([qz[cell] for cell - in sorted(projpts)]) - ib = np.array([ib[cell] for cell - in sorted(projpts)]) - - x = x[::hstep] - z = z[::hstep] - u = u[::hstep] - v = v[::hstep] - ib = ib[::hstep] - - if normalize: - vmag = np.sqrt(u ** 2. + v ** 2.) - idx = vmag > 0. - u[idx] /= vmag[idx] - v[idx] /= vmag[idx] - - # mask with an ibound array - u[~ib] = np.nan - v[~ib] = np.nan - - quiver = ax.quiver(x, z, u, v, pivot=pivot, **kwargs) - - return quiver - - def plot_discharge(self, frf, fff, flf=None, - head=None, kstep=1, hstep=1, normalize=False, - **kwargs): - """ - DEPRECATED. Use plot_vector() instead, which should follow after - postprocessing.get_specific_discharge(). - - Use quiver to plot vectors. 
- - Parameters - ---------- - frf : numpy.ndarray - MODFLOW's 'flow right face' - fff : numpy.ndarray - MODFLOW's 'flow front face' - flf : numpy.ndarray - MODFLOW's 'flow lower face' (Default is None.) - head : numpy.ndarray - MODFLOW's head array. If not provided, then will assume confined - conditions in order to calculated saturated thickness. - kstep : int - layer frequency to plot. (Default is 1.) - hstep : int - horizontal frequency to plot. (Default is 1.) - normalize : bool - boolean flag used to determine if discharge vectors should - be normalized using the magnitude of the specific discharge in each - cell. (default is False) - kwargs : dictionary - Keyword arguments passed to plt.quiver() - - Returns - ------- - quiver : matplotlib.pyplot.quiver - Vectors - - """ - import warnings - warnings.warn('plot_discharge() has been deprecated. Use ' - 'plot_vector() instead, which should follow after ' - 'postprocessing.get_specific_discharge()', - DeprecationWarning) - - if self.mg.grid_type != "structured": - err_msg = "Use plot_specific_discharge for " \ - "{} grids".format(self.mg.grid_type) - raise NotImplementedError(err_msg) - - else: - ib = np.ones((self.mg.nlay, self.mg.nrow, self.mg.ncol)) - if self.mg.idomain is not None: - ib = self.mg.idomain - - delr = self.mg.delr - delc = self.mg.delc - top = self.mg.top - botm = self.mg.botm - if not np.all(self.active==1): - botm = botm[self.active==1] - nlay = botm.shape[0] - laytyp = None - hnoflo = 999. - hdry = 999. - - if self.model is not None: - if self.model.laytyp is not None: - laytyp = self.model.laytyp - - if self.model.hnoflo is not None: - hnoflo = self.model.hnoflo - - if self.model.hdry is not None: - hdry = self.model.hdry - - # If no access to head or laytyp, then calculate confined saturated - # thickness by setting laytyp to zeros - if head is None or laytyp is None: - head = np.zeros(botm.shape, np.float32) - laytyp = np.zeros((nlay), dtype=np.int) - head[0, :, :] = top - if nlay > 1: - head[1:, :, :] = botm[:-1, :, :] - - sat_thk = plotutil.PlotUtilities. \ - saturated_thickness(head, top, botm, - laytyp, [hnoflo, hdry]) - - # Calculate specific discharge - qx, qy, qz = plotutil.PlotUtilities. \ - centered_specific_discharge(frf, fff, flf, - delr, delc, sat_thk) - - if qz is None: - qz = np.zeros((qx.shape), dtype=np.float) - - ib = ib.ravel() - qx = qx.ravel() - qy = qy.ravel() * -1 - qz = qz.ravel() - - temp = [] - for ix, val in enumerate(ib): - if val != 0: - temp.append((ix + 1, qx[ix], -qy[ix], qz[ix])) - - spdis = np.recarray((len(temp),), dtype=[('node', np.int), - ("qx", np.float), - ("qy", np.float), - ("qz", np.float)]) - for ix, tup in enumerate(temp): - spdis[ix] = tup - - self.plot_specific_discharge(spdis, head=head, kstep=kstep, - hstep=hstep, normalize=normalize, - **kwargs) - - def get_grid_patch_collection(self, zpts, plotarray, **kwargs): - """ - Get a PatchCollection of plotarray in unmasked cells - - Parameters - ---------- - zpts : numpy.ndarray - array of z elevations that correspond to the x, y, and horizontal - distance along the cross-section (self.xpts). Constructed using - plotutil.cell_value_points(). - plotarray : numpy.ndarray - Three-dimensional array to attach to the Patch Collection. 
- **kwargs : dictionary - keyword arguments passed to matplotlib.collections.PatchCollection - - Returns - ------- - patches : matplotlib.collections.PatchCollection - - """ - if self.mg.grid_type == "structured": - return self.__cls.get_grid_patch_collection(zpts=zpts, plotarray=plotarray, - **kwargs) - elif self.mg.grid_type == "unstructured": - raise NotImplementedError() - - else: - return self.__cls.get_grid_patch_collection(projpts=zpts, plotarray=plotarray, - **kwargs) - - def get_grid_line_collection(self, **kwargs): - """ - Get a LineCollection of the grid - - Parameters - ---------- - **kwargs : dictionary - keyword arguments passed to matplotlib.collections.LineCollection - - Returns - ------- - linecollection : matplotlib.collections.LineCollection - """ - return self.__cls.get_grid_line_collection(**kwargs) - - -class DeprecatedCrossSection(PlotCrossSection): - """ - Deprecation handler for the PlotCrossSection class - - Parameters - ---------- - ax : matplotlib.pyplot.axes object - model : flopy.modflow.Modflow object - modelgrid : flopy.discretization.Grid object - line : dict - Dictionary with either "row", "column", or "line" key. If key - is "row" or "column" key value should be the zero-based row or - column index for cross-section. If key is "line" value should - be an array of (x, y) tuples with vertices of cross-section. - Vertices should be in map coordinates consistent with xul, - yul, and rotation. - extent : tuple of floats - (xmin, xmax, ymin, ymax) will be used to specify axes limits. If None - then these will be calculated based on grid, coordinates, and rotation. - - """ - def __init__(self, ax=None, model=None, modelgrid=None, - line=None, extent=None): - super(DeprecatedCrossSection, self).__init__(ax=ax, model=model, - modelgrid=modelgrid, - line=line, - extent=extent) +import numpy as np +from ..plot.crosssection import _StructuredCrossSection +from ..plot.vcrosssection import _VertexCrossSection +from ..plot import plotutil + +try: + import matplotlib.pyplot as plt + import matplotlib.colors +except ImportError: + plt = None + + +class PlotCrossSection(object): + """ + Class to create a cross section of the model. + + Parameters + ---------- + ax : matplotlib.pyplot axis + The plot axis. If not provided it, plt.gca() will be used. + model : flopy.modflow object + flopy model object. (Default is None) + modelgrid : flopy.discretization.Grid object + can be a StructuredGrid, VertexGrid, or UnstructuredGrid object + line : dict + Dictionary with either "row", "column", or "line" key. If key + is "row" or "column" key value should be the zero-based row or + column index for cross-section. If key is "line" value should + be an array of (x, y) tuples with vertices of cross-section. + Vertices should be in map coordinates consistent with xul, + yul, and rotation. + extent : tuple of floats + (xmin, xmax, ymin, ymax) will be used to specify axes limits. If None + then these will be calculated based on grid, coordinates, and rotation. + geographic_coords : bool + boolean flag to allow the user to plot cross section lines in + geographic coordinates. If False (default), cross section is plotted + as the distance along the cross section line. + + """ + + def __init__(self, model=None, modelgrid=None, ax=None, + line=None, extent=None, geographic_coords=False): + if plt is None: + s = 'Could not import matplotlib. 
Must install matplotlib ' + \ + ' in order to use ModelMap method' + raise ImportError(s) + + if modelgrid is None and model is not None: + modelgrid = model.modelgrid + + # update this after unstructured grid is finished! + tmp = modelgrid.grid_type + + if tmp == "structured": + self.__cls = _StructuredCrossSection(ax=ax, model=model, + modelgrid=modelgrid, + line=line, extent=extent, + geographic_coords= + geographic_coords) + + elif tmp == "unstructured": + raise NotImplementedError("Unstructured xc not yet implemented") + + elif tmp == "vertex": + self.__cls = _VertexCrossSection(ax=ax, model=model, + modelgrid=modelgrid, + line=line, extent=extent, + geographic_coords= + geographic_coords) + + else: + raise ValueError("Unknown modelgrid type {}".format(tmp)) + + self.model = self.__cls.model + self.mg = self.__cls.mg + self.ax = self.__cls.ax + self.direction = self.__cls.direction + self.pts = self.__cls.pts + self.xpts = self.__cls.xpts + self.d = self.__cls.d + self.ncb = self.__cls.ncb + self.laycbd = self.__cls.laycbd + self.active = self.__cls.active + self.elev = self.__cls.elev + self.layer0 = self.__cls.layer0 + self.layer1 = self.__cls.layer1 + self.zpts = self.__cls.zpts + self.xcentergrid = self.__cls.xcentergrid + self.zcentergrid = self.__cls.zcentergrid + self.geographic_coords = self.__cls.geographic_coords + self.extent = self.__cls.extent + + def plot_array(self, a, masked_values=None, head=None, **kwargs): + """ + Plot a three-dimensional array as a patch collection. + + Parameters + ---------- + a : numpy.ndarray + Three-dimensional array to plot. + masked_values : iterable of floats, ints + Values to mask. + head : numpy.ndarray + Three-dimensional array to set top of patches to the minimum + of the top of a layer or the head value. Used to create + patches that conform to water-level elevations. + **kwargs : dictionary + keyword arguments passed to matplotlib.collections.PatchCollection + + Returns + ------- + patches : matplotlib.collections.PatchCollection + + """ + return self.__cls.plot_array(a=a, masked_values=masked_values, + head=head, **kwargs) + + def plot_surface(self, a, masked_values=None, **kwargs): + """ + Plot a two- or three-dimensional array as line(s). + + Parameters + ---------- + a : numpy.ndarray + Two- or three-dimensional array to plot. + masked_values : iterable of floats, ints + Values to mask. + **kwargs : dictionary + keyword arguments passed to matplotlib.pyplot.plot + + Returns + ------- + plot : list containing matplotlib.plot objects + + """ + return self.__cls.plot_surface(a=a, masked_values=masked_values, + **kwargs) + + def plot_fill_between(self, a, colors=('blue', 'red'), + masked_values=None, head=None, **kwargs): + """ + Plot a three-dimensional array as lines. + + Parameters + ---------- + a : numpy.ndarray + Three-dimensional array to plot. + colors: list + matplotlib fill colors, two required + masked_values : iterable of floats, ints + Values to mask. + head : numpy.ndarray + Three-dimensional array to set top of patches to the minimum + of the top of a layer or the head value. Used to create + patches that conform to water-level elevations. 
+ **kwargs : dictionary + keyword arguments passed to matplotlib.pyplot.plot + + Returns + ------- + plot : list containing matplotlib.fillbetween objects + + """ + return self.__cls.plot_fill_between(a=a, colors=colors, + masked_values=masked_values, + head=head, **kwargs) + + def contour_array(self, a, masked_values=None, head=None, **kwargs): + """ + Contour a three-dimensional array. + + Parameters + ---------- + a : numpy.ndarray + Three-dimensional array to plot. + masked_values : iterable of floats, ints + Values to mask. + head : numpy.ndarray + Three-dimensional array to set top of patches to the minimum + of the top of a layer or the head value. Used to create + patches that conform to water-level elevations. + **kwargs : dictionary + keyword arguments passed to matplotlib.pyplot.contour + + Returns + ------- + contour_set : matplotlib.pyplot.contour + + """ + return self.__cls.contour_array(a=a, masked_values=masked_values, + head=head, **kwargs) + + def plot_inactive(self, ibound=None, color_noflow='black', **kwargs): + """ + Make a plot of inactive cells. If not specified, then pull ibound + from the self.ml + + Parameters + ---------- + ibound : numpy.ndarray + ibound array to plot. (Default is ibound in 'BAS6' package.) + + color_noflow : string + (Default is 'black') + + Returns + ------- + quadmesh : matplotlib.collections.QuadMesh + + """ + if ibound is None: + if self.mg.idomain is None: + raise AssertionError("An idomain array must be provided") + else: + ibound = self.mg.idomain + + plotarray = np.zeros(ibound.shape, dtype=np.int) + idx1 = (ibound == 0) + plotarray[idx1] = 1 + plotarray = np.ma.masked_equal(plotarray, 0) + cmap = matplotlib.colors.ListedColormap(['0', color_noflow]) + bounds = [0, 1, 2] + norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N) + patches = self.plot_array(plotarray, cmap=cmap, norm=norm, **kwargs) + + return patches + + def plot_ibound(self, ibound=None, color_noflow='black', color_ch='blue', + color_vpt="red", head=None, **kwargs): + """ + Make a plot of ibound. If not specified, then pull ibound from the + self.model + + Parameters + ---------- + ibound : numpy.ndarray + ibound array to plot. (Default is ibound in 'BAS6' package.) + color_noflow : string + (Default is 'black') + color_ch : string + Color for constant heads (Default is 'blue'.) + head : numpy.ndarray + Three-dimensional array to set top of patches to the minimum + of the top of a layer or the head value. Used to create + patches that conform to water-level elevations. + **kwargs : dictionary + keyword arguments passed to matplotlib.collections.PatchCollection + + Returns + ------- + patches : matplotlib.collections.PatchCollection + + """ + if ibound is None: + if self.model is not None: + if self.model.version == "mf6": + color_ch = color_vpt + + if self.mg.idomain is None: + raise AssertionError("Ibound/Idomain array must be provided") + + ibound = self.mg.idomain + + plotarray = np.zeros(ibound.shape, dtype=np.int) + idx1 = (ibound == 0) + idx2 = (ibound < 0) + plotarray[idx1] = 1 + plotarray[idx2] = 2 + plotarray = np.ma.masked_equal(plotarray, 0) + cmap = matplotlib.colors.ListedColormap(['none', color_noflow, + color_ch]) + bounds = [0, 1, 2, 3] + norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N) + # mask active cells + patches = self.plot_array(plotarray, masked_values=[0], head=head, + cmap=cmap, norm=norm, **kwargs) + return patches + + def plot_grid(self, **kwargs): + """ + Plot the grid lines. + + Parameters + ---------- + kwargs : ax, colors. 
The remaining kwargs are passed into the + the LineCollection constructor. + + Returns + ------- + lc : matplotlib.collections.LineCollection + + """ + if 'ax' in kwargs: + ax = kwargs.pop('ax') + else: + ax = self.ax + + col = self.get_grid_line_collection(**kwargs) + if col is not None: + ax.add_collection(col) + ax.set_xlim(self.extent[0], self.extent[1]) + ax.set_ylim(self.extent[2], self.extent[3]) + + return col + + def plot_bc(self, name=None, package=None, kper=0, color=None, + head=None, **kwargs): + """ + Plot boundary conditions locations for a specific boundary + type from a flopy model + + Parameters + ---------- + name : string + Package name string ('WEL', 'GHB', etc.). (Default is None) + package : flopy.modflow.Modflow package class instance + flopy package class instance. (Default is None) + kper : int + Stress period to plot + color : string + matplotlib color string. (Default is None) + head : numpy.ndarray + Three-dimensional array (structured grid) or + Two-dimensional array (vertex grid) + to set top of patches to the minimum of the top of a\ + layer or the head value. Used to create + patches that conform to water-level elevations. + **kwargs : dictionary + keyword arguments passed to matplotlib.collections.PatchCollection + + Returns + ------- + patches : matplotlib.collections.PatchCollection + + """ + if 'ftype' in kwargs and name is None: + name = kwargs.pop('ftype') + + # Find package to plot + if package is not None: + p = package + ftype = p.name[0] + elif self.model is not None: + if name is None: + raise Exception('ftype not specified') + name = name.upper() + p = self.model.get_package(name) + else: + raise Exception('Cannot find package to plot') + + # trap for mf6 'cellid' vs mf2005 'k', 'i', 'j' convention + if isinstance(p, list) or p.parent.version == "mf6": + if not isinstance(p, list): + p = [p] + + idx = np.array([]) + for pp in p: + if pp.package_type in ('lak', 'sfr', 'maw', 'uzf'): + t = plotutil.advanced_package_bc_helper(pp, self.mg, + kper) + else: + try: + mflist = pp.stress_period_data.array[kper] + except Exception as e: + raise Exception("Not a list-style boundary package: " + + str(e)) + if mflist is None: + return + + t = np.array([list(i) for i in mflist['cellid']], + dtype=int).T + + if len(idx) == 0: + idx = np.copy(t) + else: + idx = np.append(idx, t, axis=1) + + else: + # modflow-2005 structured and unstructured grid + if p.package_type in ('uzf', 'lak'): + idx = plotutil.advanced_package_bc_helper(p, self.mg, kper) + else: + try: + mflist = p.stress_period_data[kper] + except Exception as e: + raise Exception("Not a list-style boundary package: " + + str(e)) + if mflist is None: + return + if len(self.mg.shape) == 3: + idx = [mflist['k'], mflist['i'], mflist['j']] + else: + idx = mflist['node'] + + # Plot the list locations, change this to self.mg.shape + if len(self.mg.shape) != 3: + plotarray = np.zeros((self.mg.nlay, self.mg.ncpl), dtype=np.int) + plotarray[tuple(idx)] = 1 + else: + plotarray = np.zeros((self.mg.nlay, self.mg.nrow, self.mg.ncol), dtype=np.int) + plotarray[idx[0], idx[1], idx[2]] = 1 + + plotarray = np.ma.masked_equal(plotarray, 0) + if color is None: + key = name[:3].upper() + if key in plotutil.bc_color_dict: + c = plotutil.bc_color_dict[key] + else: + c = plotutil.bc_color_dict['default'] + else: + c = color + cmap = matplotlib.colors.ListedColormap(['none', c]) + bounds = [0, 1, 2] + norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N) + patches = self.plot_array(plotarray, masked_values=[0], + head=head, 
cmap=cmap, norm=norm, **kwargs)
+
+        return patches
+
+    def plot_vector(self, vx, vy, vz, head=None, kstep=1, hstep=1,
+                    normalize=False, masked_values=None, **kwargs):
+        """
+        Plot a vector.
+
+        Parameters
+        ----------
+        vx : np.ndarray
+            x component of the vector to be plotted (non-rotated)
+            array shape must be (nlay, nrow, ncol) for a structured grid
+            array shape must be (nlay, ncpl) for an unstructured grid
+        vy : np.ndarray
+            y component of the vector to be plotted (non-rotated)
+            array shape must be (nlay, nrow, ncol) for a structured grid
+            array shape must be (nlay, ncpl) for an unstructured grid
+        vz : np.ndarray
+            z component of the vector to be plotted (non-rotated)
+            array shape must be (nlay, nrow, ncol) for a structured grid
+            array shape must be (nlay, ncpl) for an unstructured grid
+        head : numpy.ndarray
+            MODFLOW's head array. If not provided, then the quivers will be
+            plotted in the cell center.
+        kstep : int
+            layer frequency to plot (default is 1)
+        hstep : int
+            horizontal frequency to plot (default is 1)
+        normalize : bool
+            boolean flag used to determine if vectors should be normalized
+            using the vector magnitude in each cell (default is False)
+        masked_values : iterable of floats
+            values to mask
+        kwargs : matplotlib.pyplot keyword arguments for the
+            plt.quiver method
+
+        Returns
+        -------
+        quiver : matplotlib.pyplot.quiver
+            result of the quiver function
+
+        """
+        if 'pivot' in kwargs:
+            pivot = kwargs.pop('pivot')
+        else:
+            pivot = 'middle'
+
+        if 'ax' in kwargs:
+            ax = kwargs.pop('ax')
+        else:
+            ax = self.ax
+
+        # this function does not support arbitrary cross-sections, so check it
+        arbitrary = False
+        if self.mg.grid_type == 'structured':
+            if not (self.direction == 'x' or self.direction == 'y'):
+                arbitrary = True
+        else:
+            # check within a tolerance
+            pts = self.pts
+            xuniform = [True if abs(pts.T[0, 0] - i) < 1
+                        else False for i in pts.T[0]]
+            yuniform = [True if abs(pts.T[1, 0] - i) < 1
+                        else False for i in pts.T[1]]
+            if not np.all(xuniform) and not np.all(yuniform):
+                arbitrary = True
+        if arbitrary:
+            err_msg = "plot_vector() does not " \
+                      "support arbitrary cross-sections"
+            raise AssertionError(err_msg)
+
+        # get the actual values to plot
+        if self.direction == 'x':
+            u_tmp = vx
+        elif self.direction == 'y':
+            u_tmp = -1. * vy
+        v_tmp = vz
+        if self.mg.grid_type == "structured":
+            if isinstance(head, np.ndarray):
+                zcentergrid = self.__cls.set_zcentergrid(head)
+            else:
+                zcentergrid = self.zcentergrid
+
+            if self.geographic_coords:
+                xcentergrid = self.__cls.geographic_xcentergrid
+            else:
+                xcentergrid = self.xcentergrid
+
+            if self.mg.nlay == 1:
+                x = []
+                z = []
+                for k in range(self.mg.nlay):
+                    for i in range(xcentergrid.shape[1]):
+                        x.append(xcentergrid[k, i])
+                        z.append(0.5 * (zcentergrid[k, i] + zcentergrid[k + 1, i]))
+                x = np.array(x).reshape((1, xcentergrid.shape[1]))
+                z = np.array(z).reshape((1, xcentergrid.shape[1]))
+            else:
+                x = xcentergrid
+                z = zcentergrid
+
+            u = []
+            v = []
+            xedge, yedge = self.mg.xyedges
+            for k in range(self.mg.nlay):
+                u.append(plotutil.cell_value_points(self.xpts, xedge,
+                                                    yedge, u_tmp[k, :, :]))
+                v.append(plotutil.cell_value_points(self.xpts, xedge,
+                                                    yedge, v_tmp[k, :, :]))
+            u = np.array(u)
+            v = np.array(v)
+            x = x[::kstep, ::hstep]
+            z = z[::kstep, ::hstep]
+            u = u[::kstep, ::hstep]
+            v = v[::kstep, ::hstep]
+
+            # upts and vpts have a value for the left and right
+            # sides of a cell.
Sample every other value for quiver + u = u[:, ::2] + v = v[:, ::2] + + else: + # kstep implementation for vertex grid + projpts = {key: value for key, value in self.__cls.projpts.items() + if (key // self.mg.ncpl) % kstep == 0} + + # set x and z centers + if isinstance(head, np.ndarray): + # pipe kstep to set_zcentergrid to assure consistent array size + zcenters = self.__cls.set_zcentergrid(np.ravel(head), kstep=kstep) + else: + zcenters = [np.mean(np.array(v).T[1]) for i, v + in sorted(projpts.items())] + + u = np.array([u_tmp.ravel()[cell] for cell in sorted(projpts)]) + + x = np.array([np.mean(np.array(v).T[0]) for i, v + in sorted(projpts.items())]) + + z = np.ravel(zcenters) + v = np.array([v_tmp.ravel()[cell] for cell in sorted(projpts)]) + + x = x[::hstep] + z = z[::hstep] + u = u[::hstep] + v = v[::hstep] + + # mask values + if masked_values is not None: + for mval in masked_values: + to_mask = np.logical_or(u==mval, v==mval) + u[to_mask] = np.nan + v[to_mask] = np.nan + + # normalize + if normalize: + vmag = np.sqrt(u ** 2. + v ** 2.) + idx = vmag > 0. + u[idx] /= vmag[idx] + v[idx] /= vmag[idx] + + # plot with quiver + quiver = ax.quiver(x, z, u, v, pivot=pivot, **kwargs) + + return quiver + + def plot_specific_discharge(self, spdis, head=None, kstep=1, + hstep=1, normalize=False, **kwargs): + """ + DEPRECATED. Use plot_vector() instead, which should follow after + postprocessing.get_specific_discharge(). + + Use quiver to plot vectors. + + Parameters + ---------- + spdis : np.recarray + numpy recarray of specific discharge information. This + can be grabbed directly from the CBC file if SAVE_SPECIFIC_DISCHARGE + is used in the MF6 NPF file. + head : numpy.ndarray + MODFLOW's head array. If not provided, then the quivers will be plotted + in the cell center. + kstep : int + layer frequency to plot. (Default is 1.) + hstep : int + horizontal frequency to plot. (Default is 1.) + normalize : bool + boolean flag used to determine if discharge vectors should + be normalized using the magnitude of the specific discharge in each + cell. (default is False) + kwargs : dictionary + Keyword arguments passed to plt.quiver() + + Returns + ------- + quiver : matplotlib.pyplot.quiver + Vectors + + """ + import warnings + warnings.warn('plot_specific_discharge() has been deprecated. Use ' + 'plot_vector() instead, which should follow after ' + 'postprocessing.get_specific_discharge()', + DeprecationWarning) + + if 'pivot' in kwargs: + pivot = kwargs.pop('pivot') + else: + pivot = 'middle' + + if 'ax' in kwargs: + ax = kwargs.pop('ax') + else: + ax = self.ax + + if isinstance(spdis, list): + print("Warning: Selecting the final stress period from Specific" + " Discharge list") + spdis = spdis[-1] + + if self.mg.grid_type == "structured": + ncpl = self.mg.nrow * self.mg.ncol + + else: + ncpl = self.mg.ncpl + + nlay = self.mg.nlay + + qx = np.zeros((nlay * ncpl)) + qz = np.zeros((nlay * ncpl)) + ib = np.zeros((nlay * ncpl), dtype=bool) + + idx = np.array(spdis['node']) - 1 + + # check that vertex grid cross sections are not arbitrary + # within a tolerance! 
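# A minimal usage sketch (not from this patch; "ml" and "demo.cbc" are
# hypothetical placeholders) showing how this method is typically reached
# with a row-aligned section, which satisfies the tolerance check below:
#
#     import flopy
#     cbc = flopy.utils.CellBudgetFile("demo.cbc")
#     spdis = cbc.get_data(text="DATA-SPDIS")[-1]
#     xsect = flopy.plot.PlotCrossSection(model=ml, line={"row": 0})
#     quiver = xsect.plot_specific_discharge(spdis)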
+        if self.mg.grid_type != 'structured':
+            pts = self.pts
+            xuniform = [True if abs(pts.T[0, 0] - i) < 1
+                        else False for i in pts.T[0]]
+            yuniform = [True if abs(pts.T[1, 0] - i) < 1
+                        else False for i in pts.T[1]]
+            if not np.all(xuniform):
+                if not np.all(yuniform):
+                    err_msg = "plot_specific_discharge does not " \
+                              "support arbitrary cross-sections"
+                    raise AssertionError(err_msg)
+
+        if self.direction == 'x':
+            qx[idx] = spdis['qx']
+        elif self.direction == 'y':
+            qx[idx] = spdis['qy'] * -1
+        else:
+            err_msg = 'plot_specific_discharge does not ' \
+                      'support arbitrary cross-sections'
+            raise AssertionError(err_msg)
+
+        qz[idx] = spdis["qz"]
+        ib[idx] = True
+
+        if self.mg.grid_type == "structured":
+            qx.shape = (self.mg.nlay, self.mg.nrow, self.mg.ncol)
+            qz.shape = (self.mg.nlay, self.mg.nrow, self.mg.ncol)
+            ib.shape = (self.mg.nlay, self.mg.nrow, self.mg.ncol)
+
+            if isinstance(head, np.ndarray):
+                zcentergrid = self.__cls.set_zcentergrid(head)
+            else:
+                zcentergrid = self.zcentergrid
+
+            if self.geographic_coords:
+                xcentergrid = self.__cls.geographic_xcentergrid
+            else:
+                xcentergrid = self.xcentergrid
+
+            if nlay == 1:
+                x = []
+                z = []
+                for k in range(nlay):
+                    for i in range(xcentergrid.shape[1]):
+                        x.append(xcentergrid[k, i])
+                        z.append(0.5 * (zcentergrid[k, i] + zcentergrid[k + 1, i]))
+                x = np.array(x).reshape((1, xcentergrid.shape[1]))
+                z = np.array(z).reshape((1, xcentergrid.shape[1]))
+            else:
+                x = xcentergrid
+                z = zcentergrid
+
+            u = []
+            v = []
+            ibx = []
+            xedge, yedge = self.mg.xyedges
+            for k in range(self.mg.nlay):
+                u.append(plotutil.cell_value_points(self.xpts, xedge,
+                                                    yedge, qx[k, :, :]))
+                v.append(plotutil.cell_value_points(self.xpts, xedge,
+                                                    yedge, qz[k, :, :]))
+                ibx.append(plotutil.cell_value_points(self.xpts, xedge,
+                                                      yedge, ib[k, :, :]))
+            u = np.array(u)
+            v = np.array(v)
+            ibx = np.array(ibx)
+            x = x[::kstep, ::hstep]
+            z = z[::kstep, ::hstep]
+            u = u[::kstep, ::hstep]
+            v = v[::kstep, ::hstep]
+            ib = ibx[::kstep, ::hstep]
+
+            # upts and vpts have a value for the left and right
+            # sides of a cell. Sample every other value for quiver
+            u = u[:, ::2]
+            v = v[:, ::2]
+            ib = ib[:, ::2]
+
+        else:
+            # kstep implementation for vertex grid
+            projpts = {key: value for key, value in self.__cls.projpts.items()
+                       if (key // ncpl) % kstep == 0}
+
+            # set x and z centers
+            if isinstance(head, np.ndarray):
+                # pipe kstep to set_zcentergrid to assure consistent array size
+                zcenters = self.__cls.set_zcentergrid(np.ravel(head), kstep=kstep)
+            else:
+                zcenters = [np.mean(np.array(v).T[1]) for i, v
+                            in sorted(projpts.items())]
+
+            u = np.array([qx[cell] for cell in sorted(projpts)])
+
+            x = np.array([np.mean(np.array(v).T[0]) for i, v
+                          in sorted(projpts.items())])
+
+            z = np.ravel(zcenters)
+            v = np.array([qz[cell] for cell
+                          in sorted(projpts)])
+            ib = np.array([ib[cell] for cell
+                           in sorted(projpts)])
+
+            x = x[::hstep]
+            z = z[::hstep]
+            u = u[::hstep]
+            v = v[::hstep]
+            ib = ib[::hstep]
+
+        if normalize:
+            vmag = np.sqrt(u ** 2. + v ** 2.)
+            idx = vmag > 0.
+            u[idx] /= vmag[idx]
+            v[idx] /= vmag[idx]
+
+        # mask with an ibound array
+        u[~ib] = np.nan
+        v[~ib] = np.nan
+
+        quiver = ax.quiver(x, z, u, v, pivot=pivot, **kwargs)
+
+        return quiver
+
+    def plot_discharge(self, frf, fff, flf=None,
+                       head=None, kstep=1, hstep=1, normalize=False,
+                       **kwargs):
+        """
+        DEPRECATED. Use plot_vector() instead, which should follow after
+        postprocessing.get_specific_discharge().
+
+        Use quiver to plot vectors.
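A migration sketch for this deprecation (not part of the patch): the call pair recommended above, with hypothetical file and model names; the tuple-plus-model signature of get_specific_discharge is an assumption taken from the message above, not something this diff defines.

    import flopy
    from flopy.utils import postprocessing

    ml = flopy.modflow.Modflow.load("demo.nam")    # hypothetical model
    cbc = flopy.utils.CellBudgetFile("demo.cbc")   # hypothetical budget file
    frf = cbc.get_data(text="FLOW RIGHT FACE")[-1]
    fff = cbc.get_data(text="FLOW FRONT FACE")[-1]
    flf = cbc.get_data(text="FLOW LOWER FACE")[-1]
    # assumed signature: a (frf, fff, flf) tuple plus the model
    qx, qy, qz = postprocessing.get_specific_discharge((frf, fff, flf), ml)
    xsect = flopy.plot.PlotCrossSection(model=ml, line={"row": 0})
    xsect.plot_vector(qx, qy, qz, normalize=True)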
+ + Parameters + ---------- + frf : numpy.ndarray + MODFLOW's 'flow right face' + fff : numpy.ndarray + MODFLOW's 'flow front face' + flf : numpy.ndarray + MODFLOW's 'flow lower face' (Default is None.) + head : numpy.ndarray + MODFLOW's head array. If not provided, then will assume confined + conditions in order to calculated saturated thickness. + kstep : int + layer frequency to plot. (Default is 1.) + hstep : int + horizontal frequency to plot. (Default is 1.) + normalize : bool + boolean flag used to determine if discharge vectors should + be normalized using the magnitude of the specific discharge in each + cell. (default is False) + kwargs : dictionary + Keyword arguments passed to plt.quiver() + + Returns + ------- + quiver : matplotlib.pyplot.quiver + Vectors + + """ + import warnings + warnings.warn('plot_discharge() has been deprecated. Use ' + 'plot_vector() instead, which should follow after ' + 'postprocessing.get_specific_discharge()', + DeprecationWarning) + + if self.mg.grid_type != "structured": + err_msg = "Use plot_specific_discharge for " \ + "{} grids".format(self.mg.grid_type) + raise NotImplementedError(err_msg) + + else: + ib = np.ones((self.mg.nlay, self.mg.nrow, self.mg.ncol)) + if self.mg.idomain is not None: + ib = self.mg.idomain + + delr = self.mg.delr + delc = self.mg.delc + top = self.mg.top + botm = self.mg.botm + if not np.all(self.active==1): + botm = botm[self.active==1] + nlay = botm.shape[0] + laytyp = None + hnoflo = 999. + hdry = 999. + + if self.model is not None: + if self.model.laytyp is not None: + laytyp = self.model.laytyp + + if self.model.hnoflo is not None: + hnoflo = self.model.hnoflo + + if self.model.hdry is not None: + hdry = self.model.hdry + + # If no access to head or laytyp, then calculate confined saturated + # thickness by setting laytyp to zeros + if head is None or laytyp is None: + head = np.zeros(botm.shape, np.float32) + laytyp = np.zeros((nlay), dtype=np.int) + head[0, :, :] = top + if nlay > 1: + head[1:, :, :] = botm[:-1, :, :] + + sat_thk = plotutil.PlotUtilities. \ + saturated_thickness(head, top, botm, + laytyp, [hnoflo, hdry]) + + # Calculate specific discharge + qx, qy, qz = plotutil.PlotUtilities. \ + centered_specific_discharge(frf, fff, flf, + delr, delc, sat_thk) + + if qz is None: + qz = np.zeros((qx.shape), dtype=np.float) + + ib = ib.ravel() + qx = qx.ravel() + qy = qy.ravel() * -1 + qz = qz.ravel() + + temp = [] + for ix, val in enumerate(ib): + if val != 0: + temp.append((ix + 1, qx[ix], -qy[ix], qz[ix])) + + spdis = np.recarray((len(temp),), dtype=[('node', np.int), + ("qx", np.float), + ("qy", np.float), + ("qz", np.float)]) + for ix, tup in enumerate(temp): + spdis[ix] = tup + + self.plot_specific_discharge(spdis, head=head, kstep=kstep, + hstep=hstep, normalize=normalize, + **kwargs) + + def get_grid_patch_collection(self, zpts, plotarray, **kwargs): + """ + Get a PatchCollection of plotarray in unmasked cells + + Parameters + ---------- + zpts : numpy.ndarray + array of z elevations that correspond to the x, y, and horizontal + distance along the cross-section (self.xpts). Constructed using + plotutil.cell_value_points(). + plotarray : numpy.ndarray + Three-dimensional array to attach to the Patch Collection. 
+ **kwargs : dictionary + keyword arguments passed to matplotlib.collections.PatchCollection + + Returns + ------- + patches : matplotlib.collections.PatchCollection + + """ + if self.mg.grid_type == "structured": + return self.__cls.get_grid_patch_collection(zpts=zpts, plotarray=plotarray, + **kwargs) + elif self.mg.grid_type == "unstructured": + raise NotImplementedError() + + else: + return self.__cls.get_grid_patch_collection(projpts=zpts, plotarray=plotarray, + **kwargs) + + def get_grid_line_collection(self, **kwargs): + """ + Get a LineCollection of the grid + + Parameters + ---------- + **kwargs : dictionary + keyword arguments passed to matplotlib.collections.LineCollection + + Returns + ------- + linecollection : matplotlib.collections.LineCollection + """ + return self.__cls.get_grid_line_collection(**kwargs) + + +class DeprecatedCrossSection(PlotCrossSection): + """ + Deprecation handler for the PlotCrossSection class + + Parameters + ---------- + ax : matplotlib.pyplot.axes object + model : flopy.modflow.Modflow object + modelgrid : flopy.discretization.Grid object + line : dict + Dictionary with either "row", "column", or "line" key. If key + is "row" or "column" key value should be the zero-based row or + column index for cross-section. If key is "line" value should + be an array of (x, y) tuples with vertices of cross-section. + Vertices should be in map coordinates consistent with xul, + yul, and rotation. + extent : tuple of floats + (xmin, xmax, ymin, ymax) will be used to specify axes limits. If None + then these will be calculated based on grid, coordinates, and rotation. + + """ + def __init__(self, ax=None, model=None, modelgrid=None, + line=None, extent=None): + super(DeprecatedCrossSection, self).__init__(ax=ax, model=model, + modelgrid=modelgrid, + line=line, + extent=extent) diff --git a/flopy/plot/plotutil.py b/flopy/plot/plotutil.py index 4b98fba31c..96a741b895 100644 --- a/flopy/plot/plotutil.py +++ b/flopy/plot/plotutil.py @@ -1,2795 +1,2795 @@ -""" -Module containing helper functions for plotting model data -using ModelMap and ModelCrossSection. Functions for plotting -shapefiles are also included. 
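These helpers are usually reached through the plotting classes rather than imported directly; as a short sketch (not part of the patch; "ml" is a hypothetical loaded model), a boundary-condition plot picks its color from the bc_color_dict defined below:

    import flopy
    pmv = flopy.plot.PlotMapView(model=ml)
    pmv.plot_bc(name="WEL")  # color resolved via bc_color_dict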
- -""" -from __future__ import print_function -import os -import sys -import math -import numpy as np -from ..utils import Util3d -from ..datbase import DataType, DataInterface - -try: - import shapefile -except ImportError: - shapefile = None - -try: - import matplotlib.pyplot as plt -except ImportError: - plt = None - -try: - from matplotlib.colors import LinearSegmentedColormap - - cm_data = [[0.26700401, 0.00487433, 0.32941519], - [0.26851048, 0.00960483, 0.33542652], - [0.26994384, 0.01462494, 0.34137895], - [0.27130489, 0.01994186, 0.34726862], - [0.27259384, 0.02556309, 0.35309303], - [0.27380934, 0.03149748, 0.35885256], - [0.27495242, 0.03775181, 0.36454323], - [0.27602238, 0.04416723, 0.37016418], - [0.2770184, 0.05034437, 0.37571452], - [0.27794143, 0.05632444, 0.38119074], - [0.27879067, 0.06214536, 0.38659204], - [0.2795655, 0.06783587, 0.39191723], - [0.28026658, 0.07341724, 0.39716349], - [0.28089358, 0.07890703, 0.40232944], - [0.28144581, 0.0843197, 0.40741404], - [0.28192358, 0.08966622, 0.41241521], - [0.28232739, 0.09495545, 0.41733086], - [0.28265633, 0.10019576, 0.42216032], - [0.28291049, 0.10539345, 0.42690202], - [0.28309095, 0.11055307, 0.43155375], - [0.28319704, 0.11567966, 0.43611482], - [0.28322882, 0.12077701, 0.44058404], - [0.28318684, 0.12584799, 0.44496], - [0.283072, 0.13089477, 0.44924127], - [0.28288389, 0.13592005, 0.45342734], - [0.28262297, 0.14092556, 0.45751726], - [0.28229037, 0.14591233, 0.46150995], - [0.28188676, 0.15088147, 0.46540474], - [0.28141228, 0.15583425, 0.46920128], - [0.28086773, 0.16077132, 0.47289909], - [0.28025468, 0.16569272, 0.47649762], - [0.27957399, 0.17059884, 0.47999675], - [0.27882618, 0.1754902, 0.48339654], - [0.27801236, 0.18036684, 0.48669702], - [0.27713437, 0.18522836, 0.48989831], - [0.27619376, 0.19007447, 0.49300074], - [0.27519116, 0.1949054, 0.49600488], - [0.27412802, 0.19972086, 0.49891131], - [0.27300596, 0.20452049, 0.50172076], - [0.27182812, 0.20930306, 0.50443413], - [0.27059473, 0.21406899, 0.50705243], - [0.26930756, 0.21881782, 0.50957678], - [0.26796846, 0.22354911, 0.5120084], - [0.26657984, 0.2282621, 0.5143487], - [0.2651445, 0.23295593, 0.5165993], - [0.2636632, 0.23763078, 0.51876163], - [0.26213801, 0.24228619, 0.52083736], - [0.26057103, 0.2469217, 0.52282822], - [0.25896451, 0.25153685, 0.52473609], - [0.25732244, 0.2561304, 0.52656332], - [0.25564519, 0.26070284, 0.52831152], - [0.25393498, 0.26525384, 0.52998273], - [0.25219404, 0.26978306, 0.53157905], - [0.25042462, 0.27429024, 0.53310261], - [0.24862899, 0.27877509, 0.53455561], - [0.2468114, 0.28323662, 0.53594093], - [0.24497208, 0.28767547, 0.53726018], - [0.24311324, 0.29209154, 0.53851561], - [0.24123708, 0.29648471, 0.53970946], - [0.23934575, 0.30085494, 0.54084398], - [0.23744138, 0.30520222, 0.5419214], - [0.23552606, 0.30952657, 0.54294396], - [0.23360277, 0.31382773, 0.54391424], - [0.2316735, 0.3181058, 0.54483444], - [0.22973926, 0.32236127, 0.54570633], - [0.22780192, 0.32659432, 0.546532], - [0.2258633, 0.33080515, 0.54731353], - [0.22392515, 0.334994, 0.54805291], - [0.22198915, 0.33916114, 0.54875211], - [0.22005691, 0.34330688, 0.54941304], - [0.21812995, 0.34743154, 0.55003755], - [0.21620971, 0.35153548, 0.55062743], - [0.21429757, 0.35561907, 0.5511844], - [0.21239477, 0.35968273, 0.55171011], - [0.2105031, 0.36372671, 0.55220646], - [0.20862342, 0.36775151, 0.55267486], - [0.20675628, 0.37175775, 0.55311653], - [0.20490257, 0.37574589, 0.55353282], - [0.20306309, 0.37971644, 0.55392505], - [0.20123854, 0.38366989, 
0.55429441], - [0.1994295, 0.38760678, 0.55464205], - [0.1976365, 0.39152762, 0.55496905], - [0.19585993, 0.39543297, 0.55527637], - [0.19410009, 0.39932336, 0.55556494], - [0.19235719, 0.40319934, 0.55583559], - [0.19063135, 0.40706148, 0.55608907], - [0.18892259, 0.41091033, 0.55632606], - [0.18723083, 0.41474645, 0.55654717], - [0.18555593, 0.4185704, 0.55675292], - [0.18389763, 0.42238275, 0.55694377], - [0.18225561, 0.42618405, 0.5571201], - [0.18062949, 0.42997486, 0.55728221], - [0.17901879, 0.43375572, 0.55743035], - [0.17742298, 0.4375272, 0.55756466], - [0.17584148, 0.44128981, 0.55768526], - [0.17427363, 0.4450441, 0.55779216], - [0.17271876, 0.4487906, 0.55788532], - [0.17117615, 0.4525298, 0.55796464], - [0.16964573, 0.45626209, 0.55803034], - [0.16812641, 0.45998802, 0.55808199], - [0.1666171, 0.46370813, 0.55811913], - [0.16511703, 0.4674229, 0.55814141], - [0.16362543, 0.47113278, 0.55814842], - [0.16214155, 0.47483821, 0.55813967], - [0.16066467, 0.47853961, 0.55811466], - [0.15919413, 0.4822374, 0.5580728], - [0.15772933, 0.48593197, 0.55801347], - [0.15626973, 0.4896237, 0.557936], - [0.15481488, 0.49331293, 0.55783967], - [0.15336445, 0.49700003, 0.55772371], - [0.1519182, 0.50068529, 0.55758733], - [0.15047605, 0.50436904, 0.55742968], - [0.14903918, 0.50805136, 0.5572505], - [0.14760731, 0.51173263, 0.55704861], - [0.14618026, 0.51541316, 0.55682271], - [0.14475863, 0.51909319, 0.55657181], - [0.14334327, 0.52277292, 0.55629491], - [0.14193527, 0.52645254, 0.55599097], - [0.14053599, 0.53013219, 0.55565893], - [0.13914708, 0.53381201, 0.55529773], - [0.13777048, 0.53749213, 0.55490625], - [0.1364085, 0.54117264, 0.55448339], - [0.13506561, 0.54485335, 0.55402906], - [0.13374299, 0.54853458, 0.55354108], - [0.13244401, 0.55221637, 0.55301828], - [0.13117249, 0.55589872, 0.55245948], - [0.1299327, 0.55958162, 0.55186354], - [0.12872938, 0.56326503, 0.55122927], - [0.12756771, 0.56694891, 0.55055551], - [0.12645338, 0.57063316, 0.5498411], - [0.12539383, 0.57431754, 0.54908564], - [0.12439474, 0.57800205, 0.5482874], - [0.12346281, 0.58168661, 0.54744498], - [0.12260562, 0.58537105, 0.54655722], - [0.12183122, 0.58905521, 0.54562298], - [0.12114807, 0.59273889, 0.54464114], - [0.12056501, 0.59642187, 0.54361058], - [0.12009154, 0.60010387, 0.54253043], - [0.11973756, 0.60378459, 0.54139999], - [0.11951163, 0.60746388, 0.54021751], - [0.11942341, 0.61114146, 0.53898192], - [0.11948255, 0.61481702, 0.53769219], - [0.11969858, 0.61849025, 0.53634733], - [0.12008079, 0.62216081, 0.53494633], - [0.12063824, 0.62582833, 0.53348834], - [0.12137972, 0.62949242, 0.53197275], - [0.12231244, 0.63315277, 0.53039808], - [0.12344358, 0.63680899, 0.52876343], - [0.12477953, 0.64046069, 0.52706792], - [0.12632581, 0.64410744, 0.52531069], - [0.12808703, 0.64774881, 0.52349092], - [0.13006688, 0.65138436, 0.52160791], - [0.13226797, 0.65501363, 0.51966086], - [0.13469183, 0.65863619, 0.5176488], - [0.13733921, 0.66225157, 0.51557101], - [0.14020991, 0.66585927, 0.5134268], - [0.14330291, 0.66945881, 0.51121549], - [0.1466164, 0.67304968, 0.50893644], - [0.15014782, 0.67663139, 0.5065889], - [0.15389405, 0.68020343, 0.50417217], - [0.15785146, 0.68376525, 0.50168574], - [0.16201598, 0.68731632, 0.49912906], - [0.1663832, 0.69085611, 0.49650163], - [0.1709484, 0.69438405, 0.49380294], - [0.17570671, 0.6978996, 0.49103252], - [0.18065314, 0.70140222, 0.48818938], - [0.18578266, 0.70489133, 0.48527326], - [0.19109018, 0.70836635, 0.48228395], - [0.19657063, 0.71182668, 0.47922108], - 
[0.20221902, 0.71527175, 0.47608431], - [0.20803045, 0.71870095, 0.4728733], - [0.21400015, 0.72211371, 0.46958774], - [0.22012381, 0.72550945, 0.46622638], - [0.2263969, 0.72888753, 0.46278934], - [0.23281498, 0.73224735, 0.45927675], - [0.2393739, 0.73558828, 0.45568838], - [0.24606968, 0.73890972, 0.45202405], - [0.25289851, 0.74221104, 0.44828355], - [0.25985676, 0.74549162, 0.44446673], - [0.26694127, 0.74875084, 0.44057284], - [0.27414922, 0.75198807, 0.4366009], - [0.28147681, 0.75520266, 0.43255207], - [0.28892102, 0.75839399, 0.42842626], - [0.29647899, 0.76156142, 0.42422341], - [0.30414796, 0.76470433, 0.41994346], - [0.31192534, 0.76782207, 0.41558638], - [0.3198086, 0.77091403, 0.41115215], - [0.3277958, 0.77397953, 0.40664011], - [0.33588539, 0.7770179, 0.40204917], - [0.34407411, 0.78002855, 0.39738103], - [0.35235985, 0.78301086, 0.39263579], - [0.36074053, 0.78596419, 0.38781353], - [0.3692142, 0.78888793, 0.38291438], - [0.37777892, 0.79178146, 0.3779385], - [0.38643282, 0.79464415, 0.37288606], - [0.39517408, 0.79747541, 0.36775726], - [0.40400101, 0.80027461, 0.36255223], - [0.4129135, 0.80304099, 0.35726893], - [0.42190813, 0.80577412, 0.35191009], - [0.43098317, 0.80847343, 0.34647607], - [0.44013691, 0.81113836, 0.3409673], - [0.44936763, 0.81376835, 0.33538426], - [0.45867362, 0.81636288, 0.32972749], - [0.46805314, 0.81892143, 0.32399761], - [0.47750446, 0.82144351, 0.31819529], - [0.4870258, 0.82392862, 0.31232133], - [0.49661536, 0.82637633, 0.30637661], - [0.5062713, 0.82878621, 0.30036211], - [0.51599182, 0.83115784, 0.29427888], - [0.52577622, 0.83349064, 0.2881265], - [0.5356211, 0.83578452, 0.28190832], - [0.5455244, 0.83803918, 0.27562602], - [0.55548397, 0.84025437, 0.26928147], - [0.5654976, 0.8424299, 0.26287683], - [0.57556297, 0.84456561, 0.25641457], - [0.58567772, 0.84666139, 0.24989748], - [0.59583934, 0.84871722, 0.24332878], - [0.60604528, 0.8507331, 0.23671214], - [0.61629283, 0.85270912, 0.23005179], - [0.62657923, 0.85464543, 0.22335258], - [0.63690157, 0.85654226, 0.21662012], - [0.64725685, 0.85839991, 0.20986086], - [0.65764197, 0.86021878, 0.20308229], - [0.66805369, 0.86199932, 0.19629307], - [0.67848868, 0.86374211, 0.18950326], - [0.68894351, 0.86544779, 0.18272455], - [0.69941463, 0.86711711, 0.17597055], - [0.70989842, 0.86875092, 0.16925712], - [0.72039115, 0.87035015, 0.16260273], - [0.73088902, 0.87191584, 0.15602894], - [0.74138803, 0.87344918, 0.14956101], - [0.75188414, 0.87495143, 0.14322828], - [0.76237342, 0.87642392, 0.13706449], - [0.77285183, 0.87786808, 0.13110864], - [0.78331535, 0.87928545, 0.12540538], - [0.79375994, 0.88067763, 0.12000532], - [0.80418159, 0.88204632, 0.11496505], - [0.81457634, 0.88339329, 0.11034678], - [0.82494028, 0.88472036, 0.10621724], - [0.83526959, 0.88602943, 0.1026459], - [0.84556056, 0.88732243, 0.09970219], - [0.8558096, 0.88860134, 0.09745186], - [0.86601325, 0.88986815, 0.09595277], - [0.87616824, 0.89112487, 0.09525046], - [0.88627146, 0.89237353, 0.09537439], - [0.89632002, 0.89361614, 0.09633538], - [0.90631121, 0.89485467, 0.09812496], - [0.91624212, 0.89609127, 0.1007168], - [0.92610579, 0.89732977, 0.10407067], - [0.93590444, 0.8985704, 0.10813094], - [0.94563626, 0.899815, 0.11283773], - [0.95529972, 0.90106534, 0.11812832], - [0.96489353, 0.90232311, 0.12394051], - [0.97441665, 0.90358991, 0.13021494], - [0.98386829, 0.90486726, 0.13689671], - [0.99324789, 0.90615657, 0.1439362]] - - viridis = LinearSegmentedColormap.from_list(__file__, cm_data) -except: - pass - - -bc_color_dict 
= {'default': 'black', 'WEL': 'red', 'DRN': 'yellow', - 'RIV': 'teal', 'GHB': 'cyan', 'CHD': 'navy', - 'STR': 'purple', 'SFR': 'teal', 'UZF': 'peru', - 'LAK': 'royalblue'} - - -class PlotException(Exception): - def __init__(self, message): - super(PlotException, self).__init__(message) - - -class PlotUtilities(object): - """ - Class which groups a collection of plotting utilities - which Flopy and Flopy6 can use to generate map based plots - """ - - @staticmethod - def _plot_simulation_helper(simulation, model_list, - SelPackList, **kwargs): - """ - Plot 2-D, 3-D, transient 2-D, and stress period list (MfList) - model input data from a model instance - - Parameters - ---------- - simulation : flopy.mf6.Simulation object - model_list : list - list of model names to plot - SelPackList : list - list of package names to plot, if none - all packages will be plotted - - **kwargs : dict - filename_base : str - Base file name that will be used to automatically generate file - names for output image files. Plots will be exported as image - files if file_name_base is not None. (default is None) - file_extension : str - Valid matplotlib.pyplot file extension for savefig(). Only used - if filename_base is not None. (default is 'png') - mflay : int - MODFLOW zero-based layer number to return. If None, then all - all layers will be included. (default is None) - kper : int - MODFLOW zero-based stress period number to return. - (default is zero) - key : str - MfList dictionary key. (default is None) - - Returns - ------- - axes : list - Empty list is returned if filename_base is not None. Otherwise - a list of matplotlib.pyplot.axis are returned. - """ - defaults = {"kper": 0, "mflay": None, "filename_base": None, - "file_extension": "png", "key": None} - - for key in defaults: - if key in kwargs: - if key == 'file_extension': - defaults[key] = kwargs[key].replace(".", "") - else: - defaults[key] = kwargs[key] - - kwargs.pop(key) - - filename_base = defaults['filename_base'] - - if model_list is None: - model_list = simulation.model_names - - - axes = [] - ifig = 0 - for model_name in model_list: - model = simulation.get_model(model_name) - - model_filename_base = None - if filename_base is not None: - model_filename_base = filename_base + "_" + model_name - - if model.verbose: - print(" Plotting Model: ", model_name) - - caxs = PlotUtilities._plot_model_helper( - model, - SelPackList=SelPackList, - kper=defaults['kper'], - mflay=defaults['mflay'], - filename_base=model_filename_base, - file_extension=defaults['file_extension'], - key=defaults['key'], - initial_fig=ifig, - model_name=model_name, - **kwargs) - - if isinstance(caxs, list): - for c in caxs: - axes.append(c) - else: - axes.append(caxs) - - ifig = len(axes) + 1 - - return axes - - - @staticmethod - def _plot_model_helper(model, SelPackList, **kwargs): - """ - Plot 2-D, 3-D, transient 2-D, and stress period list (MfList) - model input data from a model instance - - Parameters - ---------- - model : Flopy model instance - SelPackList : list - list of package names to plot, if none - all packages will be plotted - - **kwargs : dict - filename_base : str - Base file name that will be used to automatically generate file - names for output image files. Plots will be exported as image - files if file_name_base is not None. (default is None) - file_extension : str - Valid matplotlib.pyplot file extension for savefig(). Only used - if filename_base is not None. (default is 'png') - mflay : int - MODFLOW zero-based layer number to return. 
If None, then all - all layers will be included. (default is None) - kper : int - MODFLOW zero-based stress period number to return. - (default is zero) - key : str - MfList dictionary key. (default is None) - - Returns - ------- - axes : list - Empty list is returned if filename_base is not None. Otherwise - a list of matplotlib.pyplot.axis are returned. - """ - # valid keyword arguments - defaults = {"kper": 0, "mflay": None, "filename_base": None, - "file_extension": "png", "key": None, "model_name": "", - "initial_fig": 0} - - for key in defaults: - if key in kwargs: - if key == 'file_extension': - defaults[key] = kwargs[key].replace(".", "") - else: - defaults[key] = kwargs[key] - - kwargs.pop(key) - - axes = [] - ifig = defaults['initial_fig'] - if SelPackList is None: - for p in model.packagelist: - caxs = PlotUtilities._plot_package_helper( - p, - initial_fig=ifig, - filename_base=defaults['filename_base'], - file_extension=defaults['file_extension'], - kper=defaults['kper'], - mflay=defaults['mflay'], - key=defaults['key'], - model_name=defaults['model_name']) - # unroll nested lists of axes into a single list of axes - if isinstance(caxs, list): - for c in caxs: - axes.append(c) - else: - axes.append(caxs) - # update next active figure number - ifig = len(axes) + 1 - - else: - for pon in SelPackList: - for p in model.packagelist: - if pon in p.name: - if model.verbose: - print(' Plotting Package: ', p.name[0]) - caxs = PlotUtilities._plot_package_helper( - p, - initial_fig=ifig, - filename_base=defaults['filename_base'], - file_extension=defaults['file_extension'], - kper=defaults['kper'], - mflay=defaults['mflay'], - key=defaults['key'], - model_name=defaults['model_name']) - - # unroll nested lists of axes into a single list of axes - if isinstance(caxs, list): - for c in caxs: - axes.append(c) - else: - axes.append(caxs) - # update next active figure number - ifig = len(axes) + 1 - break - if model.verbose: - print(' ') - return axes - - @staticmethod - def _plot_package_helper(package, **kwargs): - """ - Plot 2-D, 3-D, transient 2-D, and stress period list (MfList) - package input data - - Parameters - ---------- - package: flopy.pakbase.Package - package instance supplied for plotting - - **kwargs : dict - filename_base : str - Base file name that will be used to automatically generate file - names for output image files. Plots will be exported as image - files if file_name_base is not None. (default is None) - file_extension : str - Valid matplotlib.pyplot file extension for savefig(). Only used - if filename_base is not None. (default is 'png') - mflay : int - MODFLOW zero-based layer number to return. If None, then all - all layers will be included. (default is None) - kper : int - MODFLOW zero-based stress period number to return. (default is - zero) - key : str - MfList dictionary key. (default is None) - - Returns - ------- - axes : list - Empty list is returned if filename_base is not None. Otherwise - a list of matplotlib.pyplot.axis are returned. 
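As a usage sketch (not part of the patch; "demo.nam" is hypothetical), this helper is what backs the public package-level plot() call:

    import flopy
    ml = flopy.modflow.Modflow.load("demo.nam")
    axes = ml.wel.plot(kper=0, mflay=0)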
- - """ - defaults = {"kper": 0, 'filename_base': None, - "file_extension": "png", 'mflay': None, - "key": None, "initial_fig": 0, - "model_name": ""} - - for key in defaults: - if key in kwargs: - if key == "file_extension": - defaults[key] = kwargs[key].replace(".", "") - elif key == "initial_fig": - defaults[key] = int(kwargs[key]) - else: - defaults[key] = kwargs[key] - - kwargs.pop(key) - - model_name = defaults.pop("model_name") - - inc = package.parent.modelgrid.nlay - if defaults['mflay'] is not None: - inc = 1 - - axes = [] - for item, value in package.__dict__.items(): - caxs = [] - # trap non-flopy specific data_types. - - if isinstance(value, list): - for v in value: - if isinstance(v, Util3d): - if package.parent.verbose: - print( - 'plotting {} package Util3d instance: {}'.format( - package.name[0], item)) - fignum = list(range(defaults['initial_fig'], - defaults['initial_fig'] + inc)) - defaults['initial_fig'] = fignum[-1] + 1 - caxs.append( - PlotUtilities._plot_util3d_helper( - v, - filename_base=defaults['filename_base'], - file_extension=defaults['file_extension'], - mflay=defaults['mflay'], - fignum=fignum, model_name=model_name, - colorbar=True)) - - elif isinstance(value, DataInterface): - if value.data_type == DataType.transientlist: # isinstance(value, (MfList, MFTransientList)): - if package.parent.verbose: - print('plotting {} package MfList instance: {}'.format( - package.name[0], item)) - if defaults['key'] is None: - names = ['{} {} location stress period {} layer {}'.format( - model_name, package.name[0], - defaults['kper'] + 1, k + 1) - for k in range(package.parent.modelgrid.nlay)] - colorbar = False - else: - names = ['{} {} {} data stress period {} layer {}'.format( - model_name, package.name[0], defaults['key'], - defaults['kper'] + 1, k + 1) - for k in range(package.parent.modelgrid.nlay)] - colorbar = True - - fignum = list(range(defaults['initial_fig'], - defaults['initial_fig'] + inc)) - defaults['initial_fig'] = fignum[-1] + 1 - # need to keep this as value.plot() because of mf6 datatype issues - ax = value.plot(defaults['key'], - names, - defaults['kper'], - filename_base=defaults['filename_base'], - file_extension=defaults['file_extension'], - mflay=defaults['mflay'], - fignum=fignum, colorbar=colorbar, - **kwargs) - - if ax is not None: - caxs.append(ax) - - elif value.data_type == DataType.array3d: # isinstance(value, Util3d): - if value.array is not None: - if package.parent.verbose: - print('plotting {} package Util3d instance: {}'.format( - package.name[0], item)) - # fignum = list(range(ifig, ifig + inc)) - fignum = list(range(defaults['initial_fig'], - defaults['initial_fig'] + value.array.shape[0])) - defaults['initial_fig'] = fignum[-1] + 1 - - caxs.append(PlotUtilities._plot_util3d_helper( - value, - filename_base=defaults['filename_base'], - file_extension=defaults['file_extension'], - mflay=defaults['mflay'], - fignum=fignum, - model_name=model_name, - colorbar=True)) - - elif value.data_type == DataType.array2d: # isinstance(value, Util2d): - if value.array is not None: - if len(value.array.shape) == 2: # is this necessary? 
- if package.parent.verbose: - print('plotting {} package Util2d instance: {}'.format( - package.name[0], item)) - fignum = list(range(defaults['initial_fig'], - defaults['initial_fig'] + 1)) - defaults['initial_fig'] = fignum[-1] + 1 - - caxs.append(PlotUtilities._plot_util2d_helper( - value, - filename_base=defaults['filename_base'], - file_extension=defaults['file_extension'], - fignum=fignum, - model_name=model_name, - colorbar=True)) - - elif value.data_type == DataType.transient2d: # isinstance(value, Transient2d): - if value.array is not None: - if package.parent.verbose: - print( - 'plotting {} package Transient2d instance: {}'.format( - package.name[0], item)) - fignum = list(range(defaults['initial_fig'], - defaults['initial_fig'] + inc)) - defaults['initial_fig'] = fignum[-1] + 1 - - caxs.append(PlotUtilities._plot_transient2d_helper( - value, - filename_base=defaults['filename_base'], - file_extension=defaults['file_extension'], - kper=defaults['kper'], - fignum=fignum, - colorbar=True)) - - else: - pass - - else: - pass - - # unroll nested lists os axes into a single list of axes - if isinstance(caxs, list): - for c in caxs: - if isinstance(c, list): - for cc in c: - axes.append(cc) - else: - axes.append(c) - else: - axes.append(caxs) - - return axes - - @staticmethod - def _plot_mflist_helper(mflist, key=None, names=None, kper=0, - filename_base=None, file_extension=None, - mflay=None, **kwargs): - """ - Plot stress period boundary condition (MfList) data for a specified - stress period - - Parameters - ---------- - mflist: flopy.utils.util_list.MfList object - - key : str - MfList dictionary key. (default is None) - names : list - List of names for figure titles. (default is None) - kper : int - MODFLOW zero-based stress period number to return. (default is zero) - filename_base : str - Base file name that will be used to automatically generate file - names for output image files. Plots will be exported as image - files if file_name_base is not None. (default is None) - file_extension : str - Valid matplotlib.pyplot file extension for savefig(). Only used - if filename_base is not None. (default is 'png') - mflay : int - MODFLOW zero-based layer number to return. If None, then all - all layers will be included. (default is None) - **kwargs : dict - axes : list of matplotlib.pyplot.axis - List of matplotlib.pyplot.axis that will be used to plot - data for each layer. If axes=None axes will be generated. - (default is None) - pcolor : bool - Boolean used to determine if matplotlib.pyplot.pcolormesh - plot will be plotted. (default is True) - colorbar : bool - Boolean used to determine if a color bar will be added to - the matplotlib.pyplot.pcolormesh. Only used if pcolor=True. - (default is False) - inactive : bool - Boolean used to determine if a black overlay in inactive - cells in a layer will be displayed. (default is True) - contour : bool - Boolean used to determine if matplotlib.pyplot.contour - plot will be plotted. (default is False) - clabel : bool - Boolean used to determine if matplotlib.pyplot.clabel - will be plotted. Only used if contour=True. (default is False) - grid : bool - Boolean used to determine if the model grid will be plotted - on the figure. (default is False) - masked_values : list - List of unique values to be excluded from the plot. - - Returns - ------- - axes : list - Empty list is returned if filename_base is not None. Otherwise - a list of matplotlib.pyplot.axis is returned. 
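A sketch of the public route into this helper (hypothetical model "ml"); stress period data plots are normally requested from the package attribute:

    axes = ml.wel.stress_period_data.plot(key="flux", kper=0, mflay=0)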
- - """ - if file_extension is not None: - fext = file_extension - else: - fext = 'png' - - model_name = "" - if "model_name" in kwargs: - model_name = kwargs.pop('model_name') + " " - - filenames = None - if filename_base is not None: - if mflay is not None: - i0 = int(mflay) - if i0 + 1 >= mflist.model.modelgrid.nlay: - i0 = mflist.model.modelgrid.nlay - 1 - i1 = i0 + 1 - else: - i0 = 0 - i1 = mflist.model.modelgrid.nlay - # build filenames - package_name = mflist.package.name[0].upper() - filenames = ['{}_{}_StressPeriod{}_Layer{}.{}'.format( - filename_base, package_name, - kper + 1, k + 1, fext) - for k in range(i0, i1)] - - if names is None: - if key is None: - names = ['{}{} location stress period: {} layer: {}'.format( - model_name, mflist.package.name[0], kper + 1, k + 1) - for k in range(mflist.model.modelgrid.nlay)] - else: - names = ['{}{} {} stress period: {} layer: {}'.format( - model_name, mflist.package.name[0], - key, kper + 1, k + 1) - for k in range(mflist.model.modelgrid.nlay)] - - if key is None: - axes = PlotUtilities._plot_bc_helper(mflist.package, - kper, - names=names, - filenames=filenames, - mflay=mflay, **kwargs) - else: - arr_dict = mflist.to_array(kper, mask=True) - - try: - arr = arr_dict[key] - except: - err_msg = 'Cannot find key to plot\n' - err_msg += ' Provided key={}\n Available keys='.format(key) - for name, arr in arr_dict.items(): - err_msg += '{}, '.format(name) - err_msg += '\n' - raise PlotException(err_msg) - - axes = PlotUtilities._plot_array_helper(arr, - model=mflist.model, - names=names, - filenames=filenames, - mflay=mflay, - **kwargs) - return axes - - @staticmethod - def _plot_util2d_helper(util2d, title=None, filename_base=None, - file_extension=None, fignum=None, **kwargs): - """ - Plot 2-D model input data - - Parameters - ---------- - util2d : flopy.util.util_array.Util2d object - title : str - Plot title. If a plot title is not provide one will be - created based on data name (self.name). (default is None) - filename_base : str - Base file name that will be used to automatically generate file - names for output image files. Plots will be exported as image - files if file_name_base is not None. (default is None) - file_extension : str - Valid matplotlib.pyplot file extension for savefig(). Only used - if filename_base is not None. (default is 'png') - fignum : list - list of figure numbers - **kwargs : dict - axes : list of matplotlib.pyplot.axis - List of matplotlib.pyplot.axis that will be used to plot - data for each layer. If axes=None axes will be generated. - (default is None) - pcolor : bool - Boolean used to determine if matplotlib.pyplot.pcolormesh - plot will be plotted. (default is True) - colorbar : bool - Boolean used to determine if a color bar will be added to - the matplotlib.pyplot.pcolormesh. Only used if pcolor=True. - (default is False) - inactive : bool - Boolean used to determine if a black overlay in inactive - cells in a layer will be displayed. (default is True) - contour : bool - Boolean used to determine if matplotlib.pyplot.contour - plot will be plotted. (default is False) - clabel : bool - Boolean used to determine if matplotlib.pyplot.clabel - will be plotted. Only used if contour=True. (default is False) - grid : bool - Boolean used to determine if the model grid will be plotted - on the figure. (default is False) - masked_values : list - List of unique values to be excluded from the plot. - - Returns - ------- - axes : list - Empty list is returned if filename_base is not None. 
Otherwise - a list of matplotlib.pyplot.axis is returned. - - """ - model_name = "" - if "model_name" in kwargs: - model_name = kwargs.pop("model_name") + " " - - if title is None: - title = "{}{}".format(model_name, util2d.name) - - if file_extension is not None: - fext = file_extension - else: - fext = 'png' - - filename = None - if filename_base is not None: - filename = '{}_{}.{}'.format(filename_base, - util2d.name, fext) - - axes = PlotUtilities._plot_array_helper(util2d.array, - util2d.model, - names=title, - filenames=filename, - fignum=fignum, - **kwargs) - return axes - - @staticmethod - def _plot_util3d_helper(util3d, filename_base=None, - file_extension=None, mflay=None, - fignum=None, **kwargs): - """ - Plot 3-D model input data - - Parameters - ---------- - util3d : flopy.util.util_array.Util3d object - filename_base : str - Base file name that will be used to automatically generate file - names for output image files. Plots will be exported as image - files if file_name_base is not None. (default is None) - file_extension : str - Valid matplotlib.pyplot file extension for savefig(). Only used - if filename_base is not None. (default is 'png') - mflay : int - MODFLOW zero-based layer number to return. If None, then all - all layers will be included. (default is None) - fignum : list - list of figure numbers - **kwargs : dict - axes : list of matplotlib.pyplot.axis - List of matplotlib.pyplot.axis that will be used to plot - data for each layer. If axes=None axes will be generated. - (default is None) - pcolor : bool - Boolean used to determine if matplotlib.pyplot.pcolormesh - plot will be plotted. (default is True) - colorbar : bool - Boolean used to determine if a color bar will be added to - the matplotlib.pyplot.pcolormesh. Only used if pcolor=True. - (default is False) - inactive : bool - Boolean used to determine if a black overlay in inactive - cells in a layer will be displayed. (default is True) - contour : bool - Boolean used to determine if matplotlib.pyplot.contour - plot will be plotted. (default is False) - clabel : bool - Boolean used to determine if matplotlib.pyplot.clabel - will be plotted. Only used if contour=True. (default is False) - grid : bool - Boolean used to determine if the model grid will be plotted - on the figure. (default is False) - masked_values : list - List of unique values to be excluded from the plot. - - Returns - ------- - axes : list - Empty list is returned if filename_base is not None. Otherwise - a list of matplotlib.pyplot.axis is returned. 
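A sketch of the public call that lands here (hypothetical model "ml"); three-dimensional arrays are plotted layer by layer:

    axes = ml.lpf.hk.plot(colorbar=True)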
- - """ - model_name = "" - if "model_name" in kwargs: - model_name = kwargs.pop('model_name') - - if file_extension is not None: - fext = file_extension - else: - fext = 'png' - - # flopy6 adaption - array = util3d.array - name = util3d.name - if isinstance(name, str): - name = [name] * array.shape[0] - - names = ['{}{} layer {}'.format(model_name, - name[k], k + 1) for k in - range(array.shape[0])] - - filenames = None - if filename_base is not None: - if mflay is not None: - i0 = int(mflay) - if i0 + 1 >= array.shape[0]: - i0 = array.shape[0] - 1 - i1 = i0 + 1 - else: - i0 = 0 - i1 = array.shape[0] - # build filenames, use local "name" variable (flopy6 adaptation) - filenames = ['{}_{}_Layer{}.{}'.format( - filename_base, name[k], - k + 1, fext) - for k in range(i0, i1)] - - axes = PlotUtilities._plot_array_helper(array, - util3d.model, - names=names, - filenames=filenames, - mflay=mflay, - fignum=fignum, - **kwargs) - return axes - - @staticmethod - def _plot_transient2d_helper(transient2d, filename_base=None, - file_extension=None, kper=0, - fignum=None, **kwargs): - """ - Plot transient 2-D model input data - - Parameters - ---------- - transient2d : flopy.utils.util_array.Transient2D object - filename_base : str - Base file name that will be used to automatically generate file - names for output image files. Plots will be exported as image - files if file_name_base is not None. (default is None) - file_extension : str - Valid matplotlib.pyplot file extension for savefig(). Only used - if filename_base is not None. (default is 'png') - kper : int - zero based stress period number - fignum : list - list of figure numbers - **kwargs : dict - axes : list of matplotlib.pyplot.axis - List of matplotlib.pyplot.axis that will be used to plot - data for each layer. If axes=None axes will be generated. - (default is None) - pcolor : bool - Boolean used to determine if matplotlib.pyplot.pcolormesh - plot will be plotted. (default is True) - colorbar : bool - Boolean used to determine if a color bar will be added to - the matplotlib.pyplot.pcolormesh. Only used if pcolor=True. - (default is False) - inactive : bool - Boolean used to determine if a black overlay in inactive - cells in a layer will be displayed. (default is True) - contour : bool - Boolean used to determine if matplotlib.pyplot.contour - plot will be plotted. (default is False) - clabel : bool - Boolean used to determine if matplotlib.pyplot.clabel - will be plotted. Only used if contour=True. (default is False) - grid : bool - Boolean used to determine if the model grid will be plotted - on the figure. (default is False) - masked_values : list - List of unique values to be excluded from the plot. - kper : str - MODFLOW zero-based stress period number to return. If - kper='all' then data for all stress period will be - extracted. (default is zero). - - Returns - ------- - axes : list - Empty list is returned if filename_base is not None. Otherwise - a list of matplotlib.pyplot.axis is returned. 
- - """ - if file_extension is not None: - fext = file_extension - else: - fext = 'png' - - if isinstance(kper, int): - k0 = kper - k1 = kper + 1 - - elif isinstance(kper, str): - if kper.lower() == "all": - k0 = 0 - k1 = transient2d.model.nper - - else: - k0 = int(kper) - k1 = k0 + 1 - - else: - k0 = int(kper) - k1 = k0 + 1 - - if fignum is not None: - if not isinstance(fignum, list): - fignum = list(fignum) - else: - fignum = list(range(k0, k1)) - - if 'mflay' in kwargs: - kwargs.pop('mflay') - - axes = [] - for idx, kper in enumerate(range(k0, k1)): - title = '{} stress period {:d}'.format( - transient2d.name.replace('_', '').upper(), - kper + 1) - - if filename_base is not None: - filename = filename_base + '_{:05d}.{}'.format(kper + 1, fext) - else: - filename = None - - axes.append(PlotUtilities._plot_array_helper( - transient2d.array[kper], - transient2d.model, - names=title, - filenames=filename, - fignum=fignum[idx], - **kwargs)) - return axes - - @staticmethod - def _plot_scalar_helper(scalar, filename_base=None, - file_extension=None, **kwargs): - """ - Helper method to plot scalar objects - - Parameters - ---------- - scalar : flopy.mf6.data.mfscalar object - filename_base : str - Base file name that will be used to automatically generate file - names for output image files. Plots will be exported as image - files if file_name_base is not None. (default is None) - file_extension : str - Valid matplotlib.pyplot file extension for savefig(). Only used - if filename_base is not None. (default is 'png') - - Returns - ------- - axes: list matplotlib.axes object - - """ - if file_extension is not None: - fext = file_extension - else: - fext = 'png' - - if 'mflay' in kwargs: - kwargs.pop('mflay') - - title = '{}'.format(scalar.name.replace('_', '').upper()) - - if filename_base is not None: - filename = filename_base + '.{}'.format(fext) - else: - filename = None - - axes = PlotUtilities._plot_array_helper(scalar.array, - scalar.model, - names=title, - filenames=filename, - **kwargs) - return axes - - @staticmethod - def _plot_array_helper(plotarray, model=None, modelgrid=None, axes=None, - names=None, filenames=None, fignum=None, - mflay=None, **kwargs): - """ - Helper method to plot array objects - - Parameters - ---------- - plotarray : np.array object - model: fp.modflow.Modflow object - optional if spatial reference is provided - modelgrid: fp.discretization.ModelGrid object - object that defines the spatial orientation of a modflow - grid within flopy. Optional if model object is provided - axes: matplotlib.axes object - existing matplotlib axis object to layer additional - plotting on to. Optional. - names: list - list of figure titles (optional) - filenames: list - list of filenames to save figures to (optional) - fignum: - list of figure numbers (optional) - mflay: int - modflow model layer - **kwargs: - keyword arguments - - Returns: - axes: list matplotlib.axes object - - """ - from .map import PlotMapView - - defaults = {'figsize': None, 'masked_values': None, - 'pcolor': True, 'inactive': True, - 'contour': False, 'clabel': False, - 'colorbar': False, 'grid': False, - 'levels': None, 'colors': "black", - 'dpi': None, 'fmt': "%1.3f", 'modelgrid': None} - - # check that matplotlib is installed - if plt is None: - err_msg = 'Could not import matplotlib. ' \ - 'Must install matplotlib ' + \ - ' in order to plot LayerFile data.' 
- raise PlotException(err_msg) - - for key in defaults: - if key in kwargs: - defaults[key] = kwargs.pop(key) - - plotarray = plotarray.astype(float) - - # test if this is vertex or structured grid - if model is not None: - grid_type = model.modelgrid.grid_type - hnoflo = model.hnoflo - hdry = model.hdry - if defaults['masked_values'] is None: - t = [] - if hnoflo is not None: - t.append(hnoflo) - if hdry is not None: - t.append(hdry) - if t: - defaults['masked_values'] = t - else: - if hnoflo is not None: - defaults['masked_values'].append(hnoflo) - if hdry is not None: - defaults['masked_values'].append(hdry) - - elif modelgrid is not None: - grid_type = modelgrid.grid_type - - else: - grid_type = "structured" - - ib = None - if modelgrid is not None: - if modelgrid.idomain is not None: - ib = modelgrid.idomain - - else: - if ib is None: - try: - ib = model.modelgrid.idomain - except: - pass - - # reshape 2d arrays to 3d for convenience - if len(plotarray.shape) == 2 and grid_type == "structured": - plotarray = plotarray.reshape((1, plotarray.shape[0], - plotarray.shape[1])) - - # setup plotting routines - # consider refactoring maxlay to nlay - maxlay = plotarray.shape[0] - i0, i1 = PlotUtilities._set_layer_range(mflay, maxlay) - names = PlotUtilities._set_names(names, maxlay) - filenames = PlotUtilities._set_names(filenames, maxlay) - fignum = PlotUtilities._set_fignum(fignum, maxlay, i0, i1) - axes = PlotUtilities._set_axes(axes, mflay, maxlay, i0, i1, - defaults, names, fignum) - - for idx, k in enumerate(range(i0, i1)): - fig = plt.figure(num=fignum[idx]) - pmv = PlotMapView(ax=axes[idx], model=model, - modelgrid=modelgrid, layer=k) - if defaults['pcolor']: - cm = pmv.plot_array(plotarray[k], - masked_values=defaults['masked_values'], - ax=axes[idx], **kwargs) - - if defaults['colorbar']: - label = '' - if not isinstance(defaults['colorbar'], bool): - label = str(defaults['colorbar']) - plt.colorbar(cm, ax=axes[idx], shrink=0.5, label=label) - - if defaults['contour']: - cl = pmv.contour_array(plotarray[k], - masked_values=defaults['masked_values'], - ax=axes[idx], - colors=defaults['colors'], - levels=defaults['levels'], - **kwargs) - if defaults['clabel']: - axes[idx].clabel(cl, fmt=defaults['fmt'],**kwargs) - - if defaults['grid']: - pmv.plot_grid(ax=axes[idx]) - - if defaults['inactive']: - if ib is not None: - pmv.plot_inactive(ibound=ib, ax=axes[idx]) - - if len(axes) == 1: - axes = axes[0] - - if filenames is not None: - for idx, k in enumerate(range(i0, i1)): - fig = plt.figure(num=fignum[idx]) - fig.savefig(filenames[idx], dpi=defaults['dpi']) - print(' created...{}'.format(os.path.basename(filenames[idx]))) - # there will be nothing to return when done - axes = None - plt.close('all') - - return axes - - @staticmethod - def _plot_bc_helper(package, kper, - axes=None, names=None, filenames=None, fignum=None, - mflay=None, **kwargs): - """ - Helper method to plot bc objects from flopy packages - - Parameters - ---------- - package : flopy.pakbase.Package objects - kper : int - zero based stress period number - axes: matplotlib.axes object - existing matplotlib axis object to layer additional - plotting on to. Optional. 
- names: list - list of figure titles (optional) - filenames: list - list of filenames to save figures to (optional) - fignum: - list of figure numbers (optional) - mflay: int - modflow model layer - **kwargs: - keyword arguments - - Returns - ------- - axes: list matplotlib.axes object - """ - - from .map import PlotMapView - - if plt is None: - s = 'Could not import matplotlib. Must install matplotlib ' +\ - ' in order to plot boundary condition data.' - raise PlotException(s) - - defaults = {'figsize': None, "inactive": True, - 'grid': False, "dpi": None, - "masked_values": None} - - # parse kwargs - for key in defaults: - if key in kwargs: - defaults[key] = kwargs.pop(key) - - ftype = package.name[0] - - color = "black" - if "CHD" in ftype.upper(): - color = bc_color_dict[ftype.upper()[:3]] - - # flopy-modflow vs. flopy-modflow6 trap - try: - model = package.parent - except AttributeError: - model = package._model_or_sim - - nlay = model.modelgrid.nlay - - # set up plotting routines - i0, i1 = PlotUtilities._set_layer_range(mflay, nlay) - names = PlotUtilities._set_names(names, nlay) - filenames = PlotUtilities._set_names(filenames, i1 - i0) - fignum = PlotUtilities._set_fignum(fignum, i1 - i0, i0, i1) - axes = PlotUtilities._set_axes(axes, mflay, nlay, i0, i1, - defaults, names, fignum) - - for idx, k in enumerate(range(i0, i1)): - pmv = PlotMapView(ax=axes[idx], model=model, layer=k) - fig = plt.figure(num=fignum[idx]) - pmv.plot_bc(ftype=ftype, package=package, kper=kper, ax=axes[idx], - color=color) - - if defaults['grid']: - pmv.plot_grid(ax=axes[idx]) - - if defaults['inactive']: - if model.modelgrid is not None: - ib = model.modelgrid.idomain - if ib is not None: - pmv.plot_inactive(ibound=ib, ax=axes[idx]) - - if len(axes) == 1: - axes = axes[0] - - if filenames is not None: - for idx, k in enumerate(range(i0, i1)): - fig = plt.figure(num=fignum[idx]) - fig.savefig(filenames[idx], dpi=defaults['dpi']) - plt.close(fignum[idx]) - print(' created...{}'.format(os.path.basename(filenames[idx]))) - # there will be nothing to return when done - axes = None - plt.close('all') - - return axes - - @staticmethod - def _set_layer_range(mflay, maxlay): - """ - Re-usable method to check for mflay and set - the range of plottable layers - - Parameters - ---------- - mflay : int - zero based layer number - maxlay : int - maximum number of layers in the plotting array - - Returns - ------- - i0, i1 : int, int - minimum and maximum bounds on the layer range - - """ - if mflay is not None: - i0 = int(mflay) - if i0+1 >= maxlay: - i0 = maxlay - 1 - i1 = i0 + 1 - else: - i0 = 0 - i1 = maxlay - - return i0, i1 - - @staticmethod - def _set_names(names, maxlay): - """ - Checks the supplied name variable for shape - - Parameters - ---------- - names : list of str - if names is not none, asserts that there is - a name supplied for each plot that will be - generated - - maxlay : int - maximum number of layers in the plotting array - - Returns - ------- - names : list or None - list of names or None - - """ - if names is not None: - if not isinstance(names, list): - if maxlay > 1: - names = ["{} layer {}".format(names, i + 1) - for i in range(maxlay)] - else: - names = [names] - assert len(names) == maxlay - return names - - @staticmethod - def _set_fignum(fignum, maxlay, i0, i1): - """ - Method to generate a list of matplotlib figure - numbers to join to figure objects. Checks - for existing figures. 
- - Parameters - ---------- - fignum : list - list of figure numbers - maxlay : int - maximum number of layers in the plotting array - i0 : int - minimum layer range - i1 : int - maximum layer range - - Returns - ------- - fignum : list - - """ - if fignum is not None: - if not isinstance(fignum, list): - fignum = [fignum] - assert len(fignum) == maxlay - # check for existing figures - f0 = fignum[0] - for i in plt.get_fignums(): - if i >= f0: - f0 = i + 1 - finc = f0 - fignum[0] - for idx, _ in enumerate(fignum): - fignum[idx] += finc - else: - # check for existing figures - f0 = 0 - for i in plt.get_fignums(): - if i >= f0: - f0 += 1 - f1 = f0 + (i1 - i0) - fignum = np.arange(f0, f1) - - return fignum - - @staticmethod - def _set_axes(axes, mflay, maxlay, i0, i1, - defaults, names, fignum): - """ - Method to prepare axes objects for plotting - - Parameters - ---------- - axes : list - matplotlib.axes objects - mflay : int - layer to plot or None - i0 : int - minimum range of layers to plot - i1 : int - maximum range of layers to plot - defaults : dict - the default dictionary from the parent plotting method - fignum : list - list of figure numbers - - Returns - ------- - axes : list - matplotlib.axes objects - - """ - if axes is not None: - if not isinstance(axes, list): - axes = [axes] - assert len(axes) == maxlay - - else: - # prepare some axis objects for use - axes = [] - for idx, k in enumerate(range(i0, i1)): - plt.figure(figsize=defaults['figsize'], - num=fignum[idx]) - ax = plt.subplot(1, 1, 1, aspect='equal') - if names is not None: - title = names[k] - else: - klay = k - if mflay is not None: - klay = int(mflay) - title = '{} Layer {}'.format('data', klay+1) - ax.set_title(title) - axes.append(ax) - - return axes - - @staticmethod - def saturated_thickness(head, top, botm, laytyp, mask_values=None): - """ - Calculate the saturated thickness. - - Parameters - ---------- - head : numpy.ndarray - head array - top : numpy.ndarray - top array of shape (nrow, ncol) - botm : numpy.ndarray - botm array of shape (nlay, nrow, ncol) - laytyp : numpy.ndarray - confined (0) or convertible (1) of shape (nlay) - mask_values : list of floats - If head is one of these values, then set sat to top - bot - - Returns - ------- - sat_thk : numpy.ndarray - Saturated thickness of shape (nlay, nrow, ncol). 
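A minimal synthetic sketch of the saturated thickness calculation (all array values are invented; cells whose head equals a mask value are assigned the full confined thickness):
>>> import numpy as np
>>> from flopy.plot.plotutil import PlotUtilities
>>> head = np.array([[[-999., 7.]], [[2.5, 2.5]]])  # (nlay, nrow, ncol)
>>> top = np.array([[10., 10.]])
>>> botm = np.array([[[5., 5.]], [[0., 0.]]])
>>> laytyp = np.array([1, 0])  # layer 1 convertible, layer 2 confined
>>> sat = PlotUtilities.saturated_thickness(head, top, botm, laytyp,
...                                         mask_values=[-999.])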
- - """ - if head.ndim == 3: - head = np.copy(head) - nlay, nrow, ncol = head.shape - ncpl = nrow * ncol - head.shape = (nlay, ncpl) - top.shape = (ncpl,) - botm.shape = (nlay, ncpl) - if laytyp.ndim == 3: - laytyp.shape = (nlay, ncpl) - - else: - nrow, ncol = None, None - nlay, ncpl = head.shape - - # cast a laytyp flag for each cell if modflow-2005 based, - # which makes it consistent with the mf6 iconvert array - if laytyp.ndim == 1: - t = np.zeros(head.shape) - for ix, _ in enumerate(laytyp): - t[ix, :] = laytyp[ix] - laytyp = t - del t - - sat_thk_conf = np.empty(head.shape, dtype=head.dtype) - sat_thk_unconf = np.empty(head.shape, dtype=head.dtype) - - for k in range(nlay): - if k == 0: - t = top - else: - t = botm[k - 1, :] - sat_thk_conf[k, :] = t - botm[k, :] - - for k in range(nlay): - dh = np.zeros((ncpl,), dtype=head.dtype) - s = sat_thk_conf[k, :] - - for mv in mask_values: - idx = (head[k, :] == mv) - dh[idx] = s[idx] - - if k == 0: - t = top - else: - t = botm[k - 1, :] - - t = np.where(head[k, :] > t, t, head[k, :]) - dh = np.where(dh == 0, t - botm[k, :], dh) - sat_thk_unconf[k, :] = dh[:] - - sat_thk = np.where(laytyp != 0, sat_thk_unconf, sat_thk_conf) - - if nrow is not None and ncol is not None: - sat_thk.shape = (nlay, nrow, ncol) - - return sat_thk - - @staticmethod - def centered_specific_discharge(Qx, Qy, Qz, delr, delc, sat_thk): - """ - DEPRECATED. Use postprocessing.get_specific_discharge() instead. - - Using the MODFLOW discharge, calculate the cell centered specific discharge - by dividing by the flow width and then averaging to the cell center. - - Parameters - ---------- - Qx : numpy.ndarray - MODFLOW 'flow right face' - Qy : numpy.ndarray - MODFLOW 'flow front face'. The sign on this array will be flipped - by this function so that the y axis is positive to north. - Qz : numpy.ndarray - MODFLOW 'flow lower face'. The sign on this array will be flipped by - this function so that the z axis is positive in the upward direction. - delr : numpy.ndarray - MODFLOW delr array - delc : numpy.ndarray - MODFLOW delc array - sat_thk : numpy.ndarray - Saturated thickness for each cell - - Returns - ------- - (qx, qy, qz) : tuple of numpy.ndarrays - Specific discharge arrays that have been interpolated to cell centers. - - """ - import warnings - warnings.warn('centered_specific_discharge() has been deprecated. Use ' - 'postprocessing.get_specific_discharge() instead.', - DeprecationWarning) - - qx = None - qy = None - qz = None - - if Qx is not None: - - nlay, nrow, ncol = Qx.shape - qx = np.zeros(Qx.shape, dtype=Qx.dtype) - - for k in range(nlay): - for j in range(ncol - 1): - area = delc[:] * 0.5 * (sat_thk[k, :, j] + sat_thk[k, :, j + 1]) - idx = area > 0. - qx[k, idx, j] = Qx[k, idx, j] / area[idx] - - qx[:, :, 1:] = 0.5 * (qx[:, :, 0:ncol - 1] + qx[:, :, 1:ncol]) - qx[:, :, 0] = 0.5 * qx[:, :, 0] - - if Qy is not None: - - nlay, nrow, ncol = Qy.shape - qy = np.zeros(Qy.shape, dtype=Qy.dtype) - - for k in range(nlay): - for i in range(nrow - 1): - area = delr[:] * 0.5 * (sat_thk[k, i, :] + sat_thk[k, i + 1, :]) - idx = area > 0. 
qy[k, i, idx] = Qy[k, i, idx] / area[idx] - - qy[:, 1:, :] = 0.5 * (qy[:, 0:nrow - 1, :] + qy[:, 1:nrow, :]) - qy[:, 0, :] = 0.5 * qy[:, 0, :] - qy = -qy - - if Qz is not None: - qz = np.zeros(Qz.shape, dtype=Qz.dtype) - dr = delr.reshape((1, delr.shape[0])) - dc = delc.reshape((delc.shape[0], 1)) - area = dr * dc - for k in range(nlay): - qz[k, :, :] = Qz[k, :, :] / area[:, :] - qz[1:, :, :] = 0.5 * (qz[0:nlay - 1, :, :] + qz[1:nlay, :, :]) - qz[0, :, :] = 0.5 * qz[0, :, :] - qz = -qz - - return (qx, qy, qz) - - -class UnstructuredPlotUtilities(object): - """ - Collection of unstructured grid and vertex grid compatible - plotting helper functions - """ - - @staticmethod - def line_intersect_grid(ptsin, xgrid, ygrid): - """ - Uses the cross product method to find which cells intersect with the - line and then uses the parameterized line equation to calculate - intersection x, y vertex points. Should be quite fast for large model - grids! - - Parameters - ---------- - ptsin : list - list of tuple line vertex pairs (ex. [(1, 0), (10, 0)]) - xgrid : np.array - model grid x vertices - ygrid : np.array - model grid y vertices - - Returns - ------- - vdict : dict of cell vertices - - """ - # make sure xgrid and ygrid are numpy arrays - if not isinstance(xgrid, np.ndarray): - xgrid = np.array(xgrid) - if not isinstance(ygrid, np.ndarray): - ygrid = np.array(ygrid) - - npts = len(ptsin) - - # use a vector cross product to find which - # cells intersect the line - vdict = {} - for ix in range(1, npts): - xmin = np.min([ptsin[ix - 1][0], ptsin[ix][0]]) - xmax = np.max([ptsin[ix - 1][0], ptsin[ix][0]]) - ymin = np.min([ptsin[ix - 1][1], ptsin[ix][1]]) - ymax = np.max([ptsin[ix - 1][1], ptsin[ix][1]]) - x1 = np.ones(xgrid.shape) * ptsin[ix - 1][0] - y1 = np.ones(ygrid.shape) * ptsin[ix - 1][1] - x2 = np.ones(xgrid.shape) * ptsin[ix][0] - y2 = np.ones(ygrid.shape) * ptsin[ix][1] - x3 = xgrid - y3 = ygrid - x4 = np.zeros(xgrid.shape) - y4 = np.zeros(ygrid.shape) - x4[:, :-1] = xgrid[:, 1:] - x4[:, -1] = xgrid[:, 0] - y4[:, :-1] = ygrid[:, 1:] - y4[:, -1] = ygrid[:, 0] - - # find where intersection is - v1 = [x2 - x1, y2 - y1] - v2 = [x2 - x3, y2 - y3] - xp = v1[0] * v2[1] - v1[1] * v2[0] - - # loop finds which edges the line intersects - cells = [] - cell_vertex_ix = [] - for cell, cpv in enumerate(xp): - if np.all([t < 0 for t in cpv]): - continue - elif np.all([t > 0 for t in cpv]): - continue - - else: - # only cycle through the cells that intersect - # the infinite line - cvert_ix = [] - for ix in range(len(cpv)): - if cpv[ix - 1] < 0 and cpv[ix] > 0: - cvert_ix.append(ix - 1) - elif cpv[ix - 1] > 0 and cpv[ix] < 0: - cvert_ix.append(ix - 1) - elif cpv[ix - 1] == 0 and cpv[ix] == 0: - cvert_ix += [ix - 1, ix] - else: - pass - - if cvert_ix: - cells.append(cell) - cell_vertex_ix.append(cvert_ix) - - # find intersection vertices - numa = (x4 - x3) * (y1 - y3) - (y4 - y3) * (x1 - x3) - numb = (x2 - x1) * (y1 - y3) - (y2 - y1) * (x1 - x3) - denom = (y4 - y3) * (x2 - x1) - (x4 - x3) * (y2 - y1) - ua = numa / denom - # ub = numb / denom - del numa - del numb - del denom - - x = x1 + ua * (x2 - x1) - y = y1 + ua * (y2 - y1) - - for ix, cell in enumerate(cells): - xc = x[cell] - yc = y[cell] - verts = [(xt, yt) for xt, yt in - zip(xc[cell_vertex_ix[ix]], - yc[cell_vertex_ix[ix]])] - - if cell in vdict: - for i in verts: - # finally check that verts are - # within the line segment range - if i[0] < xmin or i[0] > xmax: - continue - elif i[1] < ymin or i[1] > ymax: - continue - elif i in vdict[cell]: - continue -
elif np.isnan(i[0]) or np.isinf(i[0]) \ - or np.isinf(i[1]) or np.isnan(i[1]): - continue - else: - vdict[cell].append(i) - else: - # finally check that verts are - # within the line segment range - t = [] - for i in verts: - if i[0] < xmin or i[0] > xmax: - continue - elif i[1] < ymin or i[1] > ymax: - continue - elif i in t: - continue - elif np.isnan(i[0]) or np.isinf(i[0]) \ - or np.isinf(i[1]) or np.isnan(i[1]): - continue - else: - t.append(i) - - if t: - vdict[cell] = t - - return vdict - - @staticmethod - def irregular_shape_patch(xverts, yverts): - """ - Patch for vertex cross section plotting when - we have an irregular shape type throughout the - model grid or multiple shape types. - - Parameters - ---------- - xverts : list - xvertices - yverts : list - yvertices - - Returns - ------- - xverts, yverts as np.ndarray - - """ - max_verts = 0 - - for xv in xverts: - if len(xv) > max_verts: - max_verts = len(xv) - - for yv in yverts: - if len(yv) > max_verts: - max_verts = len(yv) - - adj_xverts = [] - for xv in xverts: - if len(xv) < max_verts: - n = max_verts - len(xv) - adj_xverts.append(xv + [xv[-1]] * n) - else: - adj_xverts.append(xv) - - adj_yverts = [] - for yv in yverts: - if len(yv) < max_verts: - n = max_verts - len(yv) - adj_yverts.append(yv + [yv[-1]] * n) - else: - adj_yverts.append(yv) - - xverts = np.array(adj_xverts) - yverts = np.array(adj_yverts) - - return xverts, yverts - - @staticmethod - def arctan2(verts): - """ - Reads a two-dimensional set of verts and orders them using the arctan2 method - - Parameters - ---------- - verts : np.array of floats - Nx2 array of verts - - Returns - ------- - verts : np.array of float - Nx2 array of verts - - """ - center = verts.mean(axis=0) - x = verts.T[0] - center[0] - z = verts.T[1] - center[1] - - angles = np.arctan2(z, x) * 180 / np.pi - angleidx = angles.argsort() - - verts = verts[angleidx] - return verts - - -class SwiConcentration(): - """ - The SwiConcentration class calculates equivalent concentrations from - SWI2 zeta surface results - - """ - def __init__(self, model=None, botm=None, istrat=1, nu=None): - if model is None: - if isinstance(botm, list): - botm = np.array(botm) - self.__botm = botm - if isinstance(nu, list): - nu = np.array(nu) - self.__nu = nu - self.__istrat = istrat - if istrat == 1: - self.__nsrf = self.__nu.shape[0] - 1 - else: - self.__nsrf = self.__nu.shape[0] - 2 - else: - try: - dis = model.get_package('DIS') - except: - sys.stdout.write('Error: DIS package not available.\n') - self.__botm = np.zeros((dis.nlay+1, dis.nrow, dis.ncol), np.float) - self.__botm[0, :, :] = dis.top.array - self.__botm[1:, :, :] = dis.botm.array - try: - swi = model.get_package('SWI2') - self.__nu = swi.nu.array - self.__istrat = swi.istrat - self.__nsrf = swi.nsrf - except (AttributeError, ValueError): - sys.stdout.write('Error: SWI2 package not available...\n') - self.__nlay = self.__botm.shape[0] - 1 - self.__nrow = self.__botm[0, :, :].shape[0] - self.__ncol = self.__botm[0, :, :].shape[1] - self.__b = self.__botm[0:-1, :, :] - self.__botm[1:, :, :] - - def calc_conc(self, zeta, layer=None): - """ - Calculate concentrations for a given time step using passed zeta. - - Parameters - ---------- - zeta : dictionary of numpy arrays - Dictionary of zeta results. zeta keys are zero-based zeta surfaces. - layer : int - Concentration will be calculated for the specified layer. If layer - is None, then the concentration will be calculated for all layers. - (default is None). - - Returns - ------- - conc : numpy array - Calculated concentration.
- - Examples - -------- - - >>> import flopy - >>> m = flopy.modflow.Modflow.load('test') - >>> c = flopy.plot.SwiConcentration(model=m) - >>> conc = c.calc_conc(z, layer=0) - - """ - conc = np.zeros((self.__nlay, self.__nrow, self.__ncol), np.float) - - pct = {} - for isrf in range(self.__nsrf): - z = zeta[isrf] - pct[isrf] = (self.__botm[:-1, :, :] - z[:, :, :]) / self.__b[:, :, :] - for isrf in range(self.__nsrf): - p = pct[isrf] - if self.__istrat == 1: - conc[:, :, :] += self.__nu[isrf] * p[:, :, :] - if isrf+1 == self.__nsrf: - conc[:, :, :] += self.__nu[isrf+1] * (1. - p[:, :, :]) - #TODO linear option - if layer is None: - return conc - else: - return conc[layer, :, :] - - -def shapefile_extents(shp): - """ - Determine the extents of a shapefile - - Parameters - ---------- - shp : string - Name of the shapefile. - - Returns - ------- - extents : tuple - tuple with xmin, xmax, ymin, ymax from shapefile. - - Examples - -------- - - >>> import flopy - >>> fshp = 'myshapefile' - >>> extent = flopy.plot.plotutil.shapefile_extents(fshp) - - """ - if shapefile is None: - s = 'Could not import shapefile. Must install pyshp in order to plot shapefiles.' - raise PlotException(s) - - sf = shapefile.Reader(shp) - shapes = sf.shapes() - nshp = len(shapes) - xmin, xmax, ymin, ymax = 1.e20, -1.e20, 1.e20, -1.e20 - - for n in range(nshp): - for p in shapes[n].points: - xmin, xmax = min(xmin, p[0]), max(xmax, p[0]) - ymin, ymax = min(ymin, p[1]), max(ymax, p[1]) - return xmin, xmax, ymin, ymax - - -def shapefile_get_vertices(shp): - """ - Get vertices for the features in a shapefile - - Parameters - ---------- - shp : string - Name of the shapefile from which to extract feature vertices. - - Returns - ------- - vertices : list - Vertices is a list with vertices for each feature in the shapefile. - Individual feature vertices are x, y tuples and contained in a list. - A list with a single x, y tuple is returned for point shapefiles. A - list with multiple x, y tuples is returned for polyline and polygon - shapefiles. - - Examples - -------- - - >>> import flopy - >>> fshp = 'myshapefile' - >>> lines = flopy.plot.plotutil.shapefile_get_vertices(fshp) - - """ - if shapefile is None: - s = 'Could not import shapefile. Must install pyshp in order to plot shapefiles.' - raise PlotException(s) - - sf = shapefile.Reader(shp) - shapes = sf.shapes() - nshp = len(shapes) - vertices = [] - for n in range(nshp): - st = shapes[n].shapeType - if st in [1, 8, 11, 21]: - # points - for p in shapes[n].points: - vertices.append([(p[0], p[1])]) - elif st in [3, 13, 23]: - # line - line = [] - for p in shapes[n].points: - line.append((p[0], p[1])) - line = np.array(line) - vertices.append(line) - elif st in [5, 25, 31]: - # polygons - pts = np.array(shapes[n].points) - prt = shapes[n].parts - par = list(prt) + [pts.shape[0]] - for pij in range(len(prt)): - vertices.append(pts[par[pij]:par[pij+1]]) - return vertices - - -def shapefile_to_patch_collection(shp, radius=500., idx=None): - """ - Create a patch collection from the shapes in a shapefile - - Parameters - ---------- - shp : string - Name of the shapefile to convert to a PatchCollection. - radius : float - Radius of circle for points in the shapefile. (Default is 500.) - idx : iterable int - A list or array that contains shape numbers to include in the - patch collection. Return all shapes if not specified.
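As an aside, a small usage sketch (the shapefile name is hypothetical) tying together the shapefile_extents and shapefile_get_vertices helpers defined above:
>>> import matplotlib.pyplot as plt
>>> from flopy.plot.plotutil import shapefile_extents, shapefile_get_vertices
>>> fshp = 'myshapefile'
>>> xmin, xmax, ymin, ymax = shapefile_extents(fshp)
>>> ax = plt.gca()
>>> ax.set_xlim(xmin, xmax)
>>> ax.set_ylim(ymin, ymax)
>>> for verts in shapefile_get_vertices(fshp):
...     ax.plot([x for x, y in verts], [y for x, y in verts], color='k')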
- - Returns - ------- - pc : matplotlib.collections.PatchCollection - Patch collection of shapes in the shapefile - - """ - if shapefile is None: - s = 'Could not import shapefile. Must install pyshp in order to plot shapefiles.' - raise PlotException(s) - - from matplotlib.patches import Polygon, Circle, Path, PathPatch - from matplotlib.collections import PatchCollection - if isinstance(shp, str): - sf = shapefile.Reader(shp) - else: - sf = shp - shapes = sf.shapes() - nshp = len(shapes) - ptchs = [] - if idx is None: - idx = range(nshp) - for n in idx: - st = shapes[n].shapeType - if st in [1, 8, 11, 21]: - # points - for p in shapes[n].points: - ptchs.append(Circle((p[0], p[1]), radius=radius)) - elif st in [3, 13, 23]: - # line - vertices = [] - for p in shapes[n].points: - vertices.append([p[0], p[1]]) - vertices = np.array(vertices) - path = Path(vertices) - ptchs.append(PathPatch(path, fill=False)) - elif st in [5, 25, 31]: - # polygons - pts = np.array(shapes[n].points) - prt = shapes[n].parts - par = list(prt) + [pts.shape[0]] - for pij in range(len(prt)): - ptchs.append(Polygon(pts[par[pij]:par[pij+1]])) - pc = PatchCollection(ptchs) - return pc - - -def plot_shapefile(shp, ax=None, radius=500., cmap='Dark2', - edgecolor='scaled', facecolor='scaled', - a=None, masked_values=None, idx=None, **kwargs): - """ - Generic function for plotting a shapefile. - - Parameters - ---------- - shp : string - Name of the shapefile to plot. - ax : matplotlib.pyplot.axes object - - radius : float - Radius of circle for points. (Default is 500.) - cmap : string - Name of colormap to use for polygon shading (default is 'Dark2') - edgecolor : string - Color name. (Default is 'scaled' to scale the edge colors.) - facecolor : string - Color name. (Default is 'scaled' to scale the face colors.) - a : numpy.ndarray - Array to plot. - masked_values : iterable of floats, ints - Values to mask. - idx : iterable int - A list or array that contains shape numbers to include in the - patch collection. Return all shapes if not specified. - kwargs : dictionary - Keyword arguments that are passed to PatchCollection.set(``**kwargs``). - Some common kwargs would be 'linewidths', 'linestyles', 'alpha', etc. - - Returns - ------- - pc : matplotlib.collections.PatchCollection - - Examples - -------- - - """ - - if shapefile is None: - s = 'Could not import shapefile. Must install pyshp in order to plot shapefiles.' - raise PlotException(s) - - if 'vmin' in kwargs: - vmin = kwargs.pop('vmin') - else: - vmin = None - - if 'vmax' in kwargs: - vmax = kwargs.pop('vmax') - else: - vmax = None - - if ax is None: - ax = plt.gca() - cm = plt.get_cmap(cmap) - pc = shapefile_to_patch_collection(shp, radius=radius, idx=idx) - pc.set(**kwargs) - if a is None: - nshp = len(pc.get_paths()) - cccol = cm(1.
* np.arange(nshp) / nshp) - if facecolor == 'scaled': - pc.set_facecolor(cccol) - else: - pc.set_facecolor(facecolor) - if edgecolor == 'scaled': - pc.set_edgecolor(cccol) - else: - pc.set_edgecolor(edgecolor) - else: - pc.set_cmap(cm) - if masked_values is not None: - for mval in masked_values: - a = np.ma.masked_equal(a, mval) - if edgecolor == 'scaled': - pc.set_edgecolor('none') - else: - pc.set_edgecolor(edgecolor) - pc.set_array(a) - pc.set_clim(vmin=vmin, vmax=vmax) - # add the patch collection to the axis - ax.add_collection(pc) - return pc - - -def cvfd_to_patch_collection(verts, iverts): - """ - Create a patch collection from control volume vertices and incidence list - - Parameters - ---------- - verts : ndarray - 2d array of x and y points. - iverts : list of lists - should be of len(ncells) with a list of vertex numbers for each cell - - """ - from matplotlib.patches import Polygon - from matplotlib.collections import PatchCollection - ptchs = [] - for ivertlist in iverts: - points = [] - for iv in ivertlist: - points.append((verts[iv, 0], verts[iv, 1])) - # close the polygon, if necessary - if ivertlist[0] != ivertlist[-1]: - iv = ivertlist[0] - points.append((verts[iv, 0], verts[iv, 1])) - ptchs.append(Polygon(points)) - pc = PatchCollection(ptchs) - return pc - - -def plot_cvfd(verts, iverts, ax=None, layer=0, cmap='Dark2', - edgecolor='scaled', facecolor='scaled', a=None, - masked_values=None, **kwargs): - """ - Generic function for plotting a control volume finite difference grid of - information. - - Parameters - ---------- - verts : ndarray - 2d array of x and y points. - iverts : list of lists - should be of len(ncells) with a list of vertex numbers for each cell - ax : matplotlib.pyplot axis - matplotlib.pyplot axis instance. Default is None - layer : int - layer to extract. Used in combination with the optional ncpl - parameter. Default is 0 - cmap : string - Name of colormap to use for polygon shading (default is 'Dark2') - edgecolor : string - Color name. (Default is 'scaled' to scale the edge colors.) - facecolor : string - Color name. (Default is 'scaled' to scale the face colors.) - a : numpy.ndarray - Array to plot. - masked_values : iterable of floats, ints - Values to mask. - kwargs : dictionary - Keyword arguments that are passed to PatchCollection.set(``**kwargs``). - Some common kwargs would be 'linewidths', 'linestyles', 'alpha', etc.
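A toy sketch of the verts/iverts layout that cvfd_to_patch_collection (and hence plot_cvfd) expects; the single square cell here is invented:
>>> import numpy as np
>>> from flopy.plot.plotutil import cvfd_to_patch_collection
>>> verts = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]])  # x, y pairs
>>> iverts = [[0, 1, 2, 3]]  # one cell; the polygon is closed automatically
>>> pc = cvfd_to_patch_collection(verts, iverts)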
- - Returns - ------- - pc : matplotlib.collections.PatchCollection - - Examples - -------- - - """ - import matplotlib.pyplot as plt - - if 'vmin' in kwargs: - vmin = kwargs.pop('vmin') - else: - vmin = None - - if 'vmax' in kwargs: - vmax = kwargs.pop('vmax') - else: - vmax = None - - if 'ncpl' in kwargs: - nlay = layer + 1 - ncpl = kwargs.pop('ncpl') - if isinstance(ncpl, int): - i = int(ncpl) - ncpl = np.ones((nlay), dtype=np.int) * i - elif isinstance(ncpl, list) or isinstance(ncpl, tuple): - ncpl = np.array(ncpl) - i0 = 0 - i1 = 0 - for k in range(nlay): - i0 = i1 - i1 = i0 + ncpl[k] - # retain iverts in selected layer - iverts = iverts[i0:i1] - # retain vertices in selected layer - tverts = [] - for iv in iverts: - for iloc in iv: - tverts.append((verts[iloc, 0], verts[iloc, 1])) - verts = np.array(tverts) - # calculate offset for starting vertex in layer based on - # global vertex numbers - iadj = iverts[0][0] - # reset iverts to relative vertices in selected layer - tiverts = [] - for iv in iverts: - i = [] - for t in iv: - i.append(t-iadj) - tiverts.append(i) - iverts = tiverts - else: - i0 = 0 - i1 = len(iverts) - - # get current axis - if ax is None: - ax = plt.gca() - cm = plt.get_cmap(cmap) - - pc = cvfd_to_patch_collection(verts, iverts) - pc.set(**kwargs) - - # set colors - if a is None: - nshp = len(pc.get_paths()) - cccol = cm(1. * np.arange(nshp) / nshp) - if facecolor == 'scaled': - pc.set_facecolor(cccol) - else: - pc.set_facecolor(facecolor) - if edgecolor == 'scaled': - pc.set_edgecolor(cccol) - else: - pc.set_edgecolor(edgecolor) - else: - pc.set_cmap(cm) - if masked_values is not None: - for mval in masked_values: - a = np.ma.masked_equal(a, mval) - - # add NaN values to mask - a = np.ma.masked_where(np.isnan(a), a) - - if edgecolor == 'scaled': - pc.set_edgecolor('none') - else: - pc.set_edgecolor(edgecolor) - pc.set_array(a[i0:i1]) - pc.set_clim(vmin=vmin, vmax=vmax) - # add the patch collection to the axis - ax.add_collection(pc) - return pc - - -def findrowcolumn(pt, xedge, yedge): - """ - Find the MODFLOW cell containing the x- and y- point provided. - - Parameters - ---------- - pt : list or tuple - A list or tuple containing a x- and y- coordinate - xedge : numpy.ndarray - x-coordinate of the edge of each MODFLOW column. xedge is dimensioned - to NCOL + 1. If xedge is not a numpy.ndarray it is converted to a - numpy.ndarray. - yedge : numpy.ndarray - y-coordinate of the edge of each MODFLOW row. yedge is dimensioned - to NROW + 1. If yedge is not a numpy.ndarray it is converted to a - numpy.ndarray. - - Returns - ------- - irow, jcol : int - Row and column location containing x- and y- point passed to function. - - Examples - -------- - >>> import flopy - >>> irow, jcol = flopy.plotutil.findrowcolumn(pt, xedge, yedge) - - """ - - # make sure xedge and yedge are numpy arrays - if not isinstance(xedge, np.ndarray): - xedge = np.array(xedge) - if not isinstance(yedge, np.ndarray): - yedge = np.array(yedge) - - # find column - jcol = -100 - for jdx, xmf in enumerate(xedge): - if xmf > pt[0]: - jcol = jdx - 1 - break - - # find row - irow = -100 - for jdx, ymf in enumerate(yedge): - if ymf < pt[1]: - irow = jdx - 1 - break - return irow, jcol - - -def line_intersect_grid(ptsin, xedge, yedge, returnvertices=False): - """ - Intersect a list of polyline vertices with a rectilinear MODFLOW - grid. Vertices at the intersection of the polyline with the grid - cell edges is returned. Optionally the original polyline vertices - are returned. 
- - Parameters - ---------- - ptsin : list - A list of x, y points defining the vertices of a polyline that will be - intersected with the rectilinear MODFLOW grid - xedge : numpy.ndarray - x-coordinate of the edge of each MODFLOW column. xedge is dimensioned - to NCOL + 1. If xedge is not a numpy.ndarray it is converted to a - numpy.ndarray. - yedge : numpy.ndarray - y-coordinate of the edge of each MODFLOW row. yedge is dimensioned - to NROW + 1. If yedge is not a numpy.ndarray it is converted to a - numpy.ndarray. - returnvertices: bool - Return the original polyline vertices in the list of numpy.ndarray - containing vertices resulting from intersection of the provided - polyline and the MODFLOW model grid if returnvertices=True. - (default is False). - - Returns - ------- - (x, y, dlen) : numpy.ndarray of tuples - numpy.ndarray of tuples containing the x, y, and segment length of the - intersection of the provided polyline with the rectilinear MODFLOW - grid. - - Examples - -------- - >>> import flopy - >>> ptsout = flopy.plotutil.line_intersect_grid(ptsin, xedge, yedge) - - """ - - small_value = 1.0e-4 - - # make sure xedge and yedge are numpy arrays - if not isinstance(xedge, np.ndarray): - xedge = np.array(xedge) - if not isinstance(yedge, np.ndarray): - yedge = np.array(yedge) - - # build list of points along current line - pts = [] - npts = len(ptsin) - dlen = 0. - for idx in range(1, npts): - x0 = ptsin[idx - 1][0] - x1 = ptsin[idx][0] - y0 = ptsin[idx - 1][1] - y1 = ptsin[idx][1] - a = x1 - x0 - b = y1 - y0 - c = math.sqrt(math.pow(a, 2.) + math.pow(b, 2.)) - # find cells with (x0, y0) and (x1, y1) - irow0, jcol0 = findrowcolumn((x0, y0), xedge, yedge) - irow1, jcol1 = findrowcolumn((x1, y1), xedge, yedge) - # determine direction to go in the x- and y-directions - jx = 0 - incx = abs(small_value * a / c) - iy = 0 - incy = -abs(small_value * b / c) - if a == 0.: - incx = 0. - # go to the right - elif a > 0.: - jx = 1 - incx *= -1. - if b == 0.: - incy = 0. - # go down - elif b < 0.: - iy = 1 - incy *= -1. - # process data - if irow0 >= 0 and jcol0 >= 0: - iadd = True - if idx > 1 and returnvertices: - iadd = False - if iadd: - pts.append((x0, y0, dlen)) - icnt = 0 - while True: - icnt += 1 - dx = xedge[jcol0 + jx] - x0 - dlx = 0. - if a != 0.: - dlx = c * dx / a - dy = yedge[irow0 + iy] - y0 - dly = 0. - if b != 0.: - dly = c * dy / b - if dlx != 0. and dly != 0.: - if abs(dlx) < abs(dly): - dy = dx * b / a - else: - dx = dy * a / b - xt = x0 + dx + incx - yt = y0 + dy + incy - dl = math.sqrt(math.pow((xt - x0), 2.) + math.pow((yt - y0), 2.)) - dlen += dl - if not returnvertices: - pts.append((xt, yt, dlen)) - x0, y0 = xt, yt - xt = x0 - 2. * incx - yt = y0 - 2. * incy - dl = math.sqrt(math.pow((xt - x0), 2.) + math.pow((yt - y0), 2.)) - dlen += dl - x0, y0 = xt, yt - irow0, jcol0 = findrowcolumn((x0, y0), xedge, yedge) - if irow0 >= 0 and jcol0 >= 0: - if not returnvertices: - pts.append((xt, yt, dlen)) - elif irow1 < 0 or jcol1 < 0: - dl = math.sqrt(math.pow((x1 - x0), 2.) + math.pow((y1 - y0), 2.)) - dlen += dl - break - if irow0 == irow1 and jcol0 == jcol1: - dl = math.sqrt(math.pow((x1 - x0), 2.) + math.pow((y1 - y0), 2.)) - dlen += dl - pts.append((x1, y1, dlen)) - break - return np.array(pts) - - -def cell_value_points(pts, xedge, yedge, vdata): - """ - Sample a two-dimensional MODFLOW data array at a list of x, y points, - returning the value of the grid cell that contains each point.
- - Parameters - ---------- - pts : list - A list of x, y points and cumulative polyline distances, as returned - by line_intersect_grid, at which data values will be extracted - xedge : numpy.ndarray - x-coordinate of the edge of each MODFLOW column. The shape of xedge is - (NCOL + 1). If xedge is not a numpy.ndarray it is converted to a - numpy.ndarray. - yedge : numpy.ndarray - y-coordinate of the edge of each MODFLOW row. The shape of yedge is - (NROW + 1). If yedge is not a numpy.ndarray it is converted to a - numpy.ndarray. - vdata : numpy.ndarray - Data (i.e., head, hk, etc.) for a rectilinear MODFLOW model grid. The - shape of vdata is (NROW, NCOL). If vdata is not a numpy.ndarray it is - converted to a numpy.ndarray. - - Returns - ------- - vcell : numpy.ndarray - numpy.ndarray of data values from the vdata numpy.ndarray at x- and - y-coordinate locations in pts. - - Examples - -------- - >>> import flopy - >>> vcell = flopy.plotutil.cell_value_points(xpts, xedge, yedge, head[0, :, :]) - - """ - - # make sure xedge and yedge are numpy arrays - if not isinstance(xedge, np.ndarray): - xedge = np.array(xedge) - if not isinstance(yedge, np.ndarray): - yedge = np.array(yedge) - if not isinstance(vdata, np.ndarray): - vdata = np.array(vdata) - - vcell = [] - for (xt, yt, _) in pts: - # find the modflow cell containing point - irow, jcol = findrowcolumn((xt, yt), xedge, yedge) - if irow >= 0 and jcol >= 0: - if np.isnan(vdata[irow, jcol]): - vcell.append(np.nan) - else: - v = np.asarray(vdata[irow, jcol]) - vcell.append(v) - - return np.array(vcell) - - -def _set_coord_info(mg, xul, yul, xll, yll, rotation): - """ - - Parameters - ---------- - mg : fp.discretization.Grid object - - xul : float - upper left x-coordinate location - yul : float - upper left y-coordinate location - xll : float - lower left x-coordinate location - yll : float - lower left y-coordinate location - rotation : float - model grid rotation - - Returns - ------- - mg : fp.discretization.Grid object - """ - import warnings - if xul is not None and yul is not None: - warnings.warn('xul/yul have been deprecated.
Use xll/yll instead.', - DeprecationWarning) - if rotation is not None: - mg._angrot = rotation - - mg.set_coord_info(xoff=mg._xul_to_xll(xul), - yoff=mg._yul_to_yll(yul), - angrot=rotation) - elif xll is not None and yll is not None: - mg.set_coord_info(xoff=xll, yoff=yll, angrot=rotation) - - elif rotation is not None: - mg.set_coord_info(xoff=xll, yoff=yll, angrot=rotation) - - return mg - - -def _depreciated_dis_handler(modelgrid, dis): - """ - PlotMapView handler for the deprecated dis parameter - which adds top and botm information to the modelgrid - - Parameters - ---------- - modelgrid : fp.discretization.Grid object - - dis : fp.modflow.ModflowDis object - - Returns - ------- - modelgrid : fp.discretization.Grid - - """ - # creates a new modelgrid instance with the dis information - from ..discretization import StructuredGrid, VertexGrid, UnstructuredGrid - import warnings - warnings.warn('the dis parameter has been deprecated.', - PendingDeprecationWarning) - if modelgrid.grid_type == "vertex": - modelgrid = VertexGrid(modelgrid.vertices, - modelgrid.cell2d, - dis.top.array, - dis.botm.array, - idomain=modelgrid.idomain, - xoff=modelgrid.xoffset, - yoff=modelgrid.yoffset, - angrot=modelgrid.angrot) - elif modelgrid.grid_type == "unstructured": - modelgrid = UnstructuredGrid(modelgrid._vertices, - modelgrid._iverts, - modelgrid._xc, - modelgrid._yc, - dis.top.array, - dis.botm.array, - idomain=modelgrid.idomain, - xoff=modelgrid.xoffset, - yoff=modelgrid.yoffset, - angrot=modelgrid.angrot) - else: - modelgrid = StructuredGrid(delc=dis.delc.array, - delr=dis.delr.array, - top=dis.top.array, - botm=dis.botm.array, - idomain=modelgrid.idomain, - xoff=modelgrid.xoffset, - yoff=modelgrid.yoffset, - angrot=modelgrid.angrot) - return modelgrid - - -def advanced_package_bc_helper(pkg, modelgrid, kper): - """ - Helper function for plotting boundary conditions from "advanced" packages - - Parameters - ---------- - pkg : flopy Package objects - modelgrid : flopy.discretization.Grid object - - Returns - ------- - idx : np.ndarray - zero-based layer, row, column (or node) indices of package cells - """ - if pkg.package_type in ('sfr', 'uzf'): - if pkg.parent.version == 'mf6': - mflist = pkg.packagedata.array - idx = np.array([list(i) for i in mflist['cellid']], dtype=int).T - else: - iuzfbnd = pkg.iuzfbnd.array - idx = np.where(iuzfbnd != 0) - idx = np.append([[0] * idx[-1].size], idx, axis=0) - elif pkg.package_type in ('lak', 'maw'): - if pkg.parent.version == "mf6": - mflist = pkg.connectiondata.array - idx = np.array([list(i) for i in mflist['cellid']], dtype=int).T - else: - lakarr = pkg.lakarr.array[kper] - idx = np.where(lakarr != 0) - idx = np.array(idx) - else: - raise NotImplementedError("Pkg {} not implemented for bc plotting" - .format(pkg.package_type)) - return idx +""" +Module containing helper functions for plotting model data +using ModelMap and ModelCrossSection. Functions for plotting +shapefiles are also included.
+ +""" +from __future__ import print_function +import os +import sys +import math +import numpy as np +from ..utils import Util3d +from ..datbase import DataType, DataInterface + +try: + import shapefile +except ImportError: + shapefile = None + +try: + import matplotlib.pyplot as plt +except ImportError: + plt = None + +try: + from matplotlib.colors import LinearSegmentedColormap + + cm_data = [[0.26700401, 0.00487433, 0.32941519], + [0.26851048, 0.00960483, 0.33542652], + [0.26994384, 0.01462494, 0.34137895], + [0.27130489, 0.01994186, 0.34726862], + [0.27259384, 0.02556309, 0.35309303], + [0.27380934, 0.03149748, 0.35885256], + [0.27495242, 0.03775181, 0.36454323], + [0.27602238, 0.04416723, 0.37016418], + [0.2770184, 0.05034437, 0.37571452], + [0.27794143, 0.05632444, 0.38119074], + [0.27879067, 0.06214536, 0.38659204], + [0.2795655, 0.06783587, 0.39191723], + [0.28026658, 0.07341724, 0.39716349], + [0.28089358, 0.07890703, 0.40232944], + [0.28144581, 0.0843197, 0.40741404], + [0.28192358, 0.08966622, 0.41241521], + [0.28232739, 0.09495545, 0.41733086], + [0.28265633, 0.10019576, 0.42216032], + [0.28291049, 0.10539345, 0.42690202], + [0.28309095, 0.11055307, 0.43155375], + [0.28319704, 0.11567966, 0.43611482], + [0.28322882, 0.12077701, 0.44058404], + [0.28318684, 0.12584799, 0.44496], + [0.283072, 0.13089477, 0.44924127], + [0.28288389, 0.13592005, 0.45342734], + [0.28262297, 0.14092556, 0.45751726], + [0.28229037, 0.14591233, 0.46150995], + [0.28188676, 0.15088147, 0.46540474], + [0.28141228, 0.15583425, 0.46920128], + [0.28086773, 0.16077132, 0.47289909], + [0.28025468, 0.16569272, 0.47649762], + [0.27957399, 0.17059884, 0.47999675], + [0.27882618, 0.1754902, 0.48339654], + [0.27801236, 0.18036684, 0.48669702], + [0.27713437, 0.18522836, 0.48989831], + [0.27619376, 0.19007447, 0.49300074], + [0.27519116, 0.1949054, 0.49600488], + [0.27412802, 0.19972086, 0.49891131], + [0.27300596, 0.20452049, 0.50172076], + [0.27182812, 0.20930306, 0.50443413], + [0.27059473, 0.21406899, 0.50705243], + [0.26930756, 0.21881782, 0.50957678], + [0.26796846, 0.22354911, 0.5120084], + [0.26657984, 0.2282621, 0.5143487], + [0.2651445, 0.23295593, 0.5165993], + [0.2636632, 0.23763078, 0.51876163], + [0.26213801, 0.24228619, 0.52083736], + [0.26057103, 0.2469217, 0.52282822], + [0.25896451, 0.25153685, 0.52473609], + [0.25732244, 0.2561304, 0.52656332], + [0.25564519, 0.26070284, 0.52831152], + [0.25393498, 0.26525384, 0.52998273], + [0.25219404, 0.26978306, 0.53157905], + [0.25042462, 0.27429024, 0.53310261], + [0.24862899, 0.27877509, 0.53455561], + [0.2468114, 0.28323662, 0.53594093], + [0.24497208, 0.28767547, 0.53726018], + [0.24311324, 0.29209154, 0.53851561], + [0.24123708, 0.29648471, 0.53970946], + [0.23934575, 0.30085494, 0.54084398], + [0.23744138, 0.30520222, 0.5419214], + [0.23552606, 0.30952657, 0.54294396], + [0.23360277, 0.31382773, 0.54391424], + [0.2316735, 0.3181058, 0.54483444], + [0.22973926, 0.32236127, 0.54570633], + [0.22780192, 0.32659432, 0.546532], + [0.2258633, 0.33080515, 0.54731353], + [0.22392515, 0.334994, 0.54805291], + [0.22198915, 0.33916114, 0.54875211], + [0.22005691, 0.34330688, 0.54941304], + [0.21812995, 0.34743154, 0.55003755], + [0.21620971, 0.35153548, 0.55062743], + [0.21429757, 0.35561907, 0.5511844], + [0.21239477, 0.35968273, 0.55171011], + [0.2105031, 0.36372671, 0.55220646], + [0.20862342, 0.36775151, 0.55267486], + [0.20675628, 0.37175775, 0.55311653], + [0.20490257, 0.37574589, 0.55353282], + [0.20306309, 0.37971644, 0.55392505], + [0.20123854, 0.38366989, 
0.55429441], + [0.1994295, 0.38760678, 0.55464205], + [0.1976365, 0.39152762, 0.55496905], + [0.19585993, 0.39543297, 0.55527637], + [0.19410009, 0.39932336, 0.55556494], + [0.19235719, 0.40319934, 0.55583559], + [0.19063135, 0.40706148, 0.55608907], + [0.18892259, 0.41091033, 0.55632606], + [0.18723083, 0.41474645, 0.55654717], + [0.18555593, 0.4185704, 0.55675292], + [0.18389763, 0.42238275, 0.55694377], + [0.18225561, 0.42618405, 0.5571201], + [0.18062949, 0.42997486, 0.55728221], + [0.17901879, 0.43375572, 0.55743035], + [0.17742298, 0.4375272, 0.55756466], + [0.17584148, 0.44128981, 0.55768526], + [0.17427363, 0.4450441, 0.55779216], + [0.17271876, 0.4487906, 0.55788532], + [0.17117615, 0.4525298, 0.55796464], + [0.16964573, 0.45626209, 0.55803034], + [0.16812641, 0.45998802, 0.55808199], + [0.1666171, 0.46370813, 0.55811913], + [0.16511703, 0.4674229, 0.55814141], + [0.16362543, 0.47113278, 0.55814842], + [0.16214155, 0.47483821, 0.55813967], + [0.16066467, 0.47853961, 0.55811466], + [0.15919413, 0.4822374, 0.5580728], + [0.15772933, 0.48593197, 0.55801347], + [0.15626973, 0.4896237, 0.557936], + [0.15481488, 0.49331293, 0.55783967], + [0.15336445, 0.49700003, 0.55772371], + [0.1519182, 0.50068529, 0.55758733], + [0.15047605, 0.50436904, 0.55742968], + [0.14903918, 0.50805136, 0.5572505], + [0.14760731, 0.51173263, 0.55704861], + [0.14618026, 0.51541316, 0.55682271], + [0.14475863, 0.51909319, 0.55657181], + [0.14334327, 0.52277292, 0.55629491], + [0.14193527, 0.52645254, 0.55599097], + [0.14053599, 0.53013219, 0.55565893], + [0.13914708, 0.53381201, 0.55529773], + [0.13777048, 0.53749213, 0.55490625], + [0.1364085, 0.54117264, 0.55448339], + [0.13506561, 0.54485335, 0.55402906], + [0.13374299, 0.54853458, 0.55354108], + [0.13244401, 0.55221637, 0.55301828], + [0.13117249, 0.55589872, 0.55245948], + [0.1299327, 0.55958162, 0.55186354], + [0.12872938, 0.56326503, 0.55122927], + [0.12756771, 0.56694891, 0.55055551], + [0.12645338, 0.57063316, 0.5498411], + [0.12539383, 0.57431754, 0.54908564], + [0.12439474, 0.57800205, 0.5482874], + [0.12346281, 0.58168661, 0.54744498], + [0.12260562, 0.58537105, 0.54655722], + [0.12183122, 0.58905521, 0.54562298], + [0.12114807, 0.59273889, 0.54464114], + [0.12056501, 0.59642187, 0.54361058], + [0.12009154, 0.60010387, 0.54253043], + [0.11973756, 0.60378459, 0.54139999], + [0.11951163, 0.60746388, 0.54021751], + [0.11942341, 0.61114146, 0.53898192], + [0.11948255, 0.61481702, 0.53769219], + [0.11969858, 0.61849025, 0.53634733], + [0.12008079, 0.62216081, 0.53494633], + [0.12063824, 0.62582833, 0.53348834], + [0.12137972, 0.62949242, 0.53197275], + [0.12231244, 0.63315277, 0.53039808], + [0.12344358, 0.63680899, 0.52876343], + [0.12477953, 0.64046069, 0.52706792], + [0.12632581, 0.64410744, 0.52531069], + [0.12808703, 0.64774881, 0.52349092], + [0.13006688, 0.65138436, 0.52160791], + [0.13226797, 0.65501363, 0.51966086], + [0.13469183, 0.65863619, 0.5176488], + [0.13733921, 0.66225157, 0.51557101], + [0.14020991, 0.66585927, 0.5134268], + [0.14330291, 0.66945881, 0.51121549], + [0.1466164, 0.67304968, 0.50893644], + [0.15014782, 0.67663139, 0.5065889], + [0.15389405, 0.68020343, 0.50417217], + [0.15785146, 0.68376525, 0.50168574], + [0.16201598, 0.68731632, 0.49912906], + [0.1663832, 0.69085611, 0.49650163], + [0.1709484, 0.69438405, 0.49380294], + [0.17570671, 0.6978996, 0.49103252], + [0.18065314, 0.70140222, 0.48818938], + [0.18578266, 0.70489133, 0.48527326], + [0.19109018, 0.70836635, 0.48228395], + [0.19657063, 0.71182668, 0.47922108], + 
[0.20221902, 0.71527175, 0.47608431], + [0.20803045, 0.71870095, 0.4728733], + [0.21400015, 0.72211371, 0.46958774], + [0.22012381, 0.72550945, 0.46622638], + [0.2263969, 0.72888753, 0.46278934], + [0.23281498, 0.73224735, 0.45927675], + [0.2393739, 0.73558828, 0.45568838], + [0.24606968, 0.73890972, 0.45202405], + [0.25289851, 0.74221104, 0.44828355], + [0.25985676, 0.74549162, 0.44446673], + [0.26694127, 0.74875084, 0.44057284], + [0.27414922, 0.75198807, 0.4366009], + [0.28147681, 0.75520266, 0.43255207], + [0.28892102, 0.75839399, 0.42842626], + [0.29647899, 0.76156142, 0.42422341], + [0.30414796, 0.76470433, 0.41994346], + [0.31192534, 0.76782207, 0.41558638], + [0.3198086, 0.77091403, 0.41115215], + [0.3277958, 0.77397953, 0.40664011], + [0.33588539, 0.7770179, 0.40204917], + [0.34407411, 0.78002855, 0.39738103], + [0.35235985, 0.78301086, 0.39263579], + [0.36074053, 0.78596419, 0.38781353], + [0.3692142, 0.78888793, 0.38291438], + [0.37777892, 0.79178146, 0.3779385], + [0.38643282, 0.79464415, 0.37288606], + [0.39517408, 0.79747541, 0.36775726], + [0.40400101, 0.80027461, 0.36255223], + [0.4129135, 0.80304099, 0.35726893], + [0.42190813, 0.80577412, 0.35191009], + [0.43098317, 0.80847343, 0.34647607], + [0.44013691, 0.81113836, 0.3409673], + [0.44936763, 0.81376835, 0.33538426], + [0.45867362, 0.81636288, 0.32972749], + [0.46805314, 0.81892143, 0.32399761], + [0.47750446, 0.82144351, 0.31819529], + [0.4870258, 0.82392862, 0.31232133], + [0.49661536, 0.82637633, 0.30637661], + [0.5062713, 0.82878621, 0.30036211], + [0.51599182, 0.83115784, 0.29427888], + [0.52577622, 0.83349064, 0.2881265], + [0.5356211, 0.83578452, 0.28190832], + [0.5455244, 0.83803918, 0.27562602], + [0.55548397, 0.84025437, 0.26928147], + [0.5654976, 0.8424299, 0.26287683], + [0.57556297, 0.84456561, 0.25641457], + [0.58567772, 0.84666139, 0.24989748], + [0.59583934, 0.84871722, 0.24332878], + [0.60604528, 0.8507331, 0.23671214], + [0.61629283, 0.85270912, 0.23005179], + [0.62657923, 0.85464543, 0.22335258], + [0.63690157, 0.85654226, 0.21662012], + [0.64725685, 0.85839991, 0.20986086], + [0.65764197, 0.86021878, 0.20308229], + [0.66805369, 0.86199932, 0.19629307], + [0.67848868, 0.86374211, 0.18950326], + [0.68894351, 0.86544779, 0.18272455], + [0.69941463, 0.86711711, 0.17597055], + [0.70989842, 0.86875092, 0.16925712], + [0.72039115, 0.87035015, 0.16260273], + [0.73088902, 0.87191584, 0.15602894], + [0.74138803, 0.87344918, 0.14956101], + [0.75188414, 0.87495143, 0.14322828], + [0.76237342, 0.87642392, 0.13706449], + [0.77285183, 0.87786808, 0.13110864], + [0.78331535, 0.87928545, 0.12540538], + [0.79375994, 0.88067763, 0.12000532], + [0.80418159, 0.88204632, 0.11496505], + [0.81457634, 0.88339329, 0.11034678], + [0.82494028, 0.88472036, 0.10621724], + [0.83526959, 0.88602943, 0.1026459], + [0.84556056, 0.88732243, 0.09970219], + [0.8558096, 0.88860134, 0.09745186], + [0.86601325, 0.88986815, 0.09595277], + [0.87616824, 0.89112487, 0.09525046], + [0.88627146, 0.89237353, 0.09537439], + [0.89632002, 0.89361614, 0.09633538], + [0.90631121, 0.89485467, 0.09812496], + [0.91624212, 0.89609127, 0.1007168], + [0.92610579, 0.89732977, 0.10407067], + [0.93590444, 0.8985704, 0.10813094], + [0.94563626, 0.899815, 0.11283773], + [0.95529972, 0.90106534, 0.11812832], + [0.96489353, 0.90232311, 0.12394051], + [0.97441665, 0.90358991, 0.13021494], + [0.98386829, 0.90486726, 0.13689671], + [0.99324789, 0.90615657, 0.1439362]] + + viridis = LinearSegmentedColormap.from_list(__file__, cm_data) +except: + pass + + +bc_color_dict 
= {'default': 'black', 'WEL': 'red', 'DRN': 'yellow', + 'RIV': 'teal', 'GHB': 'cyan', 'CHD': 'navy', + 'STR': 'purple', 'SFR': 'teal', 'UZF': 'peru', + 'LAK': 'royalblue'} + + +class PlotException(Exception): + def __init__(self, message): + super(PlotException, self).__init__(message) + + +class PlotUtilities(object): + """ + Class which groups a collection of plotting utilities + which Flopy and Flopy6 can use to generate map based plots + """ + + @staticmethod + def _plot_simulation_helper(simulation, model_list, + SelPackList, **kwargs): + """ + Plot 2-D, 3-D, transient 2-D, and stress period list (MfList) + model input data from a model instance + + Parameters + ---------- + simulation : flopy.mf6.Simulation object + model_list : list + list of model names to plot + SelPackList : list + list of package names to plot, if none + all packages will be plotted + + **kwargs : dict + filename_base : str + Base file name that will be used to automatically generate file + names for output image files. Plots will be exported as image + files if file_name_base is not None. (default is None) + file_extension : str + Valid matplotlib.pyplot file extension for savefig(). Only used + if filename_base is not None. (default is 'png') + mflay : int + MODFLOW zero-based layer number to return. If None, then all + all layers will be included. (default is None) + kper : int + MODFLOW zero-based stress period number to return. + (default is zero) + key : str + MfList dictionary key. (default is None) + + Returns + ------- + axes : list + Empty list is returned if filename_base is not None. Otherwise + a list of matplotlib.pyplot.axis are returned. + """ + defaults = {"kper": 0, "mflay": None, "filename_base": None, + "file_extension": "png", "key": None} + + for key in defaults: + if key in kwargs: + if key == 'file_extension': + defaults[key] = kwargs[key].replace(".", "") + else: + defaults[key] = kwargs[key] + + kwargs.pop(key) + + filename_base = defaults['filename_base'] + + if model_list is None: + model_list = simulation.model_names + + + axes = [] + ifig = 0 + for model_name in model_list: + model = simulation.get_model(model_name) + + model_filename_base = None + if filename_base is not None: + model_filename_base = filename_base + "_" + model_name + + if model.verbose: + print(" Plotting Model: ", model_name) + + caxs = PlotUtilities._plot_model_helper( + model, + SelPackList=SelPackList, + kper=defaults['kper'], + mflay=defaults['mflay'], + filename_base=model_filename_base, + file_extension=defaults['file_extension'], + key=defaults['key'], + initial_fig=ifig, + model_name=model_name, + **kwargs) + + if isinstance(caxs, list): + for c in caxs: + axes.append(c) + else: + axes.append(caxs) + + ifig = len(axes) + 1 + + return axes + + + @staticmethod + def _plot_model_helper(model, SelPackList, **kwargs): + """ + Plot 2-D, 3-D, transient 2-D, and stress period list (MfList) + model input data from a model instance + + Parameters + ---------- + model : Flopy model instance + SelPackList : list + list of package names to plot, if none + all packages will be plotted + + **kwargs : dict + filename_base : str + Base file name that will be used to automatically generate file + names for output image files. Plots will be exported as image + files if file_name_base is not None. (default is None) + file_extension : str + Valid matplotlib.pyplot file extension for savefig(). Only used + if filename_base is not None. (default is 'png') + mflay : int + MODFLOW zero-based layer number to return. 
If None, then all + all layers will be included. (default is None) + kper : int + MODFLOW zero-based stress period number to return. + (default is zero) + key : str + MfList dictionary key. (default is None) + + Returns + ------- + axes : list + Empty list is returned if filename_base is not None. Otherwise + a list of matplotlib.pyplot.axis are returned. + """ + # valid keyword arguments + defaults = {"kper": 0, "mflay": None, "filename_base": None, + "file_extension": "png", "key": None, "model_name": "", + "initial_fig": 0} + + for key in defaults: + if key in kwargs: + if key == 'file_extension': + defaults[key] = kwargs[key].replace(".", "") + else: + defaults[key] = kwargs[key] + + kwargs.pop(key) + + axes = [] + ifig = defaults['initial_fig'] + if SelPackList is None: + for p in model.packagelist: + caxs = PlotUtilities._plot_package_helper( + p, + initial_fig=ifig, + filename_base=defaults['filename_base'], + file_extension=defaults['file_extension'], + kper=defaults['kper'], + mflay=defaults['mflay'], + key=defaults['key'], + model_name=defaults['model_name']) + # unroll nested lists of axes into a single list of axes + if isinstance(caxs, list): + for c in caxs: + axes.append(c) + else: + axes.append(caxs) + # update next active figure number + ifig = len(axes) + 1 + + else: + for pon in SelPackList: + for p in model.packagelist: + if pon in p.name: + if model.verbose: + print(' Plotting Package: ', p.name[0]) + caxs = PlotUtilities._plot_package_helper( + p, + initial_fig=ifig, + filename_base=defaults['filename_base'], + file_extension=defaults['file_extension'], + kper=defaults['kper'], + mflay=defaults['mflay'], + key=defaults['key'], + model_name=defaults['model_name']) + + # unroll nested lists of axes into a single list of axes + if isinstance(caxs, list): + for c in caxs: + axes.append(c) + else: + axes.append(caxs) + # update next active figure number + ifig = len(axes) + 1 + break + if model.verbose: + print(' ') + return axes + + @staticmethod + def _plot_package_helper(package, **kwargs): + """ + Plot 2-D, 3-D, transient 2-D, and stress period list (MfList) + package input data + + Parameters + ---------- + package: flopy.pakbase.Package + package instance supplied for plotting + + **kwargs : dict + filename_base : str + Base file name that will be used to automatically generate file + names for output image files. Plots will be exported as image + files if file_name_base is not None. (default is None) + file_extension : str + Valid matplotlib.pyplot file extension for savefig(). Only used + if filename_base is not None. (default is 'png') + mflay : int + MODFLOW zero-based layer number to return. If None, then all + all layers will be included. (default is None) + kper : int + MODFLOW zero-based stress period number to return. (default is + zero) + key : str + MfList dictionary key. (default is None) + + Returns + ------- + axes : list + Empty list is returned if filename_base is not None. Otherwise + a list of matplotlib.pyplot.axis are returned. 
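A hedged usage sketch of the public route into this helper (the 'model.nam' file and its DIS package are assumed to exist; Package.plot() is expected to dispatch here):
>>> import flopy
>>> ml = flopy.modflow.Modflow.load('model.nam')
>>> # one figure per layer for each plottable DIS array
>>> axes = ml.dis.plot(mflay=None)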
+ + """ + defaults = {"kper": 0, 'filename_base': None, + "file_extension": "png", 'mflay': None, + "key": None, "initial_fig": 0, + "model_name": ""} + + for key in defaults: + if key in kwargs: + if key == "file_extension": + defaults[key] = kwargs[key].replace(".", "") + elif key == "initial_fig": + defaults[key] = int(kwargs[key]) + else: + defaults[key] = kwargs[key] + + kwargs.pop(key) + + model_name = defaults.pop("model_name") + + inc = package.parent.modelgrid.nlay + if defaults['mflay'] is not None: + inc = 1 + + axes = [] + for item, value in package.__dict__.items(): + caxs = [] + # trap non-flopy specific data_types. + + if isinstance(value, list): + for v in value: + if isinstance(v, Util3d): + if package.parent.verbose: + print( + 'plotting {} package Util3d instance: {}'.format( + package.name[0], item)) + fignum = list(range(defaults['initial_fig'], + defaults['initial_fig'] + inc)) + defaults['initial_fig'] = fignum[-1] + 1 + caxs.append( + PlotUtilities._plot_util3d_helper( + v, + filename_base=defaults['filename_base'], + file_extension=defaults['file_extension'], + mflay=defaults['mflay'], + fignum=fignum, model_name=model_name, + colorbar=True)) + + elif isinstance(value, DataInterface): + if value.data_type == DataType.transientlist: # isinstance(value, (MfList, MFTransientList)): + if package.parent.verbose: + print('plotting {} package MfList instance: {}'.format( + package.name[0], item)) + if defaults['key'] is None: + names = ['{} {} location stress period {} layer {}'.format( + model_name, package.name[0], + defaults['kper'] + 1, k + 1) + for k in range(package.parent.modelgrid.nlay)] + colorbar = False + else: + names = ['{} {} {} data stress period {} layer {}'.format( + model_name, package.name[0], defaults['key'], + defaults['kper'] + 1, k + 1) + for k in range(package.parent.modelgrid.nlay)] + colorbar = True + + fignum = list(range(defaults['initial_fig'], + defaults['initial_fig'] + inc)) + defaults['initial_fig'] = fignum[-1] + 1 + # need to keep this as value.plot() because of mf6 datatype issues + ax = value.plot(defaults['key'], + names, + defaults['kper'], + filename_base=defaults['filename_base'], + file_extension=defaults['file_extension'], + mflay=defaults['mflay'], + fignum=fignum, colorbar=colorbar, + **kwargs) + + if ax is not None: + caxs.append(ax) + + elif value.data_type == DataType.array3d: # isinstance(value, Util3d): + if value.array is not None: + if package.parent.verbose: + print('plotting {} package Util3d instance: {}'.format( + package.name[0], item)) + # fignum = list(range(ifig, ifig + inc)) + fignum = list(range(defaults['initial_fig'], + defaults['initial_fig'] + value.array.shape[0])) + defaults['initial_fig'] = fignum[-1] + 1 + + caxs.append(PlotUtilities._plot_util3d_helper( + value, + filename_base=defaults['filename_base'], + file_extension=defaults['file_extension'], + mflay=defaults['mflay'], + fignum=fignum, + model_name=model_name, + colorbar=True)) + + elif value.data_type == DataType.array2d: # isinstance(value, Util2d): + if value.array is not None: + if len(value.array.shape) == 2: # is this necessary? 
+                            if package.parent.verbose:
+                                print('plotting {} package Util2d instance: {}'.format(
+                                    package.name[0], item))
+                            fignum = list(range(defaults['initial_fig'],
+                                                defaults['initial_fig'] + 1))
+                            defaults['initial_fig'] = fignum[-1] + 1
+
+                            caxs.append(PlotUtilities._plot_util2d_helper(
+                                value,
+                                filename_base=defaults['filename_base'],
+                                file_extension=defaults['file_extension'],
+                                fignum=fignum,
+                                model_name=model_name,
+                                colorbar=True))
+
+                elif value.data_type == DataType.transient2d:  # isinstance(value, Transient2d):
+                    if value.array is not None:
+                        if package.parent.verbose:
+                            print(
+                                'plotting {} package Transient2d instance: {}'.format(
+                                    package.name[0], item))
+                        fignum = list(range(defaults['initial_fig'],
+                                            defaults['initial_fig'] + inc))
+                        defaults['initial_fig'] = fignum[-1] + 1
+
+                        caxs.append(PlotUtilities._plot_transient2d_helper(
+                            value,
+                            filename_base=defaults['filename_base'],
+                            file_extension=defaults['file_extension'],
+                            kper=defaults['kper'],
+                            fignum=fignum,
+                            colorbar=True))
+
+                else:
+                    pass
+
+            else:
+                pass
+
+            # unroll nested lists of axes into a single list of axes
+            if isinstance(caxs, list):
+                for c in caxs:
+                    if isinstance(c, list):
+                        for cc in c:
+                            axes.append(cc)
+                    else:
+                        axes.append(c)
+            else:
+                axes.append(caxs)
+
+        return axes
+
+    @staticmethod
+    def _plot_mflist_helper(mflist, key=None, names=None, kper=0,
+                            filename_base=None, file_extension=None,
+                            mflay=None, **kwargs):
+        """
+        Plot stress period boundary condition (MfList) data for a specified
+        stress period
+
+        Parameters
+        ----------
+        mflist : flopy.utils.util_list.MfList object
+        key : str
+            MfList dictionary key. (default is None)
+        names : list
+            List of names for figure titles. (default is None)
+        kper : int
+            MODFLOW zero-based stress period number to return. (default is zero)
+        filename_base : str
+            Base file name that will be used to automatically generate file
+            names for output image files. Plots will be exported as image
+            files if file_name_base is not None. (default is None)
+        file_extension : str
+            Valid matplotlib.pyplot file extension for savefig(). Only used
+            if filename_base is not None. (default is 'png')
+        mflay : int
+            MODFLOW zero-based layer number to return. If None, then all
+            layers will be included. (default is None)
+        **kwargs : dict
+            axes : list of matplotlib.pyplot.axis
+                List of matplotlib.pyplot.axis that will be used to plot
+                data for each layer. If axes=None axes will be generated.
+                (default is None)
+            pcolor : bool
+                Boolean used to determine if matplotlib.pyplot.pcolormesh
+                plot will be plotted. (default is True)
+            colorbar : bool
+                Boolean used to determine if a color bar will be added to
+                the matplotlib.pyplot.pcolormesh. Only used if pcolor=True.
+                (default is False)
+            inactive : bool
+                Boolean used to determine if a black overlay in inactive
+                cells in a layer will be displayed. (default is True)
+            contour : bool
+                Boolean used to determine if matplotlib.pyplot.contour
+                plot will be plotted. (default is False)
+            clabel : bool
+                Boolean used to determine if matplotlib.pyplot.clabel
+                will be plotted. Only used if contour=True. (default is False)
+            grid : bool
+                Boolean used to determine if the model grid will be plotted
+                on the figure. (default is False)
+            masked_values : list
+                List of unique values to be excluded from the plot.
+
+        Returns
+        -------
+        axes : list
+            Empty list is returned if filename_base is not None. Otherwise
+            a list of matplotlib.pyplot.axis is returned.
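+
+        Examples
+        --------
+        Internal helper, normally reached through ``MfList.plot()``. A
+        minimal sketch, assuming a loaded model ``ml`` that contains a
+        RIV package::
+
+            >>> axes = ml.riv.stress_period_data.plot(key='cond', kper=0)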
+ + """ + if file_extension is not None: + fext = file_extension + else: + fext = 'png' + + model_name = "" + if "model_name" in kwargs: + model_name = kwargs.pop('model_name') + " " + + filenames = None + if filename_base is not None: + if mflay is not None: + i0 = int(mflay) + if i0 + 1 >= mflist.model.modelgrid.nlay: + i0 = mflist.model.modelgrid.nlay - 1 + i1 = i0 + 1 + else: + i0 = 0 + i1 = mflist.model.modelgrid.nlay + # build filenames + package_name = mflist.package.name[0].upper() + filenames = ['{}_{}_StressPeriod{}_Layer{}.{}'.format( + filename_base, package_name, + kper + 1, k + 1, fext) + for k in range(i0, i1)] + + if names is None: + if key is None: + names = ['{}{} location stress period: {} layer: {}'.format( + model_name, mflist.package.name[0], kper + 1, k + 1) + for k in range(mflist.model.modelgrid.nlay)] + else: + names = ['{}{} {} stress period: {} layer: {}'.format( + model_name, mflist.package.name[0], + key, kper + 1, k + 1) + for k in range(mflist.model.modelgrid.nlay)] + + if key is None: + axes = PlotUtilities._plot_bc_helper(mflist.package, + kper, + names=names, + filenames=filenames, + mflay=mflay, **kwargs) + else: + arr_dict = mflist.to_array(kper, mask=True) + + try: + arr = arr_dict[key] + except: + err_msg = 'Cannot find key to plot\n' + err_msg += ' Provided key={}\n Available keys='.format(key) + for name, arr in arr_dict.items(): + err_msg += '{}, '.format(name) + err_msg += '\n' + raise PlotException(err_msg) + + axes = PlotUtilities._plot_array_helper(arr, + model=mflist.model, + names=names, + filenames=filenames, + mflay=mflay, + **kwargs) + return axes + + @staticmethod + def _plot_util2d_helper(util2d, title=None, filename_base=None, + file_extension=None, fignum=None, **kwargs): + """ + Plot 2-D model input data + + Parameters + ---------- + util2d : flopy.util.util_array.Util2d object + title : str + Plot title. If a plot title is not provide one will be + created based on data name (self.name). (default is None) + filename_base : str + Base file name that will be used to automatically generate file + names for output image files. Plots will be exported as image + files if file_name_base is not None. (default is None) + file_extension : str + Valid matplotlib.pyplot file extension for savefig(). Only used + if filename_base is not None. (default is 'png') + fignum : list + list of figure numbers + **kwargs : dict + axes : list of matplotlib.pyplot.axis + List of matplotlib.pyplot.axis that will be used to plot + data for each layer. If axes=None axes will be generated. + (default is None) + pcolor : bool + Boolean used to determine if matplotlib.pyplot.pcolormesh + plot will be plotted. (default is True) + colorbar : bool + Boolean used to determine if a color bar will be added to + the matplotlib.pyplot.pcolormesh. Only used if pcolor=True. + (default is False) + inactive : bool + Boolean used to determine if a black overlay in inactive + cells in a layer will be displayed. (default is True) + contour : bool + Boolean used to determine if matplotlib.pyplot.contour + plot will be plotted. (default is False) + clabel : bool + Boolean used to determine if matplotlib.pyplot.clabel + will be plotted. Only used if contour=True. (default is False) + grid : bool + Boolean used to determine if the model grid will be plotted + on the figure. (default is False) + masked_values : list + List of unique values to be excluded from the plot. + + Returns + ------- + axes : list + Empty list is returned if filename_base is not None. 
+            a list of matplotlib.pyplot.axis is returned.
+
+        """
+        model_name = ""
+        if "model_name" in kwargs:
+            model_name = kwargs.pop("model_name") + " "
+
+        if title is None:
+            title = "{}{}".format(model_name, util2d.name)
+
+        if file_extension is not None:
+            fext = file_extension
+        else:
+            fext = 'png'
+
+        filename = None
+        if filename_base is not None:
+            filename = '{}_{}.{}'.format(filename_base,
+                                         util2d.name, fext)
+
+        axes = PlotUtilities._plot_array_helper(util2d.array,
+                                                util2d.model,
+                                                names=title,
+                                                filenames=filename,
+                                                fignum=fignum,
+                                                **kwargs)
+        return axes
+
+    @staticmethod
+    def _plot_util3d_helper(util3d, filename_base=None,
+                            file_extension=None, mflay=None,
+                            fignum=None, **kwargs):
+        """
+        Plot 3-D model input data
+
+        Parameters
+        ----------
+        util3d : flopy.util.util_array.Util3d object
+        filename_base : str
+            Base file name that will be used to automatically generate file
+            names for output image files. Plots will be exported as image
+            files if file_name_base is not None. (default is None)
+        file_extension : str
+            Valid matplotlib.pyplot file extension for savefig(). Only used
+            if filename_base is not None. (default is 'png')
+        mflay : int
+            MODFLOW zero-based layer number to return. If None, then all
+            layers will be included. (default is None)
+        fignum : list
+            list of figure numbers
+        **kwargs : dict
+            axes : list of matplotlib.pyplot.axis
+                List of matplotlib.pyplot.axis that will be used to plot
+                data for each layer. If axes=None axes will be generated.
+                (default is None)
+            pcolor : bool
+                Boolean used to determine if matplotlib.pyplot.pcolormesh
+                plot will be plotted. (default is True)
+            colorbar : bool
+                Boolean used to determine if a color bar will be added to
+                the matplotlib.pyplot.pcolormesh. Only used if pcolor=True.
+                (default is False)
+            inactive : bool
+                Boolean used to determine if a black overlay in inactive
+                cells in a layer will be displayed. (default is True)
+            contour : bool
+                Boolean used to determine if matplotlib.pyplot.contour
+                plot will be plotted. (default is False)
+            clabel : bool
+                Boolean used to determine if matplotlib.pyplot.clabel
+                will be plotted. Only used if contour=True. (default is False)
+            grid : bool
+                Boolean used to determine if the model grid will be plotted
+                on the figure. (default is False)
+            masked_values : list
+                List of unique values to be excluded from the plot.
+
+        Returns
+        -------
+        axes : list
+            Empty list is returned if filename_base is not None. Otherwise
+            a list of matplotlib.pyplot.axis is returned.
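+
+        Examples
+        --------
+        Internal helper, normally reached through ``Util3d.plot()``. A
+        minimal sketch, assuming a loaded model ``ml`` that contains an
+        LPF package::
+
+            >>> axes = ml.lpf.hk.plot(colorbar=True)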
+ + """ + model_name = "" + if "model_name" in kwargs: + model_name = kwargs.pop('model_name') + + if file_extension is not None: + fext = file_extension + else: + fext = 'png' + + # flopy6 adaption + array = util3d.array + name = util3d.name + if isinstance(name, str): + name = [name] * array.shape[0] + + names = ['{}{} layer {}'.format(model_name, + name[k], k + 1) for k in + range(array.shape[0])] + + filenames = None + if filename_base is not None: + if mflay is not None: + i0 = int(mflay) + if i0 + 1 >= array.shape[0]: + i0 = array.shape[0] - 1 + i1 = i0 + 1 + else: + i0 = 0 + i1 = array.shape[0] + # build filenames, use local "name" variable (flopy6 adaptation) + filenames = ['{}_{}_Layer{}.{}'.format( + filename_base, name[k], + k + 1, fext) + for k in range(i0, i1)] + + axes = PlotUtilities._plot_array_helper(array, + util3d.model, + names=names, + filenames=filenames, + mflay=mflay, + fignum=fignum, + **kwargs) + return axes + + @staticmethod + def _plot_transient2d_helper(transient2d, filename_base=None, + file_extension=None, kper=0, + fignum=None, **kwargs): + """ + Plot transient 2-D model input data + + Parameters + ---------- + transient2d : flopy.utils.util_array.Transient2D object + filename_base : str + Base file name that will be used to automatically generate file + names for output image files. Plots will be exported as image + files if file_name_base is not None. (default is None) + file_extension : str + Valid matplotlib.pyplot file extension for savefig(). Only used + if filename_base is not None. (default is 'png') + kper : int + zero based stress period number + fignum : list + list of figure numbers + **kwargs : dict + axes : list of matplotlib.pyplot.axis + List of matplotlib.pyplot.axis that will be used to plot + data for each layer. If axes=None axes will be generated. + (default is None) + pcolor : bool + Boolean used to determine if matplotlib.pyplot.pcolormesh + plot will be plotted. (default is True) + colorbar : bool + Boolean used to determine if a color bar will be added to + the matplotlib.pyplot.pcolormesh. Only used if pcolor=True. + (default is False) + inactive : bool + Boolean used to determine if a black overlay in inactive + cells in a layer will be displayed. (default is True) + contour : bool + Boolean used to determine if matplotlib.pyplot.contour + plot will be plotted. (default is False) + clabel : bool + Boolean used to determine if matplotlib.pyplot.clabel + will be plotted. Only used if contour=True. (default is False) + grid : bool + Boolean used to determine if the model grid will be plotted + on the figure. (default is False) + masked_values : list + List of unique values to be excluded from the plot. + kper : str + MODFLOW zero-based stress period number to return. If + kper='all' then data for all stress period will be + extracted. (default is zero). + + Returns + ------- + axes : list + Empty list is returned if filename_base is not None. Otherwise + a list of matplotlib.pyplot.axis is returned. 
+ + """ + if file_extension is not None: + fext = file_extension + else: + fext = 'png' + + if isinstance(kper, int): + k0 = kper + k1 = kper + 1 + + elif isinstance(kper, str): + if kper.lower() == "all": + k0 = 0 + k1 = transient2d.model.nper + + else: + k0 = int(kper) + k1 = k0 + 1 + + else: + k0 = int(kper) + k1 = k0 + 1 + + if fignum is not None: + if not isinstance(fignum, list): + fignum = list(fignum) + else: + fignum = list(range(k0, k1)) + + if 'mflay' in kwargs: + kwargs.pop('mflay') + + axes = [] + for idx, kper in enumerate(range(k0, k1)): + title = '{} stress period {:d}'.format( + transient2d.name.replace('_', '').upper(), + kper + 1) + + if filename_base is not None: + filename = filename_base + '_{:05d}.{}'.format(kper + 1, fext) + else: + filename = None + + axes.append(PlotUtilities._plot_array_helper( + transient2d.array[kper], + transient2d.model, + names=title, + filenames=filename, + fignum=fignum[idx], + **kwargs)) + return axes + + @staticmethod + def _plot_scalar_helper(scalar, filename_base=None, + file_extension=None, **kwargs): + """ + Helper method to plot scalar objects + + Parameters + ---------- + scalar : flopy.mf6.data.mfscalar object + filename_base : str + Base file name that will be used to automatically generate file + names for output image files. Plots will be exported as image + files if file_name_base is not None. (default is None) + file_extension : str + Valid matplotlib.pyplot file extension for savefig(). Only used + if filename_base is not None. (default is 'png') + + Returns + ------- + axes: list matplotlib.axes object + + """ + if file_extension is not None: + fext = file_extension + else: + fext = 'png' + + if 'mflay' in kwargs: + kwargs.pop('mflay') + + title = '{}'.format(scalar.name.replace('_', '').upper()) + + if filename_base is not None: + filename = filename_base + '.{}'.format(fext) + else: + filename = None + + axes = PlotUtilities._plot_array_helper(scalar.array, + scalar.model, + names=title, + filenames=filename, + **kwargs) + return axes + + @staticmethod + def _plot_array_helper(plotarray, model=None, modelgrid=None, axes=None, + names=None, filenames=None, fignum=None, + mflay=None, **kwargs): + """ + Helper method to plot array objects + + Parameters + ---------- + plotarray : np.array object + model: fp.modflow.Modflow object + optional if spatial reference is provided + modelgrid: fp.discretization.ModelGrid object + object that defines the spatial orientation of a modflow + grid within flopy. Optional if model object is provided + axes: matplotlib.axes object + existing matplotlib axis object to layer additional + plotting on to. Optional. + names: list + list of figure titles (optional) + filenames: list + list of filenames to save figures to (optional) + fignum: + list of figure numbers (optional) + mflay: int + modflow model layer + **kwargs: + keyword arguments + + Returns: + axes: list matplotlib.axes object + + """ + from .map import PlotMapView + + defaults = {'figsize': None, 'masked_values': None, + 'pcolor': True, 'inactive': True, + 'contour': False, 'clabel': False, + 'colorbar': False, 'grid': False, + 'levels': None, 'colors': "black", + 'dpi': None, 'fmt': "%1.3f", 'modelgrid': None} + + # check that matplotlib is installed + if plt is None: + err_msg = 'Could not import matplotlib. ' \ + 'Must install matplotlib ' + \ + ' in order to plot LayerFile data.' 
+ raise PlotException(err_msg) + + for key in defaults: + if key in kwargs: + defaults[key] = kwargs.pop(key) + + plotarray = plotarray.astype(float) + + # test if this is vertex or structured grid + if model is not None: + grid_type = model.modelgrid.grid_type + hnoflo = model.hnoflo + hdry = model.hdry + if defaults['masked_values'] is None: + t = [] + if hnoflo is not None: + t.append(hnoflo) + if hdry is not None: + t.append(hdry) + if t: + defaults['masked_values'] = t + else: + if hnoflo is not None: + defaults['masked_values'].append(hnoflo) + if hdry is not None: + defaults['masked_values'].append(hdry) + + elif modelgrid is not None: + grid_type = modelgrid.grid_type + + else: + grid_type = "structured" + + ib = None + if modelgrid is not None: + if modelgrid.idomain is not None: + ib = modelgrid.idomain + + else: + if ib is None: + try: + ib = model.modelgrid.idomain + except: + pass + + # reshape 2d arrays to 3d for convenience + if len(plotarray.shape) == 2 and grid_type == "structured": + plotarray = plotarray.reshape((1, plotarray.shape[0], + plotarray.shape[1])) + + # setup plotting routines + # consider refactoring maxlay to nlay + maxlay = plotarray.shape[0] + i0, i1 = PlotUtilities._set_layer_range(mflay, maxlay) + names = PlotUtilities._set_names(names, maxlay) + filenames = PlotUtilities._set_names(filenames, maxlay) + fignum = PlotUtilities._set_fignum(fignum, maxlay, i0, i1) + axes = PlotUtilities._set_axes(axes, mflay, maxlay, i0, i1, + defaults, names, fignum) + + for idx, k in enumerate(range(i0, i1)): + fig = plt.figure(num=fignum[idx]) + pmv = PlotMapView(ax=axes[idx], model=model, + modelgrid=modelgrid, layer=k) + if defaults['pcolor']: + cm = pmv.plot_array(plotarray[k], + masked_values=defaults['masked_values'], + ax=axes[idx], **kwargs) + + if defaults['colorbar']: + label = '' + if not isinstance(defaults['colorbar'], bool): + label = str(defaults['colorbar']) + plt.colorbar(cm, ax=axes[idx], shrink=0.5, label=label) + + if defaults['contour']: + cl = pmv.contour_array(plotarray[k], + masked_values=defaults['masked_values'], + ax=axes[idx], + colors=defaults['colors'], + levels=defaults['levels'], + **kwargs) + if defaults['clabel']: + axes[idx].clabel(cl, fmt=defaults['fmt'],**kwargs) + + if defaults['grid']: + pmv.plot_grid(ax=axes[idx]) + + if defaults['inactive']: + if ib is not None: + pmv.plot_inactive(ibound=ib, ax=axes[idx]) + + if len(axes) == 1: + axes = axes[0] + + if filenames is not None: + for idx, k in enumerate(range(i0, i1)): + fig = plt.figure(num=fignum[idx]) + fig.savefig(filenames[idx], dpi=defaults['dpi']) + print(' created...{}'.format(os.path.basename(filenames[idx]))) + # there will be nothing to return when done + axes = None + plt.close('all') + + return axes + + @staticmethod + def _plot_bc_helper(package, kper, + axes=None, names=None, filenames=None, fignum=None, + mflay=None, **kwargs): + """ + Helper method to plot bc objects from flopy packages + + Parameters + ---------- + package : flopy.pakbase.Package objects + kper : int + zero based stress period number + axes: matplotlib.axes object + existing matplotlib axis object to layer additional + plotting on to. Optional. 
+ names: list + list of figure titles (optional) + filenames: list + list of filenames to save figures to (optional) + fignum: + list of figure numbers (optional) + mflay: int + modflow model layer + **kwargs: + keyword arguments + + Returns + ------- + axes: list matplotlib.axes object + """ + + from .map import PlotMapView + + if plt is None: + s = 'Could not import matplotlib. Must install matplotlib ' +\ + ' in order to plot boundary condition data.' + raise PlotException(s) + + defaults = {'figsize': None, "inactive": True, + 'grid': False, "dpi": None, + "masked_values": None} + + # parse kwargs + for key in defaults: + if key in kwargs: + defaults[key] = kwargs.pop(key) + + ftype = package.name[0] + + color = "black" + if "CHD" in ftype.upper(): + color = bc_color_dict[ftype.upper()[:3]] + + # flopy-modflow vs. flopy-modflow6 trap + try: + model = package.parent + except AttributeError: + model = package._model_or_sim + + nlay = model.modelgrid.nlay + + # set up plotting routines + i0, i1 = PlotUtilities._set_layer_range(mflay, nlay) + names = PlotUtilities._set_names(names, nlay) + filenames = PlotUtilities._set_names(filenames, i1 - i0) + fignum = PlotUtilities._set_fignum(fignum, i1 - i0, i0, i1) + axes = PlotUtilities._set_axes(axes, mflay, nlay, i0, i1, + defaults, names, fignum) + + for idx, k in enumerate(range(i0, i1)): + pmv = PlotMapView(ax=axes[idx], model=model, layer=k) + fig = plt.figure(num=fignum[idx]) + pmv.plot_bc(ftype=ftype, package=package, kper=kper, ax=axes[idx], + color=color) + + if defaults['grid']: + pmv.plot_grid(ax=axes[idx]) + + if defaults['inactive']: + if model.modelgrid is not None: + ib = model.modelgrid.idomain + if ib is not None: + pmv.plot_inactive(ibound=ib, ax=axes[idx]) + + if len(axes) == 1: + axes = axes[0] + + if filenames is not None: + for idx, k in enumerate(range(i0, i1)): + fig = plt.figure(num=fignum[idx]) + fig.savefig(filenames[idx], dpi=defaults['dpi']) + plt.close(fignum[idx]) + print(' created...{}'.format(os.path.basename(filenames[idx]))) + # there will be nothing to return when done + axes = None + plt.close('all') + + return axes + + @staticmethod + def _set_layer_range(mflay, maxlay): + """ + Re-usable method to check for mflay and set + the range of plottable layers + + Parameters + ---------- + mflay : int + zero based layer number + maxlay : int + maximum number of layers in the plotting array + + Returns + ------- + i0, i1 : int, int + minimum and maximum bounds on the layer range + + """ + if mflay is not None: + i0 = int(mflay) + if i0+1 >= maxlay: + i0 = maxlay - 1 + i1 = i0 + 1 + else: + i0 = 0 + i1 = maxlay + + return i0, i1 + + @staticmethod + def _set_names(names, maxlay): + """ + Checks the supplied name variable for shape + + Parameters + ---------- + names : list of str + if names is not none, asserts that there is + a name supplied for each plot that will be + generated + + maxlay : int + maximum number of layers in the plotting array + + Returns + ------- + names : list or None + list of names or None + + """ + if names is not None: + if not isinstance(names, list): + if maxlay > 1: + names = ["{} layer {}".format(names, i + 1) + for i in range(maxlay)] + else: + names = [names] + assert len(names) == maxlay + return names + + @staticmethod + def _set_fignum(fignum, maxlay, i0, i1): + """ + Method to generate a list of matplotlib figure + numbers to join to figure objects. Checks + for existing figures. 
+ + Parameters + ---------- + fignum : list + list of figure numbers + maxlay : int + maximum number of layers in the plotting array + i0 : int + minimum layer range + i1 : int + maximum layer range + + Returns + ------- + fignum : list + + """ + if fignum is not None: + if not isinstance(fignum, list): + fignum = [fignum] + assert len(fignum) == maxlay + # check for existing figures + f0 = fignum[0] + for i in plt.get_fignums(): + if i >= f0: + f0 = i + 1 + finc = f0 - fignum[0] + for idx, _ in enumerate(fignum): + fignum[idx] += finc + else: + # check for existing figures + f0 = 0 + for i in plt.get_fignums(): + if i >= f0: + f0 += 1 + f1 = f0 + (i1 - i0) + fignum = np.arange(f0, f1) + + return fignum + + @staticmethod + def _set_axes(axes, mflay, maxlay, i0, i1, + defaults, names, fignum): + """ + Method to prepare axes objects for plotting + + Parameters + ---------- + axes : list + matplotlib.axes objects + mflay : int + layer to plot or None + i0 : int + minimum range of layers to plot + i1 : int + maximum range of layers to plot + defaults : dict + the default dictionary from the parent plotting method + fignum : list + list of figure numbers + + Returns + ------- + axes : list + matplotlib.axes objects + + """ + if axes is not None: + if not isinstance(axes, list): + axes = [axes] + assert len(axes) == maxlay + + else: + # prepare some axis objects for use + axes = [] + for idx, k in enumerate(range(i0, i1)): + plt.figure(figsize=defaults['figsize'], + num=fignum[idx]) + ax = plt.subplot(1, 1, 1, aspect='equal') + if names is not None: + title = names[k] + else: + klay = k + if mflay is not None: + klay = int(mflay) + title = '{} Layer {}'.format('data', klay+1) + ax.set_title(title) + axes.append(ax) + + return axes + + @staticmethod + def saturated_thickness(head, top, botm, laytyp, mask_values=None): + """ + Calculate the saturated thickness. + + Parameters + ---------- + head : numpy.ndarray + head array + top : numpy.ndarray + top array of shape (nrow, ncol) + botm : numpy.ndarray + botm array of shape (nlay, nrow, ncol) + laytyp : numpy.ndarray + confined (0) or convertible (1) of shape (nlay) + mask_values : list of floats + If head is one of these values, then set sat to top - bot + + Returns + ------- + sat_thk : numpy.ndarray + Saturated thickness of shape (nlay, nrow, ncol). 
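+
+        Examples
+        --------
+        A minimal sketch, assuming ``head`` was read from a binary head
+        file for a model ``ml`` that contains DIS, BAS6, and LPF packages::
+
+            >>> from flopy.plot.plotutil import PlotUtilities
+            >>> sat = PlotUtilities.saturated_thickness(
+            ...     head, ml.dis.top.array, ml.dis.botm.array,
+            ...     ml.lpf.laytyp.array,
+            ...     mask_values=[ml.bas6.hnoflo, ml.lpf.hdry])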
+ + """ + if head.ndim == 3: + head = np.copy(head) + nlay, nrow, ncol = head.shape + ncpl = nrow * ncol + head.shape = (nlay, ncpl) + top.shape = (ncpl,) + botm.shape = (nlay, ncpl) + if laytyp.ndim == 3: + laytyp.shape = (nlay, ncpl) + + else: + nrow, ncol = None, None + nlay, ncpl = head.shape + + # cast a laytyp flag for each cell if modflow-2005 based, + # which makes it consistent with the mf6 iconvert array + if laytyp.ndim == 1: + t = np.zeros(head.shape) + for ix, _ in enumerate(laytyp): + t[ix, :] = laytyp[ix] + laytyp = t + del t + + sat_thk_conf = np.empty(head.shape, dtype=head.dtype) + sat_thk_unconf = np.empty(head.shape, dtype=head.dtype) + + for k in range(nlay): + if k == 0: + t = top + else: + t = botm[k - 1, :] + sat_thk_conf[k, :] = t - botm[k, :] + + for k in range(nlay): + dh = np.zeros((ncpl,), dtype=head.dtype) + s = sat_thk_conf[k, :] + + for mv in mask_values: + idx = (head[k, :] == mv) + dh[idx] = s[idx] + + if k == 0: + t = top + else: + t = botm[k - 1, :] + + t = np.where(head[k, :] > t, t, head[k, :]) + dh = np.where(dh == 0, t - botm[k, :], dh) + sat_thk_unconf[k, :] = dh[:] + + sat_thk = np.where(laytyp != 0, sat_thk_unconf, sat_thk_conf) + + if nrow is not None and ncol is not None: + sat_thk.shape = (nlay, nrow, ncol) + + return sat_thk + + @staticmethod + def centered_specific_discharge(Qx, Qy, Qz, delr, delc, sat_thk): + """ + DEPRECATED. Use postprocessing.get_specific_discharge() instead. + + Using the MODFLOW discharge, calculate the cell centered specific discharge + by dividing by the flow width and then averaging to the cell center. + + Parameters + ---------- + Qx : numpy.ndarray + MODFLOW 'flow right face' + Qy : numpy.ndarray + MODFLOW 'flow front face'. The sign on this array will be flipped + by this function so that the y axis is positive to north. + Qz : numpy.ndarray + MODFLOW 'flow lower face'. The sign on this array will be flipped by + this function so that the z axis is positive in the upward direction. + delr : numpy.ndarray + MODFLOW delr array + delc : numpy.ndarray + MODFLOW delc array + sat_thk : numpy.ndarray + Saturated thickness for each cell + + Returns + ------- + (qx, qy, qz) : tuple of numpy.ndarrays + Specific discharge arrays that have been interpolated to cell centers. + + """ + import warnings + warnings.warn('centered_specific_discharge() has been deprecated. Use ' + 'postprocessing.get_specific_discharge() instead.', + DeprecationWarning) + + qx = None + qy = None + qz = None + + if Qx is not None: + + nlay, nrow, ncol = Qx.shape + qx = np.zeros(Qx.shape, dtype=Qx.dtype) + + for k in range(nlay): + for j in range(ncol - 1): + area = delc[:] * 0.5 * (sat_thk[k, :, j] + sat_thk[k, :, j + 1]) + idx = area > 0. + qx[k, idx, j] = Qx[k, idx, j] / area[idx] + + qx[:, :, 1:] = 0.5 * (qx[:, :, 0:ncol - 1] + qx[:, :, 1:ncol]) + qx[:, :, 0] = 0.5 * qx[:, :, 0] + + if Qy is not None: + + nlay, nrow, ncol = Qy.shape + qy = np.zeros(Qy.shape, dtype=Qy.dtype) + + for k in range(nlay): + for i in range(nrow - 1): + area = delr[:] * 0.5 * (sat_thk[k, i, :] + sat_thk[k, i + 1, :]) + idx = area > 0. 
+                    qy[k, i, idx] = Qy[k, i, idx] / area[idx]
+
+            qy[:, 1:, :] = 0.5 * (qy[:, 0:nrow - 1, :] + qy[:, 1:nrow, :])
+            qy[:, 0, :] = 0.5 * qy[:, 0, :]
+            qy = -qy
+
+        if Qz is not None:
+            qz = np.zeros(Qz.shape, dtype=Qz.dtype)
+            dr = delr.reshape((1, delr.shape[0]))
+            dc = delc.reshape((delc.shape[0], 1))
+            area = dr * dc
+            for k in range(nlay):
+                qz[k, :, :] = Qz[k, :, :] / area[:, :]
+            qz[1:, :, :] = 0.5 * (qz[0:nlay - 1, :, :] + qz[1:nlay, :, :])
+            qz[0, :, :] = 0.5 * qz[0, :, :]
+            qz = -qz
+
+        return (qx, qy, qz)
+
+
+class UnstructuredPlotUtilities(object):
+    """
+    Collection of unstructured grid and vertex grid compatible
+    plotting helper functions
+    """
+
+    @staticmethod
+    def line_intersect_grid(ptsin, xgrid, ygrid):
+        """
+        Uses the cross product method to find which cells intersect the
+        line, and then uses the parameterized line equation to calculate
+        the intersection x, y vertex points. Should be quite fast for
+        large model grids!
+
+        Parameters
+        ----------
+        ptsin : list
+            list of tuple line vertex pairs (e.g. [(1, 0), (10, 0)])
+        xgrid : np.array
+            model grid x vertices
+        ygrid : np.array
+            model grid y vertices
+
+        Returns
+        -------
+        vdict : dict of cell vertices
+
+        """
+        # make sure xgrid and ygrid are numpy arrays
+        if not isinstance(xgrid, np.ndarray):
+            xgrid = np.array(xgrid)
+        if not isinstance(ygrid, np.ndarray):
+            ygrid = np.array(ygrid)
+
+        npts = len(ptsin)
+
+        # use a vector cross product to find which
+        # cells intersect the line
+        vdict = {}
+        for ix in range(1, npts):
+            xmin = np.min([ptsin[ix - 1][0], ptsin[ix][0]])
+            xmax = np.max([ptsin[ix - 1][0], ptsin[ix][0]])
+            ymin = np.min([ptsin[ix - 1][1], ptsin[ix][1]])
+            ymax = np.max([ptsin[ix - 1][1], ptsin[ix][1]])
+            x1 = np.ones(xgrid.shape) * ptsin[ix - 1][0]
+            y1 = np.ones(ygrid.shape) * ptsin[ix - 1][1]
+            x2 = np.ones(xgrid.shape) * ptsin[ix][0]
+            y2 = np.ones(ygrid.shape) * ptsin[ix][1]
+            x3 = xgrid
+            y3 = ygrid
+            x4 = np.zeros(xgrid.shape)
+            y4 = np.zeros(ygrid.shape)
+            x4[:, :-1] = xgrid[:, 1:]
+            x4[:, -1] = xgrid[:, 0]
+            y4[:, :-1] = ygrid[:, 1:]
+            y4[:, -1] = ygrid[:, 0]
+
+            # find where intersection is
+            v1 = [x2 - x1, y2 - y1]
+            v2 = [x2 - x3, y2 - y3]
+            xp = v1[0] * v2[1] - v1[1] * v2[0]
+
+            # loop finds which edges the line intersects
+            cells = []
+            cell_vertex_ix = []
+            for cell, cpv in enumerate(xp):
+                if np.all([t < 0 for t in cpv]):
+                    continue
+                elif np.all([t > 0 for t in cpv]):
+                    continue
+
+                else:
+                    # only cycle through the cells that intersect
+                    # the infinite line
+                    cvert_ix = []
+                    for ix in range(len(cpv)):
+                        if cpv[ix - 1] < 0 and cpv[ix] > 0:
+                            cvert_ix.append(ix - 1)
+                        elif cpv[ix - 1] > 0 and cpv[ix] < 0:
+                            cvert_ix.append(ix - 1)
+                        elif cpv[ix - 1] == 0 and cpv[ix] == 0:
+                            cvert_ix += [ix - 1, ix]
+                        else:
+                            pass
+
+                    if cvert_ix:
+                        cells.append(cell)
+                        cell_vertex_ix.append(cvert_ix)
+
+            # find intersection vertices
+            numa = (x4 - x3) * (y1 - y3) - (y4 - y3) * (x1 - x3)
+            numb = (x2 - x1) * (y1 - y3) - (y2 - y1) * (x1 - x3)
+            denom = (y4 - y3) * (x2 - x1) - (x4 - x3) * (y2 - y1)
+            ua = numa / denom
+            # ub = numb / denom
+            del numa
+            del numb
+            del denom
+
+            x = x1 + ua * (x2 - x1)
+            y = y1 + ua * (y2 - y1)
+
+            for ix, cell in enumerate(cells):
+                xc = x[cell]
+                yc = y[cell]
+                verts = [(xt, yt) for xt, yt in
+                         zip(xc[cell_vertex_ix[ix]],
+                             yc[cell_vertex_ix[ix]])]
+
+                if cell in vdict:
+                    for i in verts:
+                        # finally check that verts are
+                        # within the line segment range
+                        if i[0] < xmin or i[0] > xmax:
+                            continue
+                        elif i[1] < ymin or i[1] > ymax:
+                            continue
+                        elif i in vdict[cell]:
+                            continue
+                        elif np.isnan(i[0]) or np.isinf(i[0]) \
+                                or np.isinf(i[1]) or np.isnan(i[1]):
+                            continue
+                        else:
+                            vdict[cell].append(i)
+                else:
+                    # finally check that verts are
+                    # within the line segment range
+                    t = []
+                    for i in verts:
+                        if i[0] < xmin or i[0] > xmax:
+                            continue
+                        elif i[1] < ymin or i[1] > ymax:
+                            continue
+                        elif i in t:
+                            continue
+                        elif np.isnan(i[0]) or np.isinf(i[0]) \
+                                or np.isinf(i[1]) or np.isnan(i[1]):
+                            continue
+                        else:
+                            t.append(i)
+
+                    if t:
+                        vdict[cell] = t
+
+        return vdict
+
+    @staticmethod
+    def irregular_shape_patch(xverts, yverts):
+        """
+        Patch for vertex cross section plotting when
+        we have an irregular shape type throughout the
+        model grid or multiple shape types.
+
+        Parameters
+        ----------
+        xverts : list
+            xvertices
+        yverts : list
+            yvertices
+
+        Returns
+        -------
+        xverts, yverts as np.ndarray
+
+        """
+        max_verts = 0
+
+        for xv in xverts:
+            if len(xv) > max_verts:
+                max_verts = len(xv)
+
+        for yv in yverts:
+            if len(yv) > max_verts:
+                max_verts = len(yv)
+
+        adj_xverts = []
+        for xv in xverts:
+            if len(xv) < max_verts:
+                n = max_verts - len(xv)
+                adj_xverts.append(xv + [xv[-1]] * n)
+            else:
+                adj_xverts.append(xv)
+
+        adj_yverts = []
+        for yv in yverts:
+            if len(yv) < max_verts:
+                n = max_verts - len(yv)
+                adj_yverts.append(yv + [yv[-1]] * n)
+            else:
+                adj_yverts.append(yv)
+
+        xverts = np.array(adj_xverts)
+        yverts = np.array(adj_yverts)
+
+        return xverts, yverts
+
+    @staticmethod
+    def arctan2(verts):
+        """
+        Orders a two-dimensional array of vertices by the angle about
+        their centroid, computed with arctan2
+
+        Parameters
+        ----------
+        verts : np.array of floats
+            Nx2 array of verts
+
+        Returns
+        -------
+        verts : np.array of float
+            Nx2 array of verts
+
+        """
+        center = verts.mean(axis=0)
+        x = verts.T[0] - center[0]
+        z = verts.T[1] - center[1]
+
+        angles = np.arctan2(z, x) * 180 / np.pi
+        angleidx = angles.argsort()
+
+        verts = verts[angleidx]
+        return verts
+
+
+class SwiConcentration():
+    """
+    The SwiConcentration class calculates cell concentrations from
+    SWI2 zeta surfaces for a MODFLOW model
+
+    """
+    def __init__(self, model=None, botm=None, istrat=1, nu=None):
+        if model is None:
+            if isinstance(botm, list):
+                botm = np.array(botm)
+            self.__botm = botm
+            if isinstance(nu, list):
+                nu = np.array(nu)
+            self.__nu = nu
+            self.__istrat = istrat
+            if istrat == 1:
+                self.__nsrf = self.__nu.shape[0] - 1
+            else:
+                self.__nsrf = self.__nu.shape[0] - 2
+        else:
+            try:
+                dis = model.get_package('DIS')
+            except:
+                sys.stdout.write('Error: DIS package not available.\n')
+            self.__botm = np.zeros((dis.nlay+1, dis.nrow, dis.ncol), np.float)
+            self.__botm[0, :, :] = dis.top.array
+            self.__botm[1:, :, :] = dis.botm.array
+            try:
+                swi = model.get_package('SWI2')
+                self.__nu = swi.nu.array
+                self.__istrat = swi.istrat
+                self.__nsrf = swi.nsrf
+            except (AttributeError, ValueError):
+                sys.stdout.write('Error: SWI2 package not available...\n')
+        self.__nlay = self.__botm.shape[0] - 1
+        self.__nrow = self.__botm[0, :, :].shape[0]
+        self.__ncol = self.__botm[0, :, :].shape[1]
+        self.__b = self.__botm[0:-1, :, :] - self.__botm[1:, :, :]
+
+    def calc_conc(self, zeta, layer=None):
+        """
+        Calculate concentrations for a given time step using passed zeta.
+
+        Parameters
+        ----------
+        zeta : dictionary of numpy arrays
+            Dictionary of zeta results. zeta keys are zero-based zeta surfaces.
+        layer : int
+            Concentration will be calculated for the specified layer. If layer
+            is None, then the concentration will be calculated for all layers.
+            (default is None).
+
+        Returns
+        -------
+        conc : numpy array
+            Calculated concentration.
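+
+        Notes
+        -----
+        For istrat=1 each zeta surface isrf contributes nu[isrf] * p[isrf]
+        to a cell, where p[isrf] is the fraction of the cell thickness that
+        lies above surface isrf; the deepest zone adds
+        nu[nsrf] * (1 - p[nsrf - 1]).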
+ + Examples + -------- + + >>> import flopy + >>> m = flopy.modflow.Modflow.load('test') + >>> c = flopy.plot.SwiConcentration(model=m) + >>> conc = c.calc_conc(z, layer=0) + + """ + conc = np.zeros((self.__nlay, self.__nrow, self.__ncol), np.float) + + pct = {} + for isrf in range(self.__nsrf): + z = zeta[isrf] + pct[isrf] = (self.__botm[:-1, :, :] - z[:, :, :]) / self.__b[:, :, :] + for isrf in range(self.__nsrf): + p = pct[isrf] + if self.__istrat == 1: + conc[:, :, :] += self.__nu[isrf] * p[:, :, :] + if isrf+1 == self.__nsrf: + conc[:, :, :] += self.__nu[isrf+1] * (1. - p[:, :, :]) + #TODO linear option + if layer is None: + return conc + else: + return conc[layer, :, :] + + +def shapefile_extents(shp): + """ + Determine the extents of a shapefile + + Parameters + ---------- + shp : string + Name of the shapefile to convert to a PatchCollection. + + Returns + ------- + extents : tuple + tuple with xmin, xmax, ymin, ymax from shapefile. + + Examples + -------- + + >>> import flopy + >>> fshp = 'myshapefile' + >>> extent = flopy.plot.plotutil.shapefile_extents(fshp) + + """ + if shapefile is None: + s = 'Could not import shapefile. Must install pyshp in order to plot shapefiles.' + raise PlotException(s) + + sf = shapefile.Reader(shp) + shapes = sf.shapes() + nshp = len(shapes) + xmin, xmax, ymin, ymax = 1.e20, -1.e20, 1.e20, -1.e20 + + for n in range(nshp): + for p in shapes[n].points: + xmin, xmax = min(xmin, p[0]), max(xmax, p[0]) + ymin, ymax = min(ymin, p[1]), max(ymax, p[1]) + return xmin, xmax, ymin, ymax + + +def shapefile_get_vertices(shp): + """ + Get vertices for the features in a shapefile + + Parameters + ---------- + shp : string + Name of the shapefile to extract shapefile feature vertices. + + Returns + ------- + vertices : list + Vertices is a list with vertices for each feature in the shapefile. + Individual feature vertices are x, y tuples and contained in a list. + A list with a single x, y tuple is returned for point shapefiles. A + list with multiple x, y tuples is returned for polyline and polygon + shapefiles. + + Examples + -------- + + >>> import flopy + >>> fshp = 'myshapefile' + >>> lines = flopy.plot.plotutil.shapefile_get_vertices(fshp) + + """ + if shapefile is None: + s = 'Could not import shapefile. Must install pyshp in order to plot shapefiles.' + raise PlotException(s) + + sf = shapefile.Reader(shp) + shapes = sf.shapes() + nshp = len(shapes) + vertices = [] + for n in range(nshp): + st = shapes[n].shapeType + if st in [1, 8, 11, 21]: + #points + for p in shapes[n].points: + vertices.append([(p[0], p[1])]) + elif st in [3, 13, 23]: + #line + line = [] + for p in shapes[n].points: + line.append((p[0], p[1])) + line = np.array(line) + vertices.append(line) + elif st in [5, 25, 31]: + #polygons + pts = np.array(shapes[n].points) + prt = shapes[n].parts + par = list(prt) + [pts.shape[0]] + for pij in range(len(prt)): + vertices.append(pts[par[pij]:par[pij+1]]) + return vertices + + +def shapefile_to_patch_collection(shp, radius=500., idx=None): + """ + Create a patch collection from the shapes in a shapefile + + Parameters + ---------- + shp : string + Name of the shapefile to convert to a PatchCollection. + radius : float + Radius of circle for points in the shapefile. (Default is 500.) + idx : iterable int + A list or array that contains shape numbers to include in the + patch collection. Return all shapes if not specified. 
+
+    Returns
+    -------
+    pc : matplotlib.collections.PatchCollection
+        Patch collection of shapes in the shapefile
+
+    """
+    if shapefile is None:
+        s = 'Could not import shapefile. Must install pyshp in order to plot shapefiles.'
+        raise PlotException(s)
+
+    from matplotlib.patches import Polygon, Circle, Path, PathPatch
+    from matplotlib.collections import PatchCollection
+    if isinstance(shp, str):
+        sf = shapefile.Reader(shp)
+    else:
+        sf = shp
+    shapes = sf.shapes()
+    nshp = len(shapes)
+    ptchs = []
+    if idx is None:
+        idx = range(nshp)
+    for n in idx:
+        st = shapes[n].shapeType
+        if st in [1, 8, 11, 21]:
+            # points
+            for p in shapes[n].points:
+                ptchs.append(Circle((p[0], p[1]), radius=radius))
+        elif st in [3, 13, 23]:
+            # line
+            vertices = []
+            for p in shapes[n].points:
+                vertices.append([p[0], p[1]])
+            vertices = np.array(vertices)
+            path = Path(vertices)
+            ptchs.append(PathPatch(path, fill=False))
+        elif st in [5, 25, 31]:
+            # polygons
+            pts = np.array(shapes[n].points)
+            prt = shapes[n].parts
+            par = list(prt) + [pts.shape[0]]
+            for pij in range(len(prt)):
+                ptchs.append(Polygon(pts[par[pij]:par[pij + 1]]))
+    pc = PatchCollection(ptchs)
+    return pc
+
+
+def plot_shapefile(shp, ax=None, radius=500., cmap='Dark2',
+                   edgecolor='scaled', facecolor='scaled',
+                   a=None, masked_values=None, idx=None, **kwargs):
+    """
+    Generic function for plotting a shapefile.
+
+    Parameters
+    ----------
+    shp : string
+        Name of the shapefile to plot.
+    ax : matplotlib.pyplot.axes object
+    radius : float
+        Radius of circle for points. (Default is 500.)
+    cmap : string
+        Name of colormap to use for polygon shading (default is 'Dark2')
+    edgecolor : string
+        Color name. (Default is 'scaled' to scale the edge colors.)
+    facecolor : string
+        Color name. (Default is 'scaled' to scale the face colors.)
+    a : numpy.ndarray
+        Array to plot.
+    masked_values : iterable of floats, ints
+        Values to mask.
+    idx : iterable int
+        A list or array that contains shape numbers to include in the
+        patch collection. Return all shapes if not specified.
+    kwargs : dictionary
+        Keyword arguments that are passed to PatchCollection.set(``**kwargs``).
+        Some common kwargs would be 'linewidths', 'linestyles', 'alpha', etc.
+
+    Returns
+    -------
+    pc : matplotlib.collections.PatchCollection
+
+    """
+
+    if shapefile is None:
+        s = 'Could not import shapefile. Must install pyshp in order to plot shapefiles.'
+        raise PlotException(s)
+
+    if 'vmin' in kwargs:
+        vmin = kwargs.pop('vmin')
+    else:
+        vmin = None
+
+    if 'vmax' in kwargs:
+        vmax = kwargs.pop('vmax')
+    else:
+        vmax = None
+
+    if ax is None:
+        ax = plt.gca()
+    cm = plt.get_cmap(cmap)
+    pc = shapefile_to_patch_collection(shp, radius=radius, idx=idx)
+    pc.set(**kwargs)
+    if a is None:
+        nshp = len(pc.get_paths())
+        cccol = cm(1. * np.arange(nshp) / nshp)
+        if facecolor == 'scaled':
+            pc.set_facecolor(cccol)
+        else:
+            pc.set_facecolor(facecolor)
+        if edgecolor == 'scaled':
+            pc.set_edgecolor(cccol)
+        else:
+            pc.set_edgecolor(edgecolor)
+    else:
+        pc.set_cmap(cm)
+        if masked_values is not None:
+            for mval in masked_values:
+                a = np.ma.masked_equal(a, mval)
+        if edgecolor == 'scaled':
+            pc.set_edgecolor('none')
+        else:
+            pc.set_edgecolor(edgecolor)
+        pc.set_array(a)
+        pc.set_clim(vmin=vmin, vmax=vmax)
+    # add the patch collection to the axis
+    ax.add_collection(pc)
+    return pc
+
+
+def cvfd_to_patch_collection(verts, iverts):
+    """
+    Create a patch collection from control volume vertices and incidence list
+
+    Parameters
+    ----------
+    verts : ndarray
+        2d array of x and y points.
+    iverts : list of lists
+        should be of len(ncells) with a list of vertex numbers for each cell
+
+    """
+    from matplotlib.patches import Polygon
+    from matplotlib.collections import PatchCollection
+    ptchs = []
+    for ivertlist in iverts:
+        points = []
+        for iv in ivertlist:
+            points.append((verts[iv, 0], verts[iv, 1]))
+        # close the polygon, if necessary
+        if ivertlist[0] != ivertlist[-1]:
+            iv = ivertlist[0]
+            points.append((verts[iv, 0], verts[iv, 1]))
+        ptchs.append(Polygon(points))
+    pc = PatchCollection(ptchs)
+    return pc
+
+
+def plot_cvfd(verts, iverts, ax=None, layer=0, cmap='Dark2',
+              edgecolor='scaled', facecolor='scaled', a=None,
+              masked_values=None, **kwargs):
+    """
+    Generic function for plotting a control volume finite difference grid of
+    information.
+
+    Parameters
+    ----------
+    verts : ndarray
+        2d array of x and y points.
+    iverts : list of lists
+        should be of len(ncells) with a list of vertex numbers for each cell
+    ax : matplotlib.pyplot axis
+        matplotlib.pyplot axis instance. Default is None
+    layer : int
+        layer to extract. Used in combination to the optional ncpl
+        parameter. Default is 0
+    cmap : string
+        Name of colormap to use for polygon shading (default is 'Dark2')
+    edgecolor : string
+        Color name. (Default is 'scaled' to scale the edge colors.)
+    facecolor : string
+        Color name. (Default is 'scaled' to scale the face colors.)
+    a : numpy.ndarray
+        Array to plot.
+    masked_values : iterable of floats, ints
+        Values to mask.
+    kwargs : dictionary
+        Keyword arguments that are passed to PatchCollection.set(``**kwargs``).
+        Some common kwargs would be 'linewidths', 'linestyles', 'alpha', etc.
+
+    Returns
+    -------
+    pc : matplotlib.collections.PatchCollection
+
+    """
+    import matplotlib.pyplot as plt
+
+    if 'vmin' in kwargs:
+        vmin = kwargs.pop('vmin')
+    else:
+        vmin = None
+
+    if 'vmax' in kwargs:
+        vmax = kwargs.pop('vmax')
+    else:
+        vmax = None
+
+    if 'ncpl' in kwargs:
+        nlay = layer + 1
+        ncpl = kwargs.pop('ncpl')
+        if isinstance(ncpl, int):
+            i = int(ncpl)
+            ncpl = np.ones((nlay), dtype=np.int) * i
+        elif isinstance(ncpl, list) or isinstance(ncpl, tuple):
+            ncpl = np.array(ncpl)
+        i0 = 0
+        i1 = 0
+        for k in range(nlay):
+            i0 = i1
+            i1 = i0 + ncpl[k]
+        # retain iverts in selected layer
+        iverts = iverts[i0:i1]
+        # retain vertices in selected layer
+        tverts = []
+        for iv in iverts:
+            for iloc in iv:
+                tverts.append((verts[iloc, 0], verts[iloc, 1]))
+        verts = np.array(tverts)
+        # calculate offset for starting vertex in layer based on
+        # global vertex numbers
+        iadj = iverts[0][0]
+        # reset iverts to relative vertices in selected layer
+        tiverts = []
+        for iv in iverts:
+            i = []
+            for t in iv:
+                i.append(t - iadj)
+            tiverts.append(i)
+        iverts = tiverts
+    else:
+        i0 = 0
+        i1 = len(iverts)
+
+    # get current axis
+    if ax is None:
+        ax = plt.gca()
+    cm = plt.get_cmap(cmap)
+
+    pc = cvfd_to_patch_collection(verts, iverts)
+    pc.set(**kwargs)
+
+    # set colors
+    if a is None:
+        nshp = len(pc.get_paths())
+        cccol = cm(1. * np.arange(nshp) / nshp)
+        if facecolor == 'scaled':
+            pc.set_facecolor(cccol)
+        else:
+            pc.set_facecolor(facecolor)
+        if edgecolor == 'scaled':
+            pc.set_edgecolor(cccol)
+        else:
+            pc.set_edgecolor(edgecolor)
+    else:
+        pc.set_cmap(cm)
+        if masked_values is not None:
+            for mval in masked_values:
+                a = np.ma.masked_equal(a, mval)
+
+        # add NaN values to mask
+        a = np.ma.masked_where(np.isnan(a), a)
+
+        if edgecolor == 'scaled':
+            pc.set_edgecolor('none')
+        else:
+            pc.set_edgecolor(edgecolor)
+        pc.set_array(a[i0:i1])
+        pc.set_clim(vmin=vmin, vmax=vmax)
+    # add the patch collection to the axis
+    ax.add_collection(pc)
+    return pc
+
+
+def findrowcolumn(pt, xedge, yedge):
+    """
+    Find the MODFLOW cell containing the x- and y- point provided.
+
+    Parameters
+    ----------
+    pt : list or tuple
+        A list or tuple containing a x- and y- coordinate
+    xedge : numpy.ndarray
+        x-coordinate of the edge of each MODFLOW column. xedge is dimensioned
+        to NCOL + 1. If xedge is not a numpy.ndarray it is converted to a
+        numpy.ndarray.
+    yedge : numpy.ndarray
+        y-coordinate of the edge of each MODFLOW row. yedge is dimensioned
+        to NROW + 1. If yedge is not a numpy.ndarray it is converted to a
+        numpy.ndarray.
+
+    Returns
+    -------
+    irow, jcol : int
+        Row and column location containing x- and y- point passed to function.
+
+    Examples
+    --------
+    >>> import flopy
+    >>> irow, jcol = flopy.plotutil.findrowcolumn(pt, xedge, yedge)
+
+    """
+
+    # make sure xedge and yedge are numpy arrays
+    if not isinstance(xedge, np.ndarray):
+        xedge = np.array(xedge)
+    if not isinstance(yedge, np.ndarray):
+        yedge = np.array(yedge)
+
+    # find column
+    jcol = -100
+    for jdx, xmf in enumerate(xedge):
+        if xmf > pt[0]:
+            jcol = jdx - 1
+            break
+
+    # find row
+    irow = -100
+    for jdx, ymf in enumerate(yedge):
+        if ymf < pt[1]:
+            irow = jdx - 1
+            break
+    return irow, jcol
+
+
+def line_intersect_grid(ptsin, xedge, yedge, returnvertices=False):
+    """
+    Intersect a list of polyline vertices with a rectilinear MODFLOW
+    grid. Vertices at the intersection of the polyline with the grid
+    cell edges are returned. Optionally the original polyline vertices
+    are also returned.
+
+    Parameters
+    ----------
+    ptsin : list
+        A list of x, y points defining the vertices of a polyline that will be
+        intersected with the rectilinear MODFLOW grid
+    xedge : numpy.ndarray
+        x-coordinate of the edge of each MODFLOW column. xedge is dimensioned
+        to NCOL + 1. If xedge is not a numpy.ndarray it is converted to a
+        numpy.ndarray.
+    yedge : numpy.ndarray
+        y-coordinate of the edge of each MODFLOW row. yedge is dimensioned
+        to NROW + 1. If yedge is not a numpy.ndarray it is converted to a
+        numpy.ndarray.
+    returnvertices : bool
+        Return the original polyline vertices in the list of numpy.ndarray
+        containing vertices resulting from intersection of the provided
+        polygon and the MODFLOW model grid if returnvertices=True.
+        (default is False).
+
+    Returns
+    -------
+    (x, y, dlen) : numpy.ndarray of tuples
+        numpy.ndarray of tuples containing the x, y, and segment length of the
+        intersection of the provided polyline with the rectilinear MODFLOW
+        grid.
+
+    Examples
+    --------
+    >>> import flopy
+    >>> ptsout = flopy.plotutil.line_intersect_grid(ptsin, xedge, yedge)
+
+    """
+
+    small_value = 1.0e-4
+
+    # make sure xedge and yedge are numpy arrays
+    if not isinstance(xedge, np.ndarray):
+        xedge = np.array(xedge)
+    if not isinstance(yedge, np.ndarray):
+        yedge = np.array(yedge)
+
+    # build list of points along current line
+    pts = []
+    npts = len(ptsin)
+    dlen = 0.
+    for idx in range(1, npts):
+        x0 = ptsin[idx - 1][0]
+        x1 = ptsin[idx][0]
+        y0 = ptsin[idx - 1][1]
+        y1 = ptsin[idx][1]
+        a = x1 - x0
+        b = y1 - y0
+        c = math.sqrt(math.pow(a, 2.) + math.pow(b, 2.))
+        # find cells with (x0, y0) and (x1, y1)
+        irow0, jcol0 = findrowcolumn((x0, y0), xedge, yedge)
+        irow1, jcol1 = findrowcolumn((x1, y1), xedge, yedge)
+        # determine direction to go in the x- and y-directions
+        jx = 0
+        incx = abs(small_value * a / c)
+        iy = 0
+        incy = -abs(small_value * b / c)
+        if a == 0.:
+            incx = 0.
+        # go to the right
+        elif a > 0.:
+            jx = 1
+            incx *= -1.
+        if b == 0.:
+            incy = 0.
+        # go down
+        elif b < 0.:
+            iy = 1
+            incy *= -1.
+        # process data
+        if irow0 >= 0 and jcol0 >= 0:
+            iadd = True
+            if idx > 1 and returnvertices:
+                iadd = False
+            if iadd:
+                pts.append((x0, y0, dlen))
+        icnt = 0
+        while True:
+            icnt += 1
+            dx = xedge[jcol0 + jx] - x0
+            dlx = 0.
+            if a != 0.:
+                dlx = c * dx / a
+            dy = yedge[irow0 + iy] - y0
+            dly = 0.
+            if b != 0.:
+                dly = c * dy / b
+            if dlx != 0. and dly != 0.:
+                if abs(dlx) < abs(dly):
+                    dy = dx * b / a
+                else:
+                    dx = dy * a / b
+            xt = x0 + dx + incx
+            yt = y0 + dy + incy
+            dl = math.sqrt(math.pow((xt - x0), 2.) + math.pow((yt - y0), 2.))
+            dlen += dl
+            if not returnvertices:
+                pts.append((xt, yt, dlen))
+            x0, y0 = xt, yt
+            xt = x0 - 2. * incx
+            yt = y0 - 2. * incy
+            dl = math.sqrt(math.pow((xt - x0), 2.) + math.pow((yt - y0), 2.))
+            dlen += dl
+            x0, y0 = xt, yt
+            irow0, jcol0 = findrowcolumn((x0, y0), xedge, yedge)
+            if irow0 >= 0 and jcol0 >= 0:
+                if not returnvertices:
+                    pts.append((xt, yt, dlen))
+            elif irow1 < 0 or jcol1 < 0:
+                dl = math.sqrt(math.pow((x1 - x0), 2.) + math.pow((y1 - y0), 2.))
+                dlen += dl
+                break
+            if irow0 == irow1 and jcol0 == jcol1:
+                dl = math.sqrt(math.pow((x1 - x0), 2.) + math.pow((y1 - y0), 2.))
+                dlen += dl
+                pts.append((x1, y1, dlen))
+                break
+    return np.array(pts)
+
+
+def cell_value_points(pts, xedge, yedge, vdata):
+    """
+    Extract values from a rectilinear MODFLOW model grid at the x- and
+    y-coordinates of a set of previously intersected polyline points.
+
+    Parameters
+    ----------
+    pts : list
+        A list of (x, y, distance) tuples, typically the output of
+        line_intersect_grid(), defining the points at which to extract values.
+    xedge : numpy.ndarray
+        x-coordinate of the edge of each MODFLOW column. The shape of xedge is
+        (NCOL + 1). If xedge is not a numpy.ndarray it is converted to a
+        numpy.ndarray.
+    yedge : numpy.ndarray
+        y-coordinate of the edge of each MODFLOW row. The shape of yedge is
+        (NROW + 1). If yedge is not a numpy.ndarray it is converted to a
+        numpy.ndarray.
+    vdata : numpy.ndarray
+        Data (i.e., head, hk, etc.) for a rectilinear MODFLOW model grid. The
+        shape of vdata is (NROW, NCOL). If vdata is not a numpy.ndarray it is
+        converted to a numpy.ndarray.
+
+    Returns
+    -------
+    vcell : numpy.ndarray
+        numpy.ndarray of data values from the vdata numpy.ndarray at x- and
+        y-coordinate locations in pts.
+
+    Examples
+    --------
+    >>> import flopy
+    >>> vcell = flopy.plotutil.cell_value_points(xpts, xedge, yedge, head[0, :, :])
+
+    """
+
+    # make sure xedge and yedge are numpy arrays
+    if not isinstance(xedge, np.ndarray):
+        xedge = np.array(xedge)
+    if not isinstance(yedge, np.ndarray):
+        yedge = np.array(yedge)
+    if not isinstance(vdata, np.ndarray):
+        vdata = np.array(vdata)
+
+    vcell = []
+    for (xt, yt, _) in pts:
+        # find the modflow cell containing point
+        irow, jcol = findrowcolumn((xt, yt), xedge, yedge)
+        if irow >= 0 and jcol >= 0:
+            if np.isnan(vdata[irow, jcol]):
+                vcell.append(np.nan)
+            else:
+                v = np.asarray(vdata[irow, jcol])
+                vcell.append(v)
+
+    return np.array(vcell)
+
+
+def _set_coord_info(mg, xul, yul, xll, yll, rotation):
+    """
+
+    Parameters
+    ----------
+    mg : fp.discretization.Grid object
+    xul : float
+        upper left x-coordinate location
+    yul : float
+        upper left y-coordinate location
+    xll : float
+        lower left x-coordinate location
+    yll : float
+        lower left y-coordinate location
+    rotation : float
+        model grid rotation
+
+    Returns
+    -------
+    mg : fp.discretization.Grid object
+    """
+    import warnings
+    if xul is not None and yul is not None:
+        warnings.warn('xul/yul have been deprecated. Use xll/yll instead.',
+                      DeprecationWarning)
+        if rotation is not None:
+            mg._angrot = rotation
+
+        mg.set_coord_info(xoff=mg._xul_to_xll(xul),
+                          yoff=mg._yul_to_yll(yul),
+                          angrot=rotation)
+    elif xll is not None and yll is not None:
+        mg.set_coord_info(xoff=xll, yoff=yll, angrot=rotation)
+
+    elif rotation is not None:
+        mg.set_coord_info(xoff=xll, yoff=yll, angrot=rotation)
+
+    return mg
+
+
+def _depreciated_dis_handler(modelgrid, dis):
+    """
+    PlotMapView handler for the deprecated dis parameter
+    which adds top and botm information to the modelgrid
+
+    Parameters
+    ----------
+    modelgrid : fp.discretization.Grid object
+    dis : fp.modflow.ModflowDis object
+
+    Returns
+    -------
+    modelgrid : fp.discretization.Grid
+
+    """
+    # creates a new modelgrid instance with the dis information
+    from ..discretization import StructuredGrid, VertexGrid, UnstructuredGrid
+    import warnings
+    warnings.warn('the dis parameter has been deprecated.',
+                  PendingDeprecationWarning)
+    if modelgrid.grid_type == "vertex":
+        modelgrid = VertexGrid(modelgrid.vertices,
+                               modelgrid.cell2d,
+                               dis.top.array,
+                               dis.botm.array,
+                               idomain=modelgrid.idomain,
+                               xoff=modelgrid.xoffset,
+                               yoff=modelgrid.yoffset,
+                               angrot=modelgrid.angrot)
+    elif modelgrid.grid_type == "unstructured":
+        modelgrid = UnstructuredGrid(modelgrid._vertices,
+                                     modelgrid._iverts,
+                                     modelgrid._xc,
+                                     modelgrid._yc,
+                                     dis.top.array,
+                                     dis.botm.array,
+                                     idomain=modelgrid.idomain,
+                                     xoff=modelgrid.xoffset,
+                                     yoff=modelgrid.yoffset,
+                                     angrot=modelgrid.angrot)
+    else:
+        modelgrid = StructuredGrid(delc=dis.delc.array,
+                                   delr=dis.delr.array,
+                                   top=dis.top.array,
+                                   botm=dis.botm.array,
+                                   idomain=modelgrid.idomain,
+                                   xoff=modelgrid.xoffset,
+                                   yoff=modelgrid.yoffset,
+                                   angrot=modelgrid.angrot)
+    return modelgrid
+
+
+def advanced_package_bc_helper(pkg, modelgrid, kper):
+    """
+    Helper function for plotting boundary conditions from "advanced" packages
+
+    Parameters
+    ----------
+    pkg : flopy Package objects
+    modelgrid : flopy.discretization.Grid object
+    kper : int
+        zero-based stress period number
+
+    Returns
+    -------
+    idx : np.ndarray
+        array of zero-based cell locations for the package boundaries
+    """
+    if pkg.package_type in ('sfr', 'uzf'):
+        if pkg.parent.version == 'mf6':
+            mflist = pkg.packagedata.array
+            idx = np.array([list(i) for i in mflist['cellid']], dtype=int).T
+        else:
+            iuzfbnd = pkg.iuzfbnd.array
+            idx = np.where(iuzfbnd != 0)
+            idx = np.append([[0] * idx[-1].size], idx, axis=0)
+    elif pkg.package_type in ('lak', 'maw'):
+        if pkg.parent.version == "mf6":
+            mflist = pkg.connectiondata.array
+            idx = np.array([list(i) for i in mflist['cellid']], dtype=int).T
+        else:
+            lakarr = pkg.lakarr.array[kper]
+            idx = np.where(lakarr != 0)
+            idx = np.array(idx)
+    else:
+        raise NotImplementedError("Pkg {} not implemented for bc plotting"
+                                  .format(pkg.package_type))
+    return idx
diff --git a/flopy/plot/vcrosssection.py b/flopy/plot/vcrosssection.py
index b5007c1597..9ccbfde90d 100644
--- a/flopy/plot/vcrosssection.py
+++ b/flopy/plot/vcrosssection.py
@@ -1,739 +1,739 @@
-import numpy as np
-
-try:
-    import matplotlib.pyplot as plt
-except:
-    plt = None
-from flopy.plot import plotutil
-from flopy.utils import geometry
-from flopy.plot.crosssection import _CrossSection
-import warnings
-warnings.filterwarnings("ignore", category=RuntimeWarning)
-
-
-class _VertexCrossSection(_CrossSection):
-    """
-    Class to create a cross section of the model from a vertex
-    discretization.
-
-    Class is not to be instantiated by the user!
-
-    Parameters
-    ----------
-    ax : matplotlib.pyplot axis
-        The plot axis.  If not provided it, plt.gca() will be used.
- model : flopy.modflow object - flopy model object. (Default is None) - modelgrid : flopy.discretization.VertexGrid - Vertex model grid object - line : dict - Dictionary with either "row", "column", or "line" key. If key - is "row" or "column" key value should be the zero-based row or - column index for cross-section. If key is "line" value should - be an array of (x, y) tuples with vertices of cross-section. - Vertices should be in map coordinates consistent with xul, - yul, and rotation. - extent : tuple of floats - (xmin, xmax, ymin, ymax) will be used to specify axes limits. If None - then these will be calculated based on grid, coordinates, and rotation. - geographic_coords : bool - boolean flag to allow the user to plot cross section lines in - geographic coordinates. If False (default), cross section is plotted - as the distance along the cross section line. - - """ - def __init__(self, ax=None, model=None, modelgrid=None, - line=None, extent=None, geographic_coords=False): - super(_VertexCrossSection, self).__init__(ax=ax, model=model, - modelgrid=modelgrid, - geographic_coords= - geographic_coords) - - if line is None: - err_msg = 'line must be specified.' - raise Exception(err_msg) - - linekeys = [linekeys.lower() for linekeys in list(line.keys())] - - if len(linekeys) != 1: - err_msg = 'Either row, column, or line must be specified ' \ - 'in line dictionary.\nkeys specified: ' - for k in linekeys: - err_msg += '{} '.format(k) - raise Exception(err_msg) - - elif "line" not in linekeys: - err_msg = "only line can be specified in line dictionary " \ - "for vertex Discretization" - raise AssertionError(err_msg) - - onkey = linekeys[0] - - if ax is None: - self.ax = plt.gca() - else: - self.ax = ax - - self.direction = "xy" - # convert pts list to a numpy array - verts = line[onkey] - xp = [] - yp = [] - for [v1, v2] in verts: - xp.append(v1) - yp.append(v2) - - # unrotate and untransform modelgrid into modflow coordinates! - xp, yp = geometry.transform(xp, yp, - self.mg.xoffset, - self.mg.yoffset, - self.mg.angrot_radians, - inverse=True) - - self.xcellcenters, self.ycellcenters = \ - geometry.transform(self.mg.xcellcenters, - self.mg.ycellcenters, - self.mg.xoffset, self.mg.yoffset, - self.mg.angrot_radians, inverse=True) - - try: - self.xvertices, self.yvertices = \ - geometry.transform(self.mg.xvertices, - self.mg.yvertices, - self.mg.xoffset, self.mg.yoffset, - self.mg.angrot_radians, inverse=True) - except ValueError: - # irregular shapes in vertex grid ie. squares and triangles - xverts, yverts = plotutil.UnstructuredPlotUtilities.\ - irregular_shape_patch(self.mg.xvertices, self.mg.yvertices) - - self.xvertices, self.yvertices = \ - geometry.transform(xverts, yverts, - self.mg.xoffset, - self.mg.yoffset, - self.mg.angrot_radians, inverse=True) - - pts = [(xt, yt) for xt, yt in zip(xp, yp)] - self.pts = np.array(pts) - - # get points along the line - - self.xypts = plotutil.UnstructuredPlotUtilities.\ - line_intersect_grid(self.pts, - self.xvertices, - self.yvertices) - - if len(self.xypts) < 2: - s = 'cross-section cannot be created\n.' 
- s += ' less than 2 points intersect the model grid\n' - s += ' {} points intersect the grid.'.format(len(self.xypts)) - raise Exception(s) - - if self.geographic_coords: - # transform back to geographic coordinates - xypts = {} - for nn, pt in self.xypts.items(): - xp = [t[0] for t in pt] - yp = [t[1] for t in pt] - xp, yp = geometry.transform(xp, yp, self.mg.xoffset, - self.mg.yoffset, - self.mg.angrot_radians) - xypts[nn] = [(xt, yt) for xt, yt in zip(xp, yp)] - - self.xypts = xypts - - top = self.mg.top - top.shape = (1, -1) - botm = self.mg.botm - nlay = len(botm) - ncpl = self.mg.ncpl - - elev = list(top.copy()) - for k in range(nlay): - elev.append(botm[k, :]) - - self.elev = np.array(elev) - - self.idomain = self.mg.idomain - if self.mg.idomain is None: - self.idomain = np.ones((nlay, ncpl), dtype=int) - - # choose a projection direction based on maximum information - xpts = [] - ypts = [] - for nn, verts in self.xypts.items(): - for v in verts: - xpts.append(v[0]) - ypts.append(v[1]) - - if np.max(xpts) - np.min(xpts) > np.max(ypts) - np.min(ypts): - self.direction = "x" - else: - self.direction = "y" - - # make vertex array based on projection direction - self.projpts = self.set_zpts(None) - - # Create cross-section extent - if extent is None: - self.extent = self.get_extent() - else: - self.extent = extent - - self.layer0 = None - self.layer1 = None - - self.d = {i: (np.min(np.array(v).T[0]), - np.max(np.array(v).T[0])) for - i, v in sorted(self.projpts.items())} - - self.xpts = None - self.active = None - self.ncb = None - self.laycbd = None - self.zpts = None - self.xcentergrid = None - self.zcentergrid = None - self.geographic_xcentergrid = None - self.geographic_xpts = None - - # Set axis limits - self.ax.set_xlim(self.extent[0], self.extent[1]) - self.ax.set_ylim(self.extent[2], self.extent[3]) - - def plot_array(self, a, masked_values=None, head=None, **kwargs): - """ - Plot a three-dimensional array as a patch collection. - - Parameters - ---------- - a : numpy.ndarray - Three-dimensional array to plot. - masked_values : iterable of floats, ints - Values to mask. - head : numpy.ndarray - Three-dimensional array to set top of patches to the minimum - of the top of a layer or the head value. Used to create - patches that conform to water-level elevations. - **kwargs : dictionary - keyword arguments passed to matplotlib.collections.PatchCollection - - Returns - ------- - patches : matplotlib.collections.PatchCollection - - """ - if 'ax' in kwargs: - ax = kwargs.pop('ax') - else: - ax = self.ax - - if not isinstance(a, np.ndarray): - a = np.array(a) - - if a.ndim > 1: - a = np.ravel(a) - - if masked_values is not None: - for mval in masked_values: - a = np.ma.masked_values(a, mval) - - if isinstance(head, np.ndarray): - projpts = self.set_zpts(np.ravel(head)) - else: - projpts = self.projpts - - pc = self.get_grid_patch_collection(projpts, a, **kwargs) - if pc is not None: - ax.add_collection(pc) - ax.set_xlim(self.extent[0], self.extent[1]) - ax.set_ylim(self.extent[2], self.extent[3]) - - return pc - - def plot_surface(self, a, masked_values=None, **kwargs): - """ - Plot a two- or three-dimensional array as line(s). - - Parameters - ---------- - a : numpy.ndarray - Two- or three-dimensional array to plot. - masked_values : iterable of floats, ints - Values to mask. 
- **kwargs : dictionary - keyword arguments passed to matplotlib.pyplot.plot - - Returns - ------- - plot : list containing matplotlib.plot objects - """ - if 'ax' in kwargs: - ax = kwargs.pop('ax') - else: - ax = self.ax - - if 'color' in kwargs: - color = kwargs.pop('color') - elif 'c' in kwargs: - color = kwargs.pop('c') - else: - color = 'b' - - if not isinstance(a, np.ndarray): - a = np.array(a) - - if a.ndim > 1: - a = np.ravel(a) - - if a.size % self.mg.ncpl != 0: - raise AssertionError("Array size must be a multiple of ncpl") - - if masked_values is not None: - for mval in masked_values: - a = np.ma.masked_values(a, mval) - - data = [] - lay_data = [] - d = [] - lay_d = [] - dim = self.mg.ncpl - for cell, verts in sorted(self.projpts.items()): - - if cell >= a.size: - continue - elif np.isnan(a[cell]): - continue - elif a[cell] is np.ma.masked: - continue - - if cell >= dim: - data.append(lay_data) - d.append(lay_d) - dim += self.mg.ncpl - lay_data = [(a[cell], a[cell])] - lay_d = [self.d[cell]] - else: - lay_data.append((a[cell], a[cell])) - lay_d.append(self.d[cell]) - - if lay_data: - data.append(lay_data) - d.append(lay_d) - - data = np.array(data) - d = np.array(d) - - plot = [] - for k in range(data.shape[0]): - if ax is None: - ax = plt.gca() - for ix, _ in enumerate(data[k]): - ax.plot(d[k, ix], data[k, ix], color=color, **kwargs) - - ax.set_xlim(self.extent[0], self.extent[1]) - ax.set_ylim(self.extent[2], self.extent[3]) - plot.append(ax) - - return plot - - def plot_fill_between(self, a, colors=('blue', 'red'), - masked_values=None, head=None, **kwargs): - """ - Plot a three-dimensional array as lines. - - Parameters - ---------- - a : numpy.ndarray - Three-dimensional array to plot. - colors: list - matplotlib fill colors, two required - masked_values : iterable of floats, ints - Values to mask. - head : numpy.ndarray - Three-dimensional array to set top of patches to the minimum - of the top of a layer or the head value. Used to create - patches that conform to water-level elevations. - **kwargs : dictionary - keyword arguments passed to matplotlib.pyplot.plot - - Returns - ------- - plot : list containing matplotlib.fillbetween objects - - """ - if "ax" in kwargs: - ax = kwargs.pop('ax') - else: - ax = self.ax - - if not isinstance(a, np.ndarray): - a = np.array(a) - - a = np.ravel(a) - - if masked_values is not None: - for mval in masked_values: - a = np.ma.masked_values(a, mval) - - if isinstance(head, np.ndarray): - projpts = self.set_zpts(head) - else: - projpts = self.projpts - - plot = [] - for cell, verts in sorted(projpts.items()): - if cell >= a.size: - continue - elif np.isnan(a[cell]): - continue - elif a[cell] is np.ma.masked: - continue - - x = list(set(np.array(verts.T[0]))) - y1 = np.max(np.array(verts.T[1])) - y2 = np.min(np.array(verts.T[1])) - v = a[cell] - - if v > y1: - v = y1 - - elif v < y2: - v = y2 - - v = [v] * len(x) - - x = np.array(x) - plot.append(ax.fill_between(x, y1, v, color=colors[0], **kwargs)) - plot.append(ax.fill_between(x, v, y2, color=colors[1], **kwargs)) - - return plot - - def contour_array(self, a, masked_values=None, head=None, **kwargs): - """ - Contour a two-dimensional array. - - Parameters - ---------- - a : numpy.ndarray - Three-dimensional array to plot. - masked_values : iterable of floats, ints - Values to mask. - head : numpy.ndarray - Three-dimensional array to set top of patches to the minimum - of the top of a layer or the head value. Used to create - patches that conform to water-level elevations. 
- **kwargs : dictionary - keyword arguments passed to matplotlib.pyplot.contour - - Returns - ------- - contour_set : matplotlib.pyplot.contour - - """ - import matplotlib.tri as tri - - if not isinstance(a, np.ndarray): - a = np.array(a) - - if a.ndim > 1: - a = np.ravel(a) - - if 'ax' in kwargs: - ax = kwargs.pop('ax') - else: - ax = self.ax - - xcenters = [np.mean(np.array(v).T[0]) for i, v - in sorted(self.projpts.items())] - - plotarray = np.array([a[cell] for cell - in sorted(self.projpts)]) - - # work around for tri-contour ignore vmin & vmax - # necessary for the tri-contour NaN issue fix - if "levels" not in kwargs: - if "vmin" not in kwargs: - vmin = np.nanmin(plotarray) - else: - vmin = kwargs.pop("vmin") - if "vmax" not in kwargs: - vmax = np.nanmax(plotarray) - else: - vmax = kwargs.pop('vmax') - - levels = np.linspace(vmin, vmax, 7) - kwargs['levels'] = levels - - # workaround for tri-contour nan issue - plotarray[np.isnan(plotarray)] = -2**31 - if masked_values is None: - masked_values = [-2**31] - else: - masked_values = list(masked_values) - if -2**31 not in masked_values: - masked_values.append(-2**31) - - ismasked = None - if masked_values is not None: - for mval in masked_values: - if ismasked is None: - ismasked = np.isclose(plotarray, mval) - else: - t = np.isclose(plotarray, mval) - ismasked += t - - if isinstance(head, np.ndarray): - zcenters = self.set_zcentergrid(np.ravel(head)) - else: - zcenters = [np.mean(np.array(v).T[1]) for i, v - in sorted(self.projpts.items())] - - plot_triplot = False - if 'plot_triplot' in kwargs: - plot_triplot = kwargs.pop('plot_triplot') - - if 'extent' in kwargs: - extent = kwargs.pop('extent') - - idx = (xcenters >= extent[0]) & ( - xcenters <= extent[1]) & ( - zcenters >= extent[2]) & ( - zcenters <= extent[3]) - plotarray = plotarray[idx].flatten() - xcenters = xcenters[idx].flatten() - zcenters = zcenters[idx].flatten() - - triang = tri.Triangulation(xcenters, zcenters) - - if ismasked is not None: - ismasked = ismasked.flatten() - mask = np.any(np.where(ismasked[triang.triangles], - True, False), axis=1) - triang.set_mask(mask) - - contour_set = ax.tricontour(triang, plotarray, **kwargs) - - if plot_triplot: - ax.triplot(triang, color="black", marker="o", lw=0.75) - - ax.set_xlim(self.extent[0], self.extent[1]) - ax.set_ylim(self.extent[2], self.extent[3]) - - return contour_set - - def plot_inactive(self): - raise NotImplementedError("Function must be called in PlotCrossSection") - - def plot_ibound(self): - raise NotImplementedError("Function must be called in PlotCrossSection") - - def plot_grid(self): - raise NotImplementedError("Function must be called in PlotCrossSection") - - def plot_bc(self): - raise NotImplementedError("Function must be called in PlotCrossSection") - - def plot_specific_discharge(self): - raise NotImplementedError("Function must be called in PlotCrossSection") - - def plot_discharge(self): - raise NotImplementedError("plot_specific_discharge must be " - "used for VertexGrid models") - - @classmethod - def get_grid_patch_collection(cls, projpts, plotarray, **kwargs): - """ - Get a PatchCollection of plotarray in unmasked cells - - Parameters - ---------- - projpts : dict - dictionary defined by node number which contains model patch vertices. - plotarray : numpy.ndarray - One-dimensional array to attach to the Patch Collection. 
- **kwargs : dictionary - keyword arguments passed to matplotlib.collections.PatchCollection - - Returns - ------- - patches : matplotlib.collections.PatchCollection - - """ - from matplotlib.patches import Polygon - from matplotlib.collections import PatchCollection - - if 'vmin' in kwargs: - vmin = kwargs.pop('vmin') - else: - vmin = None - if 'vmax' in kwargs: - vmax = kwargs.pop('vmax') - else: - vmax = None - - rectcol = [] - data = [] - for cell, verts in sorted(projpts.items()): - verts = plotutil.UnstructuredPlotUtilities\ - .arctan2(np.array(verts)) - - if np.isnan(plotarray[cell]): - continue - elif plotarray[cell] is np.ma.masked: - continue - - rectcol.append(Polygon(verts, closed=True)) - data.append(plotarray[cell]) - - if len(rectcol) > 0: - patches = PatchCollection(rectcol, **kwargs) - patches.set_array(np.array(data)) - patches.set_clim(vmin, vmax) - - else: - patches = None - - return patches - - def get_grid_line_collection(self, **kwargs): - """ - Get a LineCollection of the grid - - Parameters - ---------- - **kwargs : dictionary - keyword arguments passed to matplotlib.collections.LineCollection - - Returns - ------- - linecollection : matplotlib.collections.LineCollection - """ - from matplotlib.patches import Polygon - from matplotlib.collections import PatchCollection - - color = "grey" - if 'ec' in kwargs: - color = kwargs.pop('ec') - if color in kwargs: - color = kwargs.pop('color') - - rectcol = [] - for _, verts in sorted(self.projpts.items()): - verts = plotutil.UnstructuredPlotUtilities\ - .arctan2(np.array(verts)) - - rectcol.append(Polygon(verts, closed=True)) - - if len(rectcol) > 0: - patches = PatchCollection(rectcol, edgecolor=color, - facecolor='none', **kwargs) - else: - patches = None - - return patches - - def set_zpts(self, vs): - """ - Get an array of projection vertices corrected for - elevations based on minimum of cell elevation - (self.elev) or passed vs numpy.ndarray - - Parameters - ---------- - vs : numpy.ndarray - Two-dimensional array to plot. - - Returns - ------- - zpts : dict - - """ - # make vertex array based on projection direction - if vs is not None: - if not isinstance(vs, np.ndarray): - vs = np.array(vs) - - if self.direction == "x": - xyix = 0 - else: - xyix = -1 - - projpts = {} - for k in range(1, self.mg.nlay + 1): - top = self.elev[k - 1, :] - botm = self.elev[k, :] - adjnn = (k - 1) * self.mg.ncpl - d0 = 0 - for nn, verts in sorted(self.xypts.items(), - key=lambda q: q[-1][xyix][xyix]): - if vs is None: - t = top[nn] - else: - t = vs[nn] - if top[nn] < vs[nn]: - t = top[nn] - b = botm[nn] - if self.geographic_coords: - if self.direction == "x": - projt = [(v[0], t) for v in verts] - projb = [(v[0], b) for v in verts] - else: - projt = [(v[1], t) for v in verts] - projb = [(v[1], b) for v in verts] - else: - verts = np.array(verts).T - a2 = (np.max(verts[0]) - np.min(verts[0])) ** 2 - b2 = (np.max(verts[1]) - np.min(verts[1])) ** 2 - c = np.sqrt(a2 + b2) - d1 = d0 + c - projt = [(d0, t), (d1, t)] - projb = [(d0, b), (d1, b)] - d0 += c - - projpts[nn + adjnn] = projt + projb - - return projpts - - def set_zcentergrid(self, vs, kstep=1): - """ - Get an array of z elevations at the center of a cell that is based - on minimum of cell top elevation (self.elev) or passed vs numpy.ndarray - - Parameters - ---------- - vs : numpy.ndarray - Three-dimensional array to plot. 
-
-        Returns
-        -------
-        zcentergrid : numpy.ndarray
-
-        """
-        verts = self.set_zpts(vs)
-        zcenters =[np.mean(np.array(v).T[1]) for i, v
-                   in sorted(verts.items())
-                   if (i // self.mg.ncpl) % kstep == 0]
-        return zcenters
-
-    def get_extent(self):
-        """
-        Get the extent of the rotated and offset grid
-
-        Returns
-        -------
-        tuple : (xmin, xmax, ymin, ymax)
-        """
-        xpts = []
-        for _, verts in self.projpts.items():
-            for v in verts:
-                xpts.append(v[0])
-
-        xmin = np.min(xpts)
-        xmax = np.max(xpts)
-
-        ymin = np.min(self.elev)
-        ymax = np.max(self.elev)
-
-        return (xmin, xmax, ymin, ymax)
-
+import numpy as np
+
+try:
+    import matplotlib.pyplot as plt
+except:
+    plt = None
+from flopy.plot import plotutil
+from flopy.utils import geometry
+from flopy.plot.crosssection import _CrossSection
+import warnings
+warnings.filterwarnings("ignore", category=RuntimeWarning)
+
+
+class _VertexCrossSection(_CrossSection):
+    """
+    Class to create a cross section of the model from a vertex
+    discretization.
+
+    Class is not to be instantiated by the user!
+
+    Parameters
+    ----------
+    ax : matplotlib.pyplot axis
+        The plot axis. If not provided, plt.gca() will be used.
+    model : flopy.modflow object
+        flopy model object. (Default is None)
+    modelgrid : flopy.discretization.VertexGrid
+        Vertex model grid object
+    line : dict
+        Dictionary with either "row", "column", or "line" key. If key
+        is "row" or "column" key value should be the zero-based row or
+        column index for cross-section. If key is "line" value should
+        be an array of (x, y) tuples with vertices of cross-section.
+        Vertices should be in map coordinates consistent with xul,
+        yul, and rotation.
+    extent : tuple of floats
+        (xmin, xmax, ymin, ymax) will be used to specify axes limits. If None
+        then these will be calculated based on grid, coordinates, and rotation.
+    geographic_coords : bool
+        boolean flag to allow the user to plot cross section lines in
+        geographic coordinates. If False (default), cross section is plotted
+        as the distance along the cross section line.
+
+    """
+    def __init__(self, ax=None, model=None, modelgrid=None,
+                 line=None, extent=None, geographic_coords=False):
+        super(_VertexCrossSection, self).__init__(ax=ax, model=model,
+                                                  modelgrid=modelgrid,
+                                                  geographic_coords=
+                                                  geographic_coords)
+
+        if line is None:
+            err_msg = 'line must be specified.'
+            raise Exception(err_msg)
+
+        linekeys = [linekeys.lower() for linekeys in list(line.keys())]
+
+        if len(linekeys) != 1:
+            err_msg = 'Either row, column, or line must be specified ' \
+                      'in line dictionary.\nkeys specified: '
+            for k in linekeys:
+                err_msg += '{} '.format(k)
+            raise Exception(err_msg)
+
+        elif "line" not in linekeys:
+            err_msg = "only line can be specified in line dictionary " \
+                      "for vertex Discretization"
+            raise AssertionError(err_msg)
+
+        onkey = linekeys[0]
+
+        if ax is None:
+            self.ax = plt.gca()
+        else:
+            self.ax = ax
+
+        self.direction = "xy"
+        # convert pts list to a numpy array
+        verts = line[onkey]
+        xp = []
+        yp = []
+        for [v1, v2] in verts:
+            xp.append(v1)
+            yp.append(v2)
+
+        # unrotate and untransform modelgrid into modflow coordinates!
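+        # (the line vertices arrive in map coordinates; the inverse
+        # transform below removes the xoff/yoff translation and the
+        # angrot rotation so the line can be intersected with the grid
+        # in local model coordinates)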
+        xp, yp = geometry.transform(xp, yp,
+                                    self.mg.xoffset,
+                                    self.mg.yoffset,
+                                    self.mg.angrot_radians,
+                                    inverse=True)
+
+        self.xcellcenters, self.ycellcenters = \
+            geometry.transform(self.mg.xcellcenters,
+                               self.mg.ycellcenters,
+                               self.mg.xoffset, self.mg.yoffset,
+                               self.mg.angrot_radians, inverse=True)
+
+        try:
+            self.xvertices, self.yvertices = \
+                geometry.transform(self.mg.xvertices,
+                                   self.mg.yvertices,
+                                   self.mg.xoffset, self.mg.yoffset,
+                                   self.mg.angrot_radians, inverse=True)
+        except ValueError:
+            # irregular shapes in vertex grid, i.e., squares and triangles
+            xverts, yverts = plotutil.UnstructuredPlotUtilities.\
+                irregular_shape_patch(self.mg.xvertices, self.mg.yvertices)
+
+            self.xvertices, self.yvertices = \
+                geometry.transform(xverts, yverts,
+                                   self.mg.xoffset,
+                                   self.mg.yoffset,
+                                   self.mg.angrot_radians, inverse=True)
+
+        pts = [(xt, yt) for xt, yt in zip(xp, yp)]
+        self.pts = np.array(pts)
+
+        # get points along the line
+
+        self.xypts = plotutil.UnstructuredPlotUtilities.\
+            line_intersect_grid(self.pts,
+                                self.xvertices,
+                                self.yvertices)
+
+        if len(self.xypts) < 2:
+            s = 'cross-section cannot be created.\n'
+            s += ' less than 2 points intersect the model grid\n'
+            s += ' {} points intersect the grid.'.format(len(self.xypts))
+            raise Exception(s)
+
+        if self.geographic_coords:
+            # transform back to geographic coordinates
+            xypts = {}
+            for nn, pt in self.xypts.items():
+                xp = [t[0] for t in pt]
+                yp = [t[1] for t in pt]
+                xp, yp = geometry.transform(xp, yp, self.mg.xoffset,
+                                            self.mg.yoffset,
+                                            self.mg.angrot_radians)
+                xypts[nn] = [(xt, yt) for xt, yt in zip(xp, yp)]
+
+            self.xypts = xypts
+
+        top = self.mg.top
+        top.shape = (1, -1)
+        botm = self.mg.botm
+        nlay = len(botm)
+        ncpl = self.mg.ncpl
+
+        elev = list(top.copy())
+        for k in range(nlay):
+            elev.append(botm[k, :])
+
+        self.elev = np.array(elev)
+
+        self.idomain = self.mg.idomain
+        if self.mg.idomain is None:
+            self.idomain = np.ones((nlay, ncpl), dtype=int)
+
+        # choose a projection direction based on maximum information
+        xpts = []
+        ypts = []
+        for nn, verts in self.xypts.items():
+            for v in verts:
+                xpts.append(v[0])
+                ypts.append(v[1])
+
+        if np.max(xpts) - np.min(xpts) > np.max(ypts) - np.min(ypts):
+            self.direction = "x"
+        else:
+            self.direction = "y"
+
+        # make vertex array based on projection direction
+        self.projpts = self.set_zpts(None)
+
+        # Create cross-section extent
+        if extent is None:
+            self.extent = self.get_extent()
+        else:
+            self.extent = extent
+
+        self.layer0 = None
+        self.layer1 = None
+
+        self.d = {i: (np.min(np.array(v).T[0]),
+                      np.max(np.array(v).T[0])) for
+                  i, v in sorted(self.projpts.items())}
+
+        self.xpts = None
+        self.active = None
+        self.ncb = None
+        self.laycbd = None
+        self.zpts = None
+        self.xcentergrid = None
+        self.zcentergrid = None
+        self.geographic_xcentergrid = None
+        self.geographic_xpts = None
+
+        # Set axis limits
+        self.ax.set_xlim(self.extent[0], self.extent[1])
+        self.ax.set_ylim(self.extent[2], self.extent[3])
+
+    def plot_array(self, a, masked_values=None, head=None, **kwargs):
+        """
+        Plot a three-dimensional array as a patch collection.
+
+        Parameters
+        ----------
+        a : numpy.ndarray
+            Three-dimensional array to plot.
+        masked_values : iterable of floats, ints
+            Values to mask.
+        head : numpy.ndarray
+            Three-dimensional array to set top of patches to the minimum
+            of the top of a layer or the head value. Used to create
+            patches that conform to water-level elevations.
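+            (Default is None.)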
+ **kwargs : dictionary + keyword arguments passed to matplotlib.collections.PatchCollection + + Returns + ------- + patches : matplotlib.collections.PatchCollection + + """ + if 'ax' in kwargs: + ax = kwargs.pop('ax') + else: + ax = self.ax + + if not isinstance(a, np.ndarray): + a = np.array(a) + + if a.ndim > 1: + a = np.ravel(a) + + if masked_values is not None: + for mval in masked_values: + a = np.ma.masked_values(a, mval) + + if isinstance(head, np.ndarray): + projpts = self.set_zpts(np.ravel(head)) + else: + projpts = self.projpts + + pc = self.get_grid_patch_collection(projpts, a, **kwargs) + if pc is not None: + ax.add_collection(pc) + ax.set_xlim(self.extent[0], self.extent[1]) + ax.set_ylim(self.extent[2], self.extent[3]) + + return pc + + def plot_surface(self, a, masked_values=None, **kwargs): + """ + Plot a two- or three-dimensional array as line(s). + + Parameters + ---------- + a : numpy.ndarray + Two- or three-dimensional array to plot. + masked_values : iterable of floats, ints + Values to mask. + **kwargs : dictionary + keyword arguments passed to matplotlib.pyplot.plot + + Returns + ------- + plot : list containing matplotlib.plot objects + """ + if 'ax' in kwargs: + ax = kwargs.pop('ax') + else: + ax = self.ax + + if 'color' in kwargs: + color = kwargs.pop('color') + elif 'c' in kwargs: + color = kwargs.pop('c') + else: + color = 'b' + + if not isinstance(a, np.ndarray): + a = np.array(a) + + if a.ndim > 1: + a = np.ravel(a) + + if a.size % self.mg.ncpl != 0: + raise AssertionError("Array size must be a multiple of ncpl") + + if masked_values is not None: + for mval in masked_values: + a = np.ma.masked_values(a, mval) + + data = [] + lay_data = [] + d = [] + lay_d = [] + dim = self.mg.ncpl + for cell, verts in sorted(self.projpts.items()): + + if cell >= a.size: + continue + elif np.isnan(a[cell]): + continue + elif a[cell] is np.ma.masked: + continue + + if cell >= dim: + data.append(lay_data) + d.append(lay_d) + dim += self.mg.ncpl + lay_data = [(a[cell], a[cell])] + lay_d = [self.d[cell]] + else: + lay_data.append((a[cell], a[cell])) + lay_d.append(self.d[cell]) + + if lay_data: + data.append(lay_data) + d.append(lay_d) + + data = np.array(data) + d = np.array(d) + + plot = [] + for k in range(data.shape[0]): + if ax is None: + ax = plt.gca() + for ix, _ in enumerate(data[k]): + ax.plot(d[k, ix], data[k, ix], color=color, **kwargs) + + ax.set_xlim(self.extent[0], self.extent[1]) + ax.set_ylim(self.extent[2], self.extent[3]) + plot.append(ax) + + return plot + + def plot_fill_between(self, a, colors=('blue', 'red'), + masked_values=None, head=None, **kwargs): + """ + Plot a three-dimensional array as lines. + + Parameters + ---------- + a : numpy.ndarray + Three-dimensional array to plot. + colors: list + matplotlib fill colors, two required + masked_values : iterable of floats, ints + Values to mask. + head : numpy.ndarray + Three-dimensional array to set top of patches to the minimum + of the top of a layer or the head value. Used to create + patches that conform to water-level elevations. 
+ **kwargs : dictionary + keyword arguments passed to matplotlib.pyplot.plot + + Returns + ------- + plot : list containing matplotlib.fillbetween objects + + """ + if "ax" in kwargs: + ax = kwargs.pop('ax') + else: + ax = self.ax + + if not isinstance(a, np.ndarray): + a = np.array(a) + + a = np.ravel(a) + + if masked_values is not None: + for mval in masked_values: + a = np.ma.masked_values(a, mval) + + if isinstance(head, np.ndarray): + projpts = self.set_zpts(head) + else: + projpts = self.projpts + + plot = [] + for cell, verts in sorted(projpts.items()): + if cell >= a.size: + continue + elif np.isnan(a[cell]): + continue + elif a[cell] is np.ma.masked: + continue + + x = list(set(np.array(verts.T[0]))) + y1 = np.max(np.array(verts.T[1])) + y2 = np.min(np.array(verts.T[1])) + v = a[cell] + + if v > y1: + v = y1 + + elif v < y2: + v = y2 + + v = [v] * len(x) + + x = np.array(x) + plot.append(ax.fill_between(x, y1, v, color=colors[0], **kwargs)) + plot.append(ax.fill_between(x, v, y2, color=colors[1], **kwargs)) + + return plot + + def contour_array(self, a, masked_values=None, head=None, **kwargs): + """ + Contour a two-dimensional array. + + Parameters + ---------- + a : numpy.ndarray + Three-dimensional array to plot. + masked_values : iterable of floats, ints + Values to mask. + head : numpy.ndarray + Three-dimensional array to set top of patches to the minimum + of the top of a layer or the head value. Used to create + patches that conform to water-level elevations. + **kwargs : dictionary + keyword arguments passed to matplotlib.pyplot.contour + + Returns + ------- + contour_set : matplotlib.pyplot.contour + + """ + import matplotlib.tri as tri + + if not isinstance(a, np.ndarray): + a = np.array(a) + + if a.ndim > 1: + a = np.ravel(a) + + if 'ax' in kwargs: + ax = kwargs.pop('ax') + else: + ax = self.ax + + xcenters = [np.mean(np.array(v).T[0]) for i, v + in sorted(self.projpts.items())] + + plotarray = np.array([a[cell] for cell + in sorted(self.projpts)]) + + # work around for tri-contour ignore vmin & vmax + # necessary for the tri-contour NaN issue fix + if "levels" not in kwargs: + if "vmin" not in kwargs: + vmin = np.nanmin(plotarray) + else: + vmin = kwargs.pop("vmin") + if "vmax" not in kwargs: + vmax = np.nanmax(plotarray) + else: + vmax = kwargs.pop('vmax') + + levels = np.linspace(vmin, vmax, 7) + kwargs['levels'] = levels + + # workaround for tri-contour nan issue + plotarray[np.isnan(plotarray)] = -2**31 + if masked_values is None: + masked_values = [-2**31] + else: + masked_values = list(masked_values) + if -2**31 not in masked_values: + masked_values.append(-2**31) + + ismasked = None + if masked_values is not None: + for mval in masked_values: + if ismasked is None: + ismasked = np.isclose(plotarray, mval) + else: + t = np.isclose(plotarray, mval) + ismasked += t + + if isinstance(head, np.ndarray): + zcenters = self.set_zcentergrid(np.ravel(head)) + else: + zcenters = [np.mean(np.array(v).T[1]) for i, v + in sorted(self.projpts.items())] + + plot_triplot = False + if 'plot_triplot' in kwargs: + plot_triplot = kwargs.pop('plot_triplot') + + if 'extent' in kwargs: + extent = kwargs.pop('extent') + + idx = (xcenters >= extent[0]) & ( + xcenters <= extent[1]) & ( + zcenters >= extent[2]) & ( + zcenters <= extent[3]) + plotarray = plotarray[idx].flatten() + xcenters = xcenters[idx].flatten() + zcenters = zcenters[idx].flatten() + + triang = tri.Triangulation(xcenters, zcenters) + + if ismasked is not None: + ismasked = ismasked.flatten() + mask = 
np.any(np.where(ismasked[triang.triangles],
+                                   True, False), axis=1)
+            triang.set_mask(mask)
+
+        contour_set = ax.tricontour(triang, plotarray, **kwargs)
+
+        if plot_triplot:
+            ax.triplot(triang, color="black", marker="o", lw=0.75)
+
+        ax.set_xlim(self.extent[0], self.extent[1])
+        ax.set_ylim(self.extent[2], self.extent[3])
+
+        return contour_set
+
+    def plot_inactive(self):
+        raise NotImplementedError("Function must be called in PlotCrossSection")
+
+    def plot_ibound(self):
+        raise NotImplementedError("Function must be called in PlotCrossSection")
+
+    def plot_grid(self):
+        raise NotImplementedError("Function must be called in PlotCrossSection")
+
+    def plot_bc(self):
+        raise NotImplementedError("Function must be called in PlotCrossSection")
+
+    def plot_specific_discharge(self):
+        raise NotImplementedError("Function must be called in PlotCrossSection")
+
+    def plot_discharge(self):
+        raise NotImplementedError("plot_specific_discharge must be "
+                                  "used for VertexGrid models")
+
+    @classmethod
+    def get_grid_patch_collection(cls, projpts, plotarray, **kwargs):
+        """
+        Get a PatchCollection of plotarray in unmasked cells
+
+        Parameters
+        ----------
+        projpts : dict
+            dictionary defined by node number which contains model patch vertices.
+        plotarray : numpy.ndarray
+            One-dimensional array to attach to the Patch Collection.
+        **kwargs : dictionary
+            keyword arguments passed to matplotlib.collections.PatchCollection
+
+        Returns
+        -------
+        patches : matplotlib.collections.PatchCollection
+
+        """
+        from matplotlib.patches import Polygon
+        from matplotlib.collections import PatchCollection
+
+        if 'vmin' in kwargs:
+            vmin = kwargs.pop('vmin')
+        else:
+            vmin = None
+        if 'vmax' in kwargs:
+            vmax = kwargs.pop('vmax')
+        else:
+            vmax = None
+
+        rectcol = []
+        data = []
+        for cell, verts in sorted(projpts.items()):
+            verts = plotutil.UnstructuredPlotUtilities\
+                .arctan2(np.array(verts))
+
+            if np.isnan(plotarray[cell]):
+                continue
+            elif plotarray[cell] is np.ma.masked:
+                continue
+
+            rectcol.append(Polygon(verts, closed=True))
+            data.append(plotarray[cell])
+
+        if len(rectcol) > 0:
+            patches = PatchCollection(rectcol, **kwargs)
+            patches.set_array(np.array(data))
+            patches.set_clim(vmin, vmax)
+
+        else:
+            patches = None
+
+        return patches
+
+    def get_grid_line_collection(self, **kwargs):
+        """
+        Get a PatchCollection that renders the grid lines
+
+        Parameters
+        ----------
+        **kwargs : dictionary
+            keyword arguments passed to matplotlib.collections.PatchCollection
+
+        Returns
+        -------
+        patches : matplotlib.collections.PatchCollection
+            PatchCollection of Polygons with facecolor='none' that draws
+            the grid
+        """
+        from matplotlib.patches import Polygon
+        from matplotlib.collections import PatchCollection
+
+        color = "grey"
+        if 'ec' in kwargs:
+            color = kwargs.pop('ec')
+        if 'color' in kwargs:
+            color = kwargs.pop('color')
+
+        rectcol = []
+        for _, verts in sorted(self.projpts.items()):
+            verts = plotutil.UnstructuredPlotUtilities\
+                .arctan2(np.array(verts))
+
+            rectcol.append(Polygon(verts, closed=True))
+
+        if len(rectcol) > 0:
+            patches = PatchCollection(rectcol, edgecolor=color,
+                                      facecolor='none', **kwargs)
+        else:
+            patches = None
+
+        return patches
+
+    def set_zpts(self, vs):
+        """
+        Get an array of projection vertices corrected for elevation, using
+        the minimum of the cell top elevation (self.elev) or the value in
+        the passed vs numpy.ndarray
+
+        Parameters
+        ----------
+        vs : numpy.ndarray
+            Two-dimensional array to plot.
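+            If None, the cell top elevations stored in self.elev are used.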
+ + Returns + ------- + zpts : dict + + """ + # make vertex array based on projection direction + if vs is not None: + if not isinstance(vs, np.ndarray): + vs = np.array(vs) + + if self.direction == "x": + xyix = 0 + else: + xyix = -1 + + projpts = {} + for k in range(1, self.mg.nlay + 1): + top = self.elev[k - 1, :] + botm = self.elev[k, :] + adjnn = (k - 1) * self.mg.ncpl + d0 = 0 + for nn, verts in sorted(self.xypts.items(), + key=lambda q: q[-1][xyix][xyix]): + if vs is None: + t = top[nn] + else: + t = vs[nn] + if top[nn] < vs[nn]: + t = top[nn] + b = botm[nn] + if self.geographic_coords: + if self.direction == "x": + projt = [(v[0], t) for v in verts] + projb = [(v[0], b) for v in verts] + else: + projt = [(v[1], t) for v in verts] + projb = [(v[1], b) for v in verts] + else: + verts = np.array(verts).T + a2 = (np.max(verts[0]) - np.min(verts[0])) ** 2 + b2 = (np.max(verts[1]) - np.min(verts[1])) ** 2 + c = np.sqrt(a2 + b2) + d1 = d0 + c + projt = [(d0, t), (d1, t)] + projb = [(d0, b), (d1, b)] + d0 += c + + projpts[nn + adjnn] = projt + projb + + return projpts + + def set_zcentergrid(self, vs, kstep=1): + """ + Get an array of z elevations at the center of a cell that is based + on minimum of cell top elevation (self.elev) or passed vs numpy.ndarray + + Parameters + ---------- + vs : numpy.ndarray + Three-dimensional array to plot. + + Returns + ------- + zcentergrid : numpy.ndarray + + """ + verts = self.set_zpts(vs) + zcenters =[np.mean(np.array(v).T[1]) for i, v + in sorted(verts.items()) + if (i // self.mg.ncpl) % kstep == 0] + return zcenters + + def get_extent(self): + """ + Get the extent of the rotated and offset grid + + Returns + ------- + tuple : (xmin, xmax, ymin, ymax) + """ + xpts = [] + for _, verts in self.projpts.items(): + for v in verts: + xpts.append(v[0]) + + xmin = np.min(xpts) + xmax = np.max(xpts) + + ymin = np.min(self.elev) + ymax = np.max(self.elev) + + return (xmin, xmax, ymin, ymax) + diff --git a/flopy/seawat/__init__.py b/flopy/seawat/__init__.py index 53254670ba..e0eaf29f33 100644 --- a/flopy/seawat/__init__.py +++ b/flopy/seawat/__init__.py @@ -1,4 +1,4 @@ -from .swt import Seawat -from .swtvdf import SeawatVdf -from .swtvsc import SeawatVsc - +from .swt import Seawat +from .swtvdf import SeawatVdf +from .swtvsc import SeawatVsc + diff --git a/flopy/seawat/swt.py b/flopy/seawat/swt.py index da3d87e645..109dda448c 100644 --- a/flopy/seawat/swt.py +++ b/flopy/seawat/swt.py @@ -1,459 +1,459 @@ -import os -from ..mbase import BaseModel -from ..pakbase import Package -from ..modflow import Modflow -from ..mt3d import Mt3dms -from .swtvdf import SeawatVdf -from .swtvsc import SeawatVsc -from ..discretization.structuredgrid import StructuredGrid -from flopy.discretization.modeltime import ModelTime - - -class SeawatList(Package): - """ - List Package class - """ - - def __init__(self, model, extension='list', listunit=7): - Package.__init__(self, model, extension, 'LIST', listunit) - return - - def __repr__(self): - return 'List package class' - - def write_file(self): - # Not implemented for list class - return - - -class Seawat(BaseModel): - """ - SEAWAT Model Class. - - Parameters - ---------- - modelname : string, optional - Name of model. This string will be used to name the SEAWAT input - that are created with write_model. (the default is 'swttest') - namefile_ext : string, optional - Extension for the namefile (the default is 'nam') - version : string, optional - Version of SEAWAT to use (the default is 'seawat'). 
- exe_name : string, optional - The name of the executable to use (the default is - 'swtv4.exe'). - listunit : integer, optional - Unit number for the list file (the default is 2). - model_ws : string, optional - model workspace. Directory name to create model data sets. - (default is the present working directory). - external_path : string - Location for external files (default is None). - verbose : boolean, optional - Print additional information to the screen (default is False). - load : boolean, optional - (default is True). - silent : integer - (default is 0) - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - - Examples - -------- - - >>> import flopy - >>> m = flopy.seawat.swt.Seawat() - - """ - - def __init__(self, modelname='swttest', namefile_ext='nam', - modflowmodel=None, mt3dmodel=None, - version='seawat', exe_name='swtv4', - structured=True, listunit=2, model_ws='.', external_path=None, - verbose=False, load=True, silent=0): - - # Call constructor for parent object - BaseModel.__init__(self, modelname, namefile_ext, exe_name, model_ws, - structured=structured, verbose=verbose) - - # Set attributes - self.version_types = {'seawat': 'SEAWAT'} - self.set_version(version) - self.lst = SeawatList(self, listunit=listunit) - self.glo = None - self._mf = None - self._mt = None - - # If a MODFLOW model was passed in, then add its packages - self.mf = self - if modflowmodel is not None: - for p in modflowmodel.packagelist: - self.packagelist.append(p) - self._modelgrid = modflowmodel.modelgrid - else: - modflowmodel = Modflow() - - # If a MT3D model was passed in, then add its packages - if mt3dmodel is not None: - for p in mt3dmodel.packagelist: - self.packagelist.append(p) - else: - mt3dmodel = Mt3dms() - - # external option stuff - self.array_free_format = False - self.array_format = 'mt3d' - self.external_fnames = [] - self.external_units = [] - self.external_binflag = [] - self.external = False - self.load = load - # the starting external data unit number - self._next_ext_unit = 3000 - if external_path is not None: - assert model_ws == '.', "ERROR: external cannot be used " + \ - "with model_ws" - - # external_path = os.path.join(model_ws, external_path) - if os.path.exists(external_path): - print("Note: external_path " + str(external_path) + - " already exists") - # assert os.path.exists(external_path),'external_path does not exist' - else: - os.mkdir(external_path) - self.external = True - self.external_path = external_path - self.verbose = verbose - self.silent = silent - - # Create a dictionary to map package with package object. - # This is used for loading models. 
- self.mfnam_packages = {} - for k, v in modflowmodel.mfnam_packages.items(): - self.mfnam_packages[k] = v - for k, v in mt3dmodel.mfnam_packages.items(): - self.mfnam_packages[k] = v - self.mfnam_packages['vdf'] = SeawatVdf - self.mfnam_packages['vsc'] = SeawatVsc - return - - @property - def modeltime(self): - # build model time - data_frame = {'perlen': self.dis.perlen.array, - 'nstp': self.dis.nstp.array, - 'tsmult': self.dis.tsmult.array} - self._model_time = ModelTime(data_frame, - self.dis.itmuni_dict[self.dis.itmuni], - self.dis.start_datetime, - self.dis.steady.array) - return self._model_time - - @property - def modelgrid(self): - if not self._mg_resync: - return self._modelgrid - - if self.has_package('bas6'): - ibound = self.bas6.ibound.array - else: - ibound = None - # build grid - # self.dis should exist if modflow model passed - self._modelgrid = StructuredGrid(self.dis.delc.array, - self.dis.delr.array, - self.dis.top.array, - self.dis.botm.array, - idomain=ibound, - lenuni=self.dis.lenuni, - proj4=self._modelgrid.proj4, - epsg=self._modelgrid.epsg, - xoff=self._modelgrid.xoffset, - yoff=self._modelgrid.yoffset, - angrot=self._modelgrid.angrot, - nlay=self.dis.nlay) - - # resolve offsets - xoff = self._modelgrid.xoffset - if xoff is None: - if self._xul is not None: - xoff = self._modelgrid._xul_to_xll(self._xul) - else: - xoff = 0.0 - yoff = self._modelgrid.yoffset - if yoff is None: - if self._yul is not None: - yoff = self._modelgrid._yul_to_yll(self._yul) - else: - yoff = 0.0 - self._modelgrid.set_coord_info(xoff, yoff, self._modelgrid.angrot, - self._modelgrid.epsg, - self._modelgrid.proj4) - self._mg_resync = not self._modelgrid.is_complete - return self._modelgrid - - @property - def nlay(self): - if (self.dis): - return self.dis.nlay - else: - return 0 - - @property - def nrow(self): - if (self.dis): - return self.dis.nrow - else: - return 0 - - @property - def ncol(self): - if (self.dis): - return self.dis.ncol - else: - return 0 - - @property - def nper(self): - if (self.dis): - return self.dis.nper - else: - return 0 - - @property - def nrow_ncol_nlay_nper(self): - dis = self.get_package('DIS') - if (dis): - return dis.nrow, dis.ncol, dis.nlay, dis.nper - else: - return 0, 0, 0, 0 - - def get_nrow_ncol_nlay_nper(self): - return self.nrow_ncol_nlay_nper - - def get_ifrefm(self): - bas = self.get_package('BAS6') - if (bas): - return bas.ifrefm - else: - return False - - @property - def ncomp(self): - if (self.btn): - return self.btn.ncomp - else: - return 1 - - @property - def mcomp(self): - if (self.btn): - return self.btn.mcomp - else: - return 1 - - def _set_name(self, value): - # Overrides BaseModel's setter for name property - BaseModel._set_name(self, value) - - # for i in range(len(self.lst.extension)): - # self.lst.file_name[i] = self.name + '.' 
+ self.lst.extension[i] - # return - - def change_model_ws(self, new_pth=None, reset_external=False): - # if hasattr(self,"_mf"): - if self._mf is not None: - self._mf.change_model_ws(new_pth=new_pth, - reset_external=reset_external) - # if hasattr(self,"_mt"): - if self._mt is not None: - self._mt.change_model_ws(new_pth=new_pth, - reset_external=reset_external) - super(Seawat, self).change_model_ws(new_pth=new_pth, - reset_external=reset_external) - - def write_name_file(self): - """ - Write the name file - - Returns - ------- - None - - """ - # open and write header - fn_path = os.path.join(self.model_ws, self.namefile) - f_nam = open(fn_path, 'w') - f_nam.write('{}\n'.format(self.heading)) - - # Write global file entry - if self.glo is not None: - if self.glo.unit_number[0] > 0: - f_nam.write('{:14s} {:5d} {}\n'.format(self.glo.name[0], - self.glo.unit_number[ - 0], - self.glo.file_name[0])) - # Write list file entry - f_nam.write('{:14s} {:5d} {}\n'.format(self.lst.name[0], - self.lst.unit_number[0], - self.lst.file_name[0])) - - # Write SEAWAT entries and close - f_nam.write('{}'.format(self.get_name_file_entries())) - - if self._mf is not None: - # write the external files - for b, u, f in zip(self._mf.external_binflag, - self._mf.external_units, \ - self._mf.external_fnames): - tag = "DATA" - if b: - tag = "DATA(BINARY)" - f_nam.write('{0:14s} {1:5d} {2}\n'.format(tag, u, f)) - - # write the output files - for u, f, b in zip(self._mf.output_units, self._mf.output_fnames, - self._mf.output_binflag): - if u == 0: - continue - if b: - f_nam.write( - 'DATA(BINARY) {0:5d} '.format(u) + f + ' REPLACE\n') - else: - f_nam.write('DATA {0:5d} '.format(u) + f + '\n') - - if self._mt is not None: - # write the external files - for b, u, f in zip(self._mt.external_binflag, - self._mt.external_units, \ - self._mt.external_fnames): - tag = "DATA" - if b: - tag = "DATA(BINARY)" - f_nam.write('{0:14s} {1:5d} {2}\n'.format(tag, u, f)) - - # write the output files - for u, f, b in zip(self._mt.output_units, self._mt.output_fnames, - self._mt.output_binflag): - if u == 0: - continue - if b: - f_nam.write( - 'DATA(BINARY) {0:5d} '.format(u) + f + ' REPLACE\n') - else: - f_nam.write('DATA {0:5d} '.format(u) + f + '\n') - - # write the external files - for b, u, f in zip(self.external_binflag, self.external_units, \ - self.external_fnames): - tag = "DATA" - if b: - tag = "DATA(BINARY)" - f_nam.write('{0:14s} {1:5d} {2}\n'.format(tag, u, f)) - - # write the output files - for u, f, b in zip(self.output_units, self.output_fnames, - self.output_binflag): - if u == 0: - continue - if b: - f_nam.write( - 'DATA(BINARY) {0:5d} '.format(u) + f + ' REPLACE\n') - else: - f_nam.write('DATA {0:5d} '.format(u) + f + '\n') - - f_nam.close() - return - - @staticmethod - def load(f, version='seawat', exe_name='swtv4', verbose=False, - model_ws='.', load_only=None): - """ - Load an existing model. - - Parameters - ---------- - f : string - Full path and name of SEAWAT name file. - - version : string - The version of SEAWAT (seawat) - (default is seawat) - - exe_name : string - The name of the executable to use if this loaded model is run. - (default is swtv4.exe) - - verbose : bool - Write information on the load process if True. - (default is False) - - model_ws : string - The path for the model workspace. - (default is the current working directory '.') - - load_only : list of strings - Filetype(s) to load (e.g. 
['lpf', 'adv']) - (default is None, which means that all will be loaded) - - Returns - ------- - m : flopy.seawat.swt.Seawat - flopy Seawat model object - - Examples - -------- - - >>> import flopy - >>> m = flopy.seawat.swt.Seawat.load(f) - - """ - # test if name file is passed with extension (i.e., is a valid file) - if os.path.isfile(os.path.join(model_ws, f)): - modelname = f.rpartition('.')[0] - else: - modelname = f - - # create instance of a seawat model and load modflow and mt3dms models - ms = Seawat(modelname=modelname, namefile_ext='nam', - modflowmodel=None, mt3dmodel=None, - version=version, exe_name=exe_name, model_ws=model_ws, - verbose=verbose) - - mf = Modflow.load(f, version='mf2k', exe_name=None, verbose=verbose, - model_ws=model_ws, load_only=load_only, - forgive=False, check=False) - - mt = Mt3dms.load(f, version='mt3dms', exe_name=None, verbose=verbose, - model_ws=model_ws, forgive=False) - - # set listing and global files using mf objects - ms.lst = mf.lst - ms.glo = mf.glo - - for p in mf.packagelist: - p.parent = ms - ms.add_package(p) - ms._mt = None - if mt is not None: - for p in mt.packagelist: - p.parent = ms - ms.add_package(p) - mt.external_units = [] - mt.external_binflag = [] - mt.external_fnames = [] - ms._mt = mt - ms._mf = mf - - # return model object - return ms +import os +from ..mbase import BaseModel +from ..pakbase import Package +from ..modflow import Modflow +from ..mt3d import Mt3dms +from .swtvdf import SeawatVdf +from .swtvsc import SeawatVsc +from ..discretization.structuredgrid import StructuredGrid +from flopy.discretization.modeltime import ModelTime + + +class SeawatList(Package): + """ + List Package class + """ + + def __init__(self, model, extension='list', listunit=7): + Package.__init__(self, model, extension, 'LIST', listunit) + return + + def __repr__(self): + return 'List package class' + + def write_file(self): + # Not implemented for list class + return + + +class Seawat(BaseModel): + """ + SEAWAT Model Class. + + Parameters + ---------- + modelname : string, optional + Name of model. This string will be used to name the SEAWAT input + that are created with write_model. (the default is 'swttest') + namefile_ext : string, optional + Extension for the namefile (the default is 'nam') + version : string, optional + Version of SEAWAT to use (the default is 'seawat'). + exe_name : string, optional + The name of the executable to use (the default is + 'swtv4.exe'). + listunit : integer, optional + Unit number for the list file (the default is 2). + model_ws : string, optional + model workspace. Directory name to create model data sets. + (default is the present working directory). + external_path : string + Location for external files (default is None). + verbose : boolean, optional + Print additional information to the screen (default is False). + load : boolean, optional + (default is True). 
+ silent : integer + (default is 0) + + Attributes + ---------- + + Methods + ------- + + See Also + -------- + + Notes + ----- + + Examples + -------- + + >>> import flopy + >>> m = flopy.seawat.swt.Seawat() + + """ + + def __init__(self, modelname='swttest', namefile_ext='nam', + modflowmodel=None, mt3dmodel=None, + version='seawat', exe_name='swtv4', + structured=True, listunit=2, model_ws='.', external_path=None, + verbose=False, load=True, silent=0): + + # Call constructor for parent object + BaseModel.__init__(self, modelname, namefile_ext, exe_name, model_ws, + structured=structured, verbose=verbose) + + # Set attributes + self.version_types = {'seawat': 'SEAWAT'} + self.set_version(version) + self.lst = SeawatList(self, listunit=listunit) + self.glo = None + self._mf = None + self._mt = None + + # If a MODFLOW model was passed in, then add its packages + self.mf = self + if modflowmodel is not None: + for p in modflowmodel.packagelist: + self.packagelist.append(p) + self._modelgrid = modflowmodel.modelgrid + else: + modflowmodel = Modflow() + + # If a MT3D model was passed in, then add its packages + if mt3dmodel is not None: + for p in mt3dmodel.packagelist: + self.packagelist.append(p) + else: + mt3dmodel = Mt3dms() + + # external option stuff + self.array_free_format = False + self.array_format = 'mt3d' + self.external_fnames = [] + self.external_units = [] + self.external_binflag = [] + self.external = False + self.load = load + # the starting external data unit number + self._next_ext_unit = 3000 + if external_path is not None: + assert model_ws == '.', "ERROR: external cannot be used " + \ + "with model_ws" + + # external_path = os.path.join(model_ws, external_path) + if os.path.exists(external_path): + print("Note: external_path " + str(external_path) + + " already exists") + # assert os.path.exists(external_path),'external_path does not exist' + else: + os.mkdir(external_path) + self.external = True + self.external_path = external_path + self.verbose = verbose + self.silent = silent + + # Create a dictionary to map package with package object. + # This is used for loading models. 
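+        # (the MODFLOW and MT3DMS package dictionaries are merged here so
+        # that Seawat.load can resolve every package type that may appear
+        # in a combined SEAWAT name file)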
+ self.mfnam_packages = {} + for k, v in modflowmodel.mfnam_packages.items(): + self.mfnam_packages[k] = v + for k, v in mt3dmodel.mfnam_packages.items(): + self.mfnam_packages[k] = v + self.mfnam_packages['vdf'] = SeawatVdf + self.mfnam_packages['vsc'] = SeawatVsc + return + + @property + def modeltime(self): + # build model time + data_frame = {'perlen': self.dis.perlen.array, + 'nstp': self.dis.nstp.array, + 'tsmult': self.dis.tsmult.array} + self._model_time = ModelTime(data_frame, + self.dis.itmuni_dict[self.dis.itmuni], + self.dis.start_datetime, + self.dis.steady.array) + return self._model_time + + @property + def modelgrid(self): + if not self._mg_resync: + return self._modelgrid + + if self.has_package('bas6'): + ibound = self.bas6.ibound.array + else: + ibound = None + # build grid + # self.dis should exist if modflow model passed + self._modelgrid = StructuredGrid(self.dis.delc.array, + self.dis.delr.array, + self.dis.top.array, + self.dis.botm.array, + idomain=ibound, + lenuni=self.dis.lenuni, + proj4=self._modelgrid.proj4, + epsg=self._modelgrid.epsg, + xoff=self._modelgrid.xoffset, + yoff=self._modelgrid.yoffset, + angrot=self._modelgrid.angrot, + nlay=self.dis.nlay) + + # resolve offsets + xoff = self._modelgrid.xoffset + if xoff is None: + if self._xul is not None: + xoff = self._modelgrid._xul_to_xll(self._xul) + else: + xoff = 0.0 + yoff = self._modelgrid.yoffset + if yoff is None: + if self._yul is not None: + yoff = self._modelgrid._yul_to_yll(self._yul) + else: + yoff = 0.0 + self._modelgrid.set_coord_info(xoff, yoff, self._modelgrid.angrot, + self._modelgrid.epsg, + self._modelgrid.proj4) + self._mg_resync = not self._modelgrid.is_complete + return self._modelgrid + + @property + def nlay(self): + if (self.dis): + return self.dis.nlay + else: + return 0 + + @property + def nrow(self): + if (self.dis): + return self.dis.nrow + else: + return 0 + + @property + def ncol(self): + if (self.dis): + return self.dis.ncol + else: + return 0 + + @property + def nper(self): + if (self.dis): + return self.dis.nper + else: + return 0 + + @property + def nrow_ncol_nlay_nper(self): + dis = self.get_package('DIS') + if (dis): + return dis.nrow, dis.ncol, dis.nlay, dis.nper + else: + return 0, 0, 0, 0 + + def get_nrow_ncol_nlay_nper(self): + return self.nrow_ncol_nlay_nper + + def get_ifrefm(self): + bas = self.get_package('BAS6') + if (bas): + return bas.ifrefm + else: + return False + + @property + def ncomp(self): + if (self.btn): + return self.btn.ncomp + else: + return 1 + + @property + def mcomp(self): + if (self.btn): + return self.btn.mcomp + else: + return 1 + + def _set_name(self, value): + # Overrides BaseModel's setter for name property + BaseModel._set_name(self, value) + + # for i in range(len(self.lst.extension)): + # self.lst.file_name[i] = self.name + '.' 
+ self.lst.extension[i] + # return + + def change_model_ws(self, new_pth=None, reset_external=False): + # if hasattr(self,"_mf"): + if self._mf is not None: + self._mf.change_model_ws(new_pth=new_pth, + reset_external=reset_external) + # if hasattr(self,"_mt"): + if self._mt is not None: + self._mt.change_model_ws(new_pth=new_pth, + reset_external=reset_external) + super(Seawat, self).change_model_ws(new_pth=new_pth, + reset_external=reset_external) + + def write_name_file(self): + """ + Write the name file + + Returns + ------- + None + + """ + # open and write header + fn_path = os.path.join(self.model_ws, self.namefile) + f_nam = open(fn_path, 'w') + f_nam.write('{}\n'.format(self.heading)) + + # Write global file entry + if self.glo is not None: + if self.glo.unit_number[0] > 0: + f_nam.write('{:14s} {:5d} {}\n'.format(self.glo.name[0], + self.glo.unit_number[ + 0], + self.glo.file_name[0])) + # Write list file entry + f_nam.write('{:14s} {:5d} {}\n'.format(self.lst.name[0], + self.lst.unit_number[0], + self.lst.file_name[0])) + + # Write SEAWAT entries and close + f_nam.write('{}'.format(self.get_name_file_entries())) + + if self._mf is not None: + # write the external files + for b, u, f in zip(self._mf.external_binflag, + self._mf.external_units, \ + self._mf.external_fnames): + tag = "DATA" + if b: + tag = "DATA(BINARY)" + f_nam.write('{0:14s} {1:5d} {2}\n'.format(tag, u, f)) + + # write the output files + for u, f, b in zip(self._mf.output_units, self._mf.output_fnames, + self._mf.output_binflag): + if u == 0: + continue + if b: + f_nam.write( + 'DATA(BINARY) {0:5d} '.format(u) + f + ' REPLACE\n') + else: + f_nam.write('DATA {0:5d} '.format(u) + f + '\n') + + if self._mt is not None: + # write the external files + for b, u, f in zip(self._mt.external_binflag, + self._mt.external_units, \ + self._mt.external_fnames): + tag = "DATA" + if b: + tag = "DATA(BINARY)" + f_nam.write('{0:14s} {1:5d} {2}\n'.format(tag, u, f)) + + # write the output files + for u, f, b in zip(self._mt.output_units, self._mt.output_fnames, + self._mt.output_binflag): + if u == 0: + continue + if b: + f_nam.write( + 'DATA(BINARY) {0:5d} '.format(u) + f + ' REPLACE\n') + else: + f_nam.write('DATA {0:5d} '.format(u) + f + '\n') + + # write the external files + for b, u, f in zip(self.external_binflag, self.external_units, \ + self.external_fnames): + tag = "DATA" + if b: + tag = "DATA(BINARY)" + f_nam.write('{0:14s} {1:5d} {2}\n'.format(tag, u, f)) + + # write the output files + for u, f, b in zip(self.output_units, self.output_fnames, + self.output_binflag): + if u == 0: + continue + if b: + f_nam.write( + 'DATA(BINARY) {0:5d} '.format(u) + f + ' REPLACE\n') + else: + f_nam.write('DATA {0:5d} '.format(u) + f + '\n') + + f_nam.close() + return + + @staticmethod + def load(f, version='seawat', exe_name='swtv4', verbose=False, + model_ws='.', load_only=None): + """ + Load an existing model. + + Parameters + ---------- + f : string + Full path and name of SEAWAT name file. + + version : string + The version of SEAWAT (seawat) + (default is seawat) + + exe_name : string + The name of the executable to use if this loaded model is run. + (default is swtv4.exe) + + verbose : bool + Write information on the load process if True. + (default is False) + + model_ws : string + The path for the model workspace. + (default is the current working directory '.') + + load_only : list of strings + Filetype(s) to load (e.g. 
['lpf', 'adv']) + (default is None, which means that all will be loaded) + + Returns + ------- + m : flopy.seawat.swt.Seawat + flopy Seawat model object + + Examples + -------- + + >>> import flopy + >>> m = flopy.seawat.swt.Seawat.load(f) + + """ + # test if name file is passed with extension (i.e., is a valid file) + if os.path.isfile(os.path.join(model_ws, f)): + modelname = f.rpartition('.')[0] + else: + modelname = f + + # create instance of a seawat model and load modflow and mt3dms models + ms = Seawat(modelname=modelname, namefile_ext='nam', + modflowmodel=None, mt3dmodel=None, + version=version, exe_name=exe_name, model_ws=model_ws, + verbose=verbose) + + mf = Modflow.load(f, version='mf2k', exe_name=None, verbose=verbose, + model_ws=model_ws, load_only=load_only, + forgive=False, check=False) + + mt = Mt3dms.load(f, version='mt3dms', exe_name=None, verbose=verbose, + model_ws=model_ws, forgive=False) + + # set listing and global files using mf objects + ms.lst = mf.lst + ms.glo = mf.glo + + for p in mf.packagelist: + p.parent = ms + ms.add_package(p) + ms._mt = None + if mt is not None: + for p in mt.packagelist: + p.parent = ms + ms.add_package(p) + mt.external_units = [] + mt.external_binflag = [] + mt.external_fnames = [] + ms._mt = mt + ms._mf = mf + + # return model object + return ms diff --git a/flopy/seawat/swtvdf.py b/flopy/seawat/swtvdf.py index 878eb5f1a5..3f732438d2 100644 --- a/flopy/seawat/swtvdf.py +++ b/flopy/seawat/swtvdf.py @@ -1,507 +1,507 @@ -import sys -import numpy as np -from ..pakbase import Package -from ..utils import Util2d, Util3d -from ..utils.util_array import Transient3d - - -class SeawatVdf(Package): - """ - SEAWAT Variable-Density Flow Package Class. - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.seawat.swt.Seawat`) to which - this package will be added. - mtdnconc (or mt3drhoflg) : int - is the MT3DMS species number that will be used in the equation of - state to compute fluid density. This input variable was formerly - referred to as MTDNCONC (Langevin and others, 2003). - If MT3DRHOFLG = 0, fluid density is specified using items 6 and 7, - and flow will be uncoupled with transport if the IMT Process is active. - If MT3DRHOFLG > 0, fluid density is calculated using the MT3DMS - species number that corresponds with MT3DRHOFLG. A value for - MT3DRHOFLG greater than zero indicates that flow will be coupled with - transport. - If MT3DRHOFLG = -1, fluid density is calculated using one or more - MT3DMS species. Items 4a, 4b, and 4c will be read instead of item 4. - The dependence of fluid density on pressure head can only be activated - when MT3DRHOFLG = -1. A value for MT3DRHOFLG of -1 indicates that flow - will be coupled with transport. - mfnadvfd : int - is a flag that determines the method for calculating the internodal - density values used to conserve fluid mass. - If MFNADVFD = 2, internodal conductance values used to conserve fluid - mass are calculated using a central-in-space algorithm. - If MFNADVFD <> 2, internodal conductance values used to conserve fluid - mass are calculated using an upstream-weighted algorithm. - nswtcpl : int - is a flag used to determine the flow and transport coupling procedure. - If NSWTCPL = 0 or 1, flow and transport will be explicitly coupled - using a one-timestep lag. The explicit coupling option is normally - much faster than the iterative option and is recommended for most - applications. 
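The load path above stitches a SEAWAT model together from the same name file twice, once through Modflow.load and once through Mt3dms.load, with every package re-parented onto the returned Seawat object. A minimal usage sketch of that method, assuming a name file mymodel.nam exists in a workspace swt_ws (both names are hypothetical):

    import flopy

    # Load an existing SEAWAT model; the MODFLOW and MT3DMS packages
    # are merged into the single returned Seawat object.
    m = flopy.seawat.Seawat.load('mymodel.nam', exe_name='swtv4',
                                 model_ws='swt_ws', verbose=True)

    # Dimension queries are delegated to the DIS package, as in the
    # nrow/ncol/nlay/nper properties defined above.
    print(m.nrow_ncol_nlay_nper)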
- If NSWTCPL > 1, NSWTCPL is the maximum number of non-linear coupling - iterations for the flow and transport solutions. SEAWAT-2000 will stop - execution after NSWTCPL iterations if convergence between flow and - transport has not occurred. - If NSWTCPL = -1, the flow solution will be recalculated only for: The - first transport step of the simulation, or - The last transport step of the MODFLOW timestep, or - The maximum density change at a cell is greater than DNSCRIT. - iwtable : int - is a flag used to activate the variable-density water-table corrections - (Guo and Langevin, 2002, eq. 82). If IWTABLE = 0, the water-table - correction will not be applied. - If IWTABLE > 0, the water-table correction will be applied. - densemin : float - is the minimum fluid density. If the resulting density value - calculated with the equation of state is less than DENSEMIN, the - density value is set to DENSEMIN. - If DENSEMIN = 0, the computed fluid density is not limited by - DENSEMIN (this is the option to use for most simulations). - If DENSEMIN > 0, a computed fluid density less than DENSEMIN is - automatically reset to DENSEMIN. - densemax : float - is the maximum fluid density. If the resulting density value - calculated with the equation of state is greater than DENSEMAX, the - density value is set to DENSEMAX. - If DENSEMAX = 0, the computed fluid density is not limited by - DENSEMAX (this is the option to use for most simulations). - If DENSEMAX > 0, a computed fluid density larger than DENSEMAX is - automatically reset to DENSEMAX. - dnscrit : float - is a user-specified density value. If NSWTCPL is greater than 1, - DNSCRIT is the convergence crite- rion, in units of fluid density, - for convergence between flow and transport. If the maximum fluid - density difference between two consecutive implicit coupling - iterations is not less than DNSCRIT, the program will continue to - iterate on the flow and transport equations, or will terminate if - NSWTCPL is reached. If NSWTCPL is -1, DNSCRIT is the maximum density - threshold, in units of fluid density. If the fluid density change - (between the present transport timestep and the last flow solution) at - one or more cells is greater than DNSCRIT, then SEAWAT_V4 will update - the flow field (by solving the flow equation with the updated density - field). - denseref : float - is the fluid density at the reference concentration, temperature, and - pressure. For most simulations, DENSEREF is specified as the density - of freshwater at 25 degrees C and at a reference pressure of zero. - drhodc : float - formerly referred to as DENSESLP (Langevin and others, 2003), is the - slope of the linear equation of state that relates fluid density to - solute concentration. In SEAWAT_V4, separate values for DRHODC can be - entered for as many MT3DMS species as desired. If DRHODC is not - specified for a species, then that species does not affect fluid - density. Any measurement unit can be used for solute concentration, - provided DENSEREF and DRHODC are set properly. DRHODC can be - approximated by the user by dividing the density difference over the - range of end- member fluids by the difference in concentration between - the end-member fluids. - drhodprhd : float - is the slope of the linear equation of state that relates fluid - density to the height of the pressure head (in terms of the reference - density). Note that DRHODPRHD can be calculated from the volumetric - expansion coefficient for pressure using equation 15. 
If the - simulation is formulated in terms of kilograms and meters, DRHODPRHD - has an approximate value of 4.46 x 10-3 kg/m4. A value of zero, which - is typically used for most problems, inactivates the dependence of - fluid density on pressure. - prhdref : float - is the reference pressure head. This value should normally be set to - zero. - nsrhoeos : int - is the number of MT3DMS species to be used in the equation of state - for fluid density. This value is read only if MT3DRHOFLG = -1. - mtrhospec : int - is the MT3DMS species number corresponding to the adjacent DRHODC and - CRHOREF. - crhoref : float - is the reference concentration (C0) for species, MTRHOSPEC. For most - simulations, CRHOREF should be specified as zero. If MT3DRHOFLG > 0, - CRHOREF is assumed to equal zero (as was done in previous versions of - SEAWAT). - firstdt : float - is the length of the first transport timestep used to start the - simulation if both of the following two condi- tions are met: - 1. The IMT Process is active, and 2. transport timesteps are - calculated as a function of the user-specified Courant number (the - MT3DMS input variable, PERCEL, is greater than zero). - indense : int - is a flag. INDENSE is read only if MT3DRHOFLG is equal to zero. - If INDENSE < 0, values for the DENSE array will be reused from the - previous stress period. If it is the first stress period, values for - the DENSE array will be set to DENSEREF. - If INDENSE = 0, values for the DENSE array will be set to DENSEREF. - If INDENSE >= 1, values for the DENSE array will be read from item 7. - If INDENSE = 2, values read for the DENSE array are assumed to - represent solute concentration, and will be converted to density - values using the equation of state. - dense : Transient3d - A float or array of floats (nlay, nrow, ncol) should be assigned as - values to a dictionary related to keys of period number. dense - is the fluid density array read for each layer using the MODFLOW-2000 - U2DREL array reader. The DENSE array is read only if MT3DRHOFLG is - equal to zero. The DENSE array may also be entered in terms of solute - concentration, or any other units, if INDENSE is set to 2 and the - constants used in the density equation of state are specified - appropriately. - extension : string - Filename extension (default is 'vdf') - unitnumber : int - File unit number (default is 37). - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - In swt_4 mtdnconc became mt3drhoflg. If the latter one is defined in - kwargs, it will overwrite mtdnconc. Same goes for denseslp, which has - become drhodc. - - When loading an existing SEAWAT model that has DENSE specified as - concentrations, the load process will convert those concentrations into - density values using the equation of state. This is only relevant when - mtdnconc (or mt3drhoflg) is set to zero. 
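Because DENSE is held as a Transient3d when MT3DRHOFLG = 0, the dense argument can be a dictionary keyed by zero-based stress period, and periods without an entry reuse the previous array. A minimal sketch, assuming a model with a DIS package attached (the model name, dimensions, and densities are all illustrative):

    import flopy

    m = flopy.seawat.Seawat(modelname='vdfdemo')  # hypothetical name
    dis = flopy.modflow.ModflowDis(m, nlay=1, nrow=10, ncol=10, nper=3)

    # Constant freshwater density in period 0 and a new value in
    # period 2; period 1 falls back to the period-0 array.
    vdf = flopy.seawat.SeawatVdf(m, mtdnconc=0, denseref=1000.,
                                 drhodc=0.7143,
                                 dense={0: 1000., 2: 1025.})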
- - Examples - -------- - - >>> import flopy - >>> m = flopy.seawat.Seawat() - >>> lpf = flopy.seawat.SeawatVdf(m) - - """ - unitnumber = 37 - - def __init__(self, model, mtdnconc=1, mfnadvfd=1, nswtcpl=1, iwtable=1, - densemin=0, densemax=0, dnscrit=1e-2, denseref=1.000, - denseslp=.025, crhoref=0, firstdt=0.001, indense=1, - dense=1.000, nsrhoeos=1, drhodprhd=4.46e-3, prhdref=0., - extension='vdf', unitnumber=None, filenames=None, **kwargs): - - if unitnumber is None: - unitnumber = SeawatVdf.defaultunit() - - # set filenames - if filenames is None: - filenames = [None] - elif isinstance(filenames, str): - filenames = [filenames] - - # Fill namefile items - name = [SeawatVdf.ftype()] - units = [unitnumber] - extra = [''] - - # set package name - fname = [filenames[0]] - - # Call ancestor's init to set self.parent, extension, name and unit number - Package.__init__(self, model, extension=extension, name=name, - unit_number=units, extra=extra, filenames=fname) - - nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper - - self.mtdnconc = kwargs.pop('mt3drhoflg', mtdnconc) - self.mfnadvfd = mfnadvfd - self.nswtcpl = nswtcpl - self.iwtable = iwtable - self.densemin = densemin - self.densemax = densemax - self.dnscrit = dnscrit - self.nsrhoeos = nsrhoeos - self.denseref = denseref - self.denseslp = kwargs.pop('drhodc', denseslp) - self.crhoref = crhoref - self.drhodprhd = drhodprhd - self.prhdref = prhdref - self.firstdt = firstdt - self.indense = indense - if self.mtdnconc == 0: - self.dense = Transient3d(model, (nlay, nrow, ncol), np.float32, - dense, name='dense_', - locat=self.unit_number[0]) - else: - # dense not needed for most cases so setting to None - self.dense = None - self.parent.add_package(self) - return - - def write_file(self): - """ - Write the package file - - Returns - ------- - None - - """ - f_vdf = open(self.fn_path, 'w') - - # item 1 - f_vdf.write('%10i%10i%10i%10i\n' % (self.mtdnconc, self.mfnadvfd, - self.nswtcpl, self.iwtable)) - - # item 2 - f_vdf.write('%10.4f%10.4f\n' % (self.densemin, self.densemax)) - - # item 3 - if (self.nswtcpl > 1 or self.nswtcpl == -1): - f_vdf.write('%10f\n' % (self.dnscrit)) - - # item 4 - if self.mtdnconc >= 0: - if self.nsrhoeos == 1: - f_vdf.write('%10.4f%10.4f\n' % (self.denseref, self.denseslp)) - else: - f_vdf.write('%10.4f%10.4f\n' % (self.denseref, - self.denseslp[0])) - - elif self.mtdnconc == -1: - f_vdf.write('%10.4f%10.4f%10.4f\n' % (self.denseref, - self.drhodprhd, - self.prhdref)) - f_vdf.write('%10i\n' % self.nsrhoeos) - if self.nsrhoeos == 1: - f_vdf.write('%10i%10.4f%10.4f\n' % (1, self.denseslp, - self.crhoref)) - else: - for i in range(self.nsrhoeos): - mtrhospec = 1 + i - f_vdf.write('%10i%10.4f%10.4f\n' % (mtrhospec, - self.denseslp[i], - self.crhoref[i])) - - # item 5 - f_vdf.write('%10f\n' % (self.firstdt)) - - # Transient DENSE array - if self.mtdnconc == 0: - - nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper - for kper in range(nper): - - itmp, file_entry_dense = self.dense.get_kper_entry(kper) - - # item 6 (and possibly 7) - if itmp > 0: - f_vdf.write('%10i\n' % (self.indense)) - f_vdf.write(file_entry_dense) - - else: - f_vdf.write('%10i\n' % (itmp)) - - f_vdf.close() - return - - @staticmethod - def load(f, model, nper=None, ext_unit_dict=None): - """ - Load an existing package. - - Parameters - ---------- - f : filename or file handle - File to load. - model : model object - The model object (of type :class:`flopy.seawat.swt.Seawat`) to - which this package will be added. 
- nper : int - The number of stress periods. If nper is None, then nper will be - obtained from the model object. (default is None). - ext_unit_dict : dictionary, optional - If the arrays in the file are specified using EXTERNAL, - or older style array control records, then `f` should be a file - handle. In this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - - Returns - ------- - vdf : SeawatVdf object - SeawatVdf object. - - Examples - -------- - - >>> import flopy - >>> mf = flopy.modflow.Modflow() - >>> dis = flopy.modflow.ModflowDis(mf) - >>> mt = flopy.mt3d.Mt3dms() - >>> swt = flopy.seawat.Seawat(modflowmodel=mf, mt3dmsmodel=mt) - >>> vdf = flopy.seawat.SeawatVdf.load('test.vdf', m) - - """ - - if model.verbose: - sys.stdout.write('loading vdf package file...\n') - - # Open file, if necessary - openfile = not hasattr(f, 'read') - if openfile: - filename = f - f = open(filename, 'r') - - # Dataset 0 -- comment line - while True: - line = f.readline() - if line[0] != '#': - break - - # Determine problem dimensions - nrow, ncol, nlay, npertemp = model.get_nrow_ncol_nlay_nper() - if nper is None: - nper = npertemp - - # Item 1: MT3DRHOFLG MFNADVFD NSWTCPL IWTABLE - line already read above - if model.verbose: - print(' loading MT3DRHOFLG MFNADVFD NSWTCPL IWTABLE...') - t = line.strip().split() - mt3drhoflg = int(t[0]) - mfnadvfd = int(t[1]) - nswtcpl = int(t[2]) - iwtable = int(t[3]) - if model.verbose: - print(' MT3DRHOFLG {}'.format(mt3drhoflg)) - print(' MFNADVFD {}'.format(mfnadvfd)) - print(' NSWTCPL {}'.format(nswtcpl)) - print(' IWTABLE {}'.format(iwtable)) - - # Item 2 -- DENSEMIN DENSEMAX - if model.verbose: - print(' loading DENSEMIN DENSEMAX...') - line = f.readline() - t = line.strip().split() - densemin = float(t[0]) - densemax = float(t[1]) - - # Item 3 -- DNSCRIT - if model.verbose: - print(' loading DNSCRIT...') - dnscrit = None - if nswtcpl > 1 or nswtcpl == -1: - line = f.readline() - t = line.strip().split() - dnscrit = float(t[0]) - - # Item 4 -- DENSEREF DRHODC - drhodprhd = None - prhdref = None - nsrhoeos = None - mtrhospec = None - crhoref = None - if mt3drhoflg >= 0: - if model.verbose: - print(' loading DENSEREF DRHODC(1)...') - line = f.readline() - t = line.strip().split() - denseref = float(t[0]) - drhodc = float(t[1]) - nsrhoeos = 1 - else: - if model.verbose: - print(' loading DENSEREF DRHODPRHD PRHDREF...') - line = f.readline() - t = line.strip().split() - denseref = float(t[0]) - drhodprhd = float(t[1]) - prhdref = float(t[2]) - - if model.verbose: - print(' loading NSRHOEOS...') - line = f.readline() - t = line.strip().split() - nsrhoeos = int(t[0]) - - if model.verbose: - print(' loading MTRHOSPEC DRHODC CRHOREF...') - mtrhospec = [] - drhodc = [] - crhoref = [] - for i in range(nsrhoeos): - line = f.readline() - t = line.strip().split() - mtrhospec.append(int(t[0])) - drhodc.append(float(t[1])) - crhoref.append(float(t[2])) - - # Item 5 -- FIRSTDT - if model.verbose: - print(' loading FIRSTDT...') - line = f.readline() - t = line.strip().split() - firstdt = float(t[0]) - - # Items 6 and 7 -- INDENSE DENSE - indense = None - dense = None - if mt3drhoflg == 0: - - # Create dense as a Transient3D record - dense = {} - - for iper in range(nper): - - if model.verbose: - print(' loading INDENSE ' - 'for stress period {}...'.format(iper + 1)) - line = f.readline() - t = line.strip().split() - indense = int(t[0]) - - if indense > 0: - name = 'DENSE_StressPeriod_{}'.format(iper) - 
t = Util3d.load(f, model, (nlay, nrow, ncol), - np.float32, name, ext_unit_dict) - if indense == 2: - t = t.array - t = denseref + drhodc * t - t = Util3d(model, (nlay, nrow, ncol), np.float32, t, - name, ext_unit_dict=ext_unit_dict) - dense[iper] = t - - dense = Transient3d(model, (nlay, nrow, ncol), np.float32, - dense, name='dense_') - - # Set indense = 1 because all concentrations converted to density - indense = 1 - - if openfile: - f.close() - - # set package unit number - unitnumber = None - filenames = [None] - if ext_unit_dict is not None: - unitnumber, filenames[0] = \ - model.get_ext_dict_attr(ext_unit_dict, - filetype=SeawatVdf.ftype()) - - # Construct and return vdf package - vdf = SeawatVdf(model, mt3drhoflg=mt3drhoflg, mfnadvfd=mfnadvfd, - nswtcpl=nswtcpl, iwtable=iwtable, - densemin=densemin, densemax=densemax, - dnscrit=dnscrit, denseref=denseref, drhodc=drhodc, - drhodprhd=drhodprhd, prhdref=prhdref, - nsrhoeos=nsrhoeos, mtrhospec=mtrhospec, - crhoref=crhoref, firstdt=firstdt, indense=indense, - dense=dense, - unitnumber=unitnumber, filenames=filenames) - return vdf - - @staticmethod - def ftype(): - return 'VDF' - - @staticmethod - def defaultunit(): - return 37 +import sys +import numpy as np +from ..pakbase import Package +from ..utils import Util2d, Util3d +from ..utils.util_array import Transient3d + + +class SeawatVdf(Package): + """ + SEAWAT Variable-Density Flow Package Class. + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.seawat.swt.Seawat`) to which + this package will be added. + mtdnconc (or mt3drhoflg) : int + is the MT3DMS species number that will be used in the equation of + state to compute fluid density. This input variable was formerly + referred to as MTDNCONC (Langevin and others, 2003). + If MT3DRHOFLG = 0, fluid density is specified using items 6 and 7, + and flow will be uncoupled with transport if the IMT Process is active. + If MT3DRHOFLG > 0, fluid density is calculated using the MT3DMS + species number that corresponds with MT3DRHOFLG. A value for + MT3DRHOFLG greater than zero indicates that flow will be coupled with + transport. + If MT3DRHOFLG = -1, fluid density is calculated using one or more + MT3DMS species. Items 4a, 4b, and 4c will be read instead of item 4. + The dependence of fluid density on pressure head can only be activated + when MT3DRHOFLG = -1. A value for MT3DRHOFLG of -1 indicates that flow + will be coupled with transport. + mfnadvfd : int + is a flag that determines the method for calculating the internodal + density values used to conserve fluid mass. + If MFNADVFD = 2, internodal conductance values used to conserve fluid + mass are calculated using a central-in-space algorithm. + If MFNADVFD <> 2, internodal conductance values used to conserve fluid + mass are calculated using an upstream-weighted algorithm. + nswtcpl : int + is a flag used to determine the flow and transport coupling procedure. + If NSWTCPL = 0 or 1, flow and transport will be explicitly coupled + using a one-timestep lag. The explicit coupling option is normally + much faster than the iterative option and is recommended for most + applications. + If NSWTCPL > 1, NSWTCPL is the maximum number of non-linear coupling + iterations for the flow and transport solutions. SEAWAT-2000 will stop + execution after NSWTCPL iterations if convergence between flow and + transport has not occurred. 
+ If NSWTCPL = -1, the flow solution will be recalculated only for: The
+ first transport step of the simulation, or
+ The last transport step of the MODFLOW timestep, or
+ The maximum density change at a cell is greater than DNSCRIT.
+ iwtable : int
+ is a flag used to activate the variable-density water-table corrections
+ (Guo and Langevin, 2002, eq. 82). If IWTABLE = 0, the water-table
+ correction will not be applied.
+ If IWTABLE > 0, the water-table correction will be applied.
+ densemin : float
+ is the minimum fluid density. If the resulting density value
+ calculated with the equation of state is less than DENSEMIN, the
+ density value is set to DENSEMIN.
+ If DENSEMIN = 0, the computed fluid density is not limited by
+ DENSEMIN (this is the option to use for most simulations).
+ If DENSEMIN > 0, a computed fluid density less than DENSEMIN is
+ automatically reset to DENSEMIN.
+ densemax : float
+ is the maximum fluid density. If the resulting density value
+ calculated with the equation of state is greater than DENSEMAX, the
+ density value is set to DENSEMAX.
+ If DENSEMAX = 0, the computed fluid density is not limited by
+ DENSEMAX (this is the option to use for most simulations).
+ If DENSEMAX > 0, a computed fluid density larger than DENSEMAX is
+ automatically reset to DENSEMAX.
+ dnscrit : float
+ is a user-specified density value. If NSWTCPL is greater than 1,
+ DNSCRIT is the convergence criterion, in units of fluid density,
+ for convergence between flow and transport. If the maximum fluid
+ density difference between two consecutive implicit coupling
+ iterations is not less than DNSCRIT, the program will continue to
+ iterate on the flow and transport equations, or will terminate if
+ NSWTCPL is reached. If NSWTCPL is -1, DNSCRIT is the maximum density
+ threshold, in units of fluid density. If the fluid density change
+ (between the present transport timestep and the last flow solution) at
+ one or more cells is greater than DNSCRIT, then SEAWAT_V4 will update
+ the flow field (by solving the flow equation with the updated density
+ field).
+ denseref : float
+ is the fluid density at the reference concentration, temperature, and
+ pressure. For most simulations, DENSEREF is specified as the density
+ of freshwater at 25 degrees C and at a reference pressure of zero.
+ drhodc : float
+ formerly referred to as DENSESLP (Langevin and others, 2003), is the
+ slope of the linear equation of state that relates fluid density to
+ solute concentration. In SEAWAT_V4, separate values for DRHODC can be
+ entered for as many MT3DMS species as desired. If DRHODC is not
+ specified for a species, then that species does not affect fluid
+ density. Any measurement unit can be used for solute concentration,
+ provided DENSEREF and DRHODC are set properly. DRHODC can be
+ approximated by the user by dividing the density difference over the
+ range of end-member fluids by the difference in concentration between
+ the end-member fluids.
+ drhodprhd : float
+ is the slope of the linear equation of state that relates fluid
+ density to the height of the pressure head (in terms of the reference
+ density). Note that DRHODPRHD can be calculated from the volumetric
+ expansion coefficient for pressure using equation 15. If the
+ simulation is formulated in terms of kilograms and meters, DRHODPRHD
+ has an approximate value of 4.46 x 10^-3 kg/m^4. A value of zero, which
+ is typically used for most problems, inactivates the dependence of
+ fluid density on pressure. 
+ prhdref : float
+ is the reference pressure head. This value should normally be set to
+ zero.
+ nsrhoeos : int
+ is the number of MT3DMS species to be used in the equation of state
+ for fluid density. This value is read only if MT3DRHOFLG = -1.
+ mtrhospec : int
+ is the MT3DMS species number corresponding to the adjacent DRHODC and
+ CRHOREF.
+ crhoref : float
+ is the reference concentration (C0) for species MTRHOSPEC. For most
+ simulations, CRHOREF should be specified as zero. If MT3DRHOFLG > 0,
+ CRHOREF is assumed to equal zero (as was done in previous versions of
+ SEAWAT).
+ firstdt : float
+ is the length of the first transport timestep used to start the
+ simulation if both of the following two conditions are met:
+ 1. The IMT Process is active, and 2. transport timesteps are
+ calculated as a function of the user-specified Courant number (the
+ MT3DMS input variable, PERCEL, is greater than zero).
+ indense : int
+ is a flag. INDENSE is read only if MT3DRHOFLG is equal to zero.
+ If INDENSE < 0, values for the DENSE array will be reused from the
+ previous stress period. If it is the first stress period, values for
+ the DENSE array will be set to DENSEREF.
+ If INDENSE = 0, values for the DENSE array will be set to DENSEREF.
+ If INDENSE >= 1, values for the DENSE array will be read from item 7.
+ If INDENSE = 2, values read for the DENSE array are assumed to
+ represent solute concentration, and will be converted to density
+ values using the equation of state.
+ dense : Transient3d
+ A float or array of floats (nlay, nrow, ncol) should be assigned as
+ values in a dictionary keyed by stress period number. dense
+ is the fluid density array read for each layer using the MODFLOW-2000
+ U2DREL array reader. The DENSE array is read only if MT3DRHOFLG is
+ equal to zero. The DENSE array may also be entered in terms of solute
+ concentration, or any other units, if INDENSE is set to 2 and the
+ constants used in the density equation of state are specified
+ appropriately.
+ extension : string
+ Filename extension (default is 'vdf')
+ unitnumber : int
+ File unit number (default is 37).
+ 
+ Attributes
+ ----------
+ 
+ Methods
+ -------
+ 
+ See Also
+ --------
+ 
+ Notes
+ -----
+ In swt_4 mtdnconc became mt3drhoflg. If the latter is defined in
+ kwargs, it will overwrite mtdnconc. The same goes for denseslp, which
+ has become drhodc.
+ 
+ When loading an existing SEAWAT model that has DENSE specified as
+ concentrations, the load process will convert those concentrations into
+ density values using the equation of state. This is only relevant when
+ mtdnconc (or mt3drhoflg) is set to zero. 
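The linear equation of state documented above is rho = DENSEREF + DRHODC * (C - CRHOREF). For the common seawater case (1000 to 1025 kg/m^3 over a 0 to 35 kg/m^3 salinity range), DRHODC is about 25/35, or 0.7143. A short sketch of that arithmetic, also exercising the mt3drhoflg/drhodc keyword aliases described in the Notes (all values illustrative):

    import flopy

    denseref, drhodc, crhoref = 1000.0, 0.7143, 0.0
    conc = 35.0  # seawater salinity, kg/m^3
    rho = denseref + drhodc * (conc - crhoref)
    print(rho)  # ~1025 kg/m^3

    # The newer mt3drhoflg/drhodc names are accepted as kwargs and
    # override the legacy mtdnconc/denseslp arguments.
    m = flopy.seawat.Seawat()
    vdf = flopy.seawat.SeawatVdf(m, mt3drhoflg=1, denseref=denseref,
                                 drhodc=drhodc)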
+ + Examples + -------- + + >>> import flopy + >>> m = flopy.seawat.Seawat() + >>> lpf = flopy.seawat.SeawatVdf(m) + + """ + unitnumber = 37 + + def __init__(self, model, mtdnconc=1, mfnadvfd=1, nswtcpl=1, iwtable=1, + densemin=0, densemax=0, dnscrit=1e-2, denseref=1.000, + denseslp=.025, crhoref=0, firstdt=0.001, indense=1, + dense=1.000, nsrhoeos=1, drhodprhd=4.46e-3, prhdref=0., + extension='vdf', unitnumber=None, filenames=None, **kwargs): + + if unitnumber is None: + unitnumber = SeawatVdf.defaultunit() + + # set filenames + if filenames is None: + filenames = [None] + elif isinstance(filenames, str): + filenames = [filenames] + + # Fill namefile items + name = [SeawatVdf.ftype()] + units = [unitnumber] + extra = [''] + + # set package name + fname = [filenames[0]] + + # Call ancestor's init to set self.parent, extension, name and unit number + Package.__init__(self, model, extension=extension, name=name, + unit_number=units, extra=extra, filenames=fname) + + nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper + + self.mtdnconc = kwargs.pop('mt3drhoflg', mtdnconc) + self.mfnadvfd = mfnadvfd + self.nswtcpl = nswtcpl + self.iwtable = iwtable + self.densemin = densemin + self.densemax = densemax + self.dnscrit = dnscrit + self.nsrhoeos = nsrhoeos + self.denseref = denseref + self.denseslp = kwargs.pop('drhodc', denseslp) + self.crhoref = crhoref + self.drhodprhd = drhodprhd + self.prhdref = prhdref + self.firstdt = firstdt + self.indense = indense + if self.mtdnconc == 0: + self.dense = Transient3d(model, (nlay, nrow, ncol), np.float32, + dense, name='dense_', + locat=self.unit_number[0]) + else: + # dense not needed for most cases so setting to None + self.dense = None + self.parent.add_package(self) + return + + def write_file(self): + """ + Write the package file + + Returns + ------- + None + + """ + f_vdf = open(self.fn_path, 'w') + + # item 1 + f_vdf.write('%10i%10i%10i%10i\n' % (self.mtdnconc, self.mfnadvfd, + self.nswtcpl, self.iwtable)) + + # item 2 + f_vdf.write('%10.4f%10.4f\n' % (self.densemin, self.densemax)) + + # item 3 + if (self.nswtcpl > 1 or self.nswtcpl == -1): + f_vdf.write('%10f\n' % (self.dnscrit)) + + # item 4 + if self.mtdnconc >= 0: + if self.nsrhoeos == 1: + f_vdf.write('%10.4f%10.4f\n' % (self.denseref, self.denseslp)) + else: + f_vdf.write('%10.4f%10.4f\n' % (self.denseref, + self.denseslp[0])) + + elif self.mtdnconc == -1: + f_vdf.write('%10.4f%10.4f%10.4f\n' % (self.denseref, + self.drhodprhd, + self.prhdref)) + f_vdf.write('%10i\n' % self.nsrhoeos) + if self.nsrhoeos == 1: + f_vdf.write('%10i%10.4f%10.4f\n' % (1, self.denseslp, + self.crhoref)) + else: + for i in range(self.nsrhoeos): + mtrhospec = 1 + i + f_vdf.write('%10i%10.4f%10.4f\n' % (mtrhospec, + self.denseslp[i], + self.crhoref[i])) + + # item 5 + f_vdf.write('%10f\n' % (self.firstdt)) + + # Transient DENSE array + if self.mtdnconc == 0: + + nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper + for kper in range(nper): + + itmp, file_entry_dense = self.dense.get_kper_entry(kper) + + # item 6 (and possibly 7) + if itmp > 0: + f_vdf.write('%10i\n' % (self.indense)) + f_vdf.write(file_entry_dense) + + else: + f_vdf.write('%10i\n' % (itmp)) + + f_vdf.close() + return + + @staticmethod + def load(f, model, nper=None, ext_unit_dict=None): + """ + Load an existing package. + + Parameters + ---------- + f : filename or file handle + File to load. + model : model object + The model object (of type :class:`flopy.seawat.swt.Seawat`) to + which this package will be added. 
+ nper : int + The number of stress periods. If nper is None, then nper will be + obtained from the model object. (default is None). + ext_unit_dict : dictionary, optional + If the arrays in the file are specified using EXTERNAL, + or older style array control records, then `f` should be a file + handle. In this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. + + Returns + ------- + vdf : SeawatVdf object + SeawatVdf object. + + Examples + -------- + + >>> import flopy + >>> mf = flopy.modflow.Modflow() + >>> dis = flopy.modflow.ModflowDis(mf) + >>> mt = flopy.mt3d.Mt3dms() + >>> swt = flopy.seawat.Seawat(modflowmodel=mf, mt3dmsmodel=mt) + >>> vdf = flopy.seawat.SeawatVdf.load('test.vdf', m) + + """ + + if model.verbose: + sys.stdout.write('loading vdf package file...\n') + + # Open file, if necessary + openfile = not hasattr(f, 'read') + if openfile: + filename = f + f = open(filename, 'r') + + # Dataset 0 -- comment line + while True: + line = f.readline() + if line[0] != '#': + break + + # Determine problem dimensions + nrow, ncol, nlay, npertemp = model.get_nrow_ncol_nlay_nper() + if nper is None: + nper = npertemp + + # Item 1: MT3DRHOFLG MFNADVFD NSWTCPL IWTABLE - line already read above + if model.verbose: + print(' loading MT3DRHOFLG MFNADVFD NSWTCPL IWTABLE...') + t = line.strip().split() + mt3drhoflg = int(t[0]) + mfnadvfd = int(t[1]) + nswtcpl = int(t[2]) + iwtable = int(t[3]) + if model.verbose: + print(' MT3DRHOFLG {}'.format(mt3drhoflg)) + print(' MFNADVFD {}'.format(mfnadvfd)) + print(' NSWTCPL {}'.format(nswtcpl)) + print(' IWTABLE {}'.format(iwtable)) + + # Item 2 -- DENSEMIN DENSEMAX + if model.verbose: + print(' loading DENSEMIN DENSEMAX...') + line = f.readline() + t = line.strip().split() + densemin = float(t[0]) + densemax = float(t[1]) + + # Item 3 -- DNSCRIT + if model.verbose: + print(' loading DNSCRIT...') + dnscrit = None + if nswtcpl > 1 or nswtcpl == -1: + line = f.readline() + t = line.strip().split() + dnscrit = float(t[0]) + + # Item 4 -- DENSEREF DRHODC + drhodprhd = None + prhdref = None + nsrhoeos = None + mtrhospec = None + crhoref = None + if mt3drhoflg >= 0: + if model.verbose: + print(' loading DENSEREF DRHODC(1)...') + line = f.readline() + t = line.strip().split() + denseref = float(t[0]) + drhodc = float(t[1]) + nsrhoeos = 1 + else: + if model.verbose: + print(' loading DENSEREF DRHODPRHD PRHDREF...') + line = f.readline() + t = line.strip().split() + denseref = float(t[0]) + drhodprhd = float(t[1]) + prhdref = float(t[2]) + + if model.verbose: + print(' loading NSRHOEOS...') + line = f.readline() + t = line.strip().split() + nsrhoeos = int(t[0]) + + if model.verbose: + print(' loading MTRHOSPEC DRHODC CRHOREF...') + mtrhospec = [] + drhodc = [] + crhoref = [] + for i in range(nsrhoeos): + line = f.readline() + t = line.strip().split() + mtrhospec.append(int(t[0])) + drhodc.append(float(t[1])) + crhoref.append(float(t[2])) + + # Item 5 -- FIRSTDT + if model.verbose: + print(' loading FIRSTDT...') + line = f.readline() + t = line.strip().split() + firstdt = float(t[0]) + + # Items 6 and 7 -- INDENSE DENSE + indense = None + dense = None + if mt3drhoflg == 0: + + # Create dense as a Transient3D record + dense = {} + + for iper in range(nper): + + if model.verbose: + print(' loading INDENSE ' + 'for stress period {}...'.format(iper + 1)) + line = f.readline() + t = line.strip().split() + indense = int(t[0]) + + if indense > 0: + name = 'DENSE_StressPeriod_{}'.format(iper) + 
t = Util3d.load(f, model, (nlay, nrow, ncol), + np.float32, name, ext_unit_dict) + if indense == 2: + t = t.array + t = denseref + drhodc * t + t = Util3d(model, (nlay, nrow, ncol), np.float32, t, + name, ext_unit_dict=ext_unit_dict) + dense[iper] = t + + dense = Transient3d(model, (nlay, nrow, ncol), np.float32, + dense, name='dense_') + + # Set indense = 1 because all concentrations converted to density + indense = 1 + + if openfile: + f.close() + + # set package unit number + unitnumber = None + filenames = [None] + if ext_unit_dict is not None: + unitnumber, filenames[0] = \ + model.get_ext_dict_attr(ext_unit_dict, + filetype=SeawatVdf.ftype()) + + # Construct and return vdf package + vdf = SeawatVdf(model, mt3drhoflg=mt3drhoflg, mfnadvfd=mfnadvfd, + nswtcpl=nswtcpl, iwtable=iwtable, + densemin=densemin, densemax=densemax, + dnscrit=dnscrit, denseref=denseref, drhodc=drhodc, + drhodprhd=drhodprhd, prhdref=prhdref, + nsrhoeos=nsrhoeos, mtrhospec=mtrhospec, + crhoref=crhoref, firstdt=firstdt, indense=indense, + dense=dense, + unitnumber=unitnumber, filenames=filenames) + return vdf + + @staticmethod + def ftype(): + return 'VDF' + + @staticmethod + def defaultunit(): + return 37 diff --git a/flopy/utils/__init__.py b/flopy/utils/__init__.py index c6ba879600..b366412eba 100644 --- a/flopy/utils/__init__.py +++ b/flopy/utils/__init__.py @@ -1,49 +1,49 @@ -""" - the main entry point of utils - - Parameters - ---------- - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - - Examples - -------- - - """ -from .mfreadnam import parsenamefile -from .util_array import Util3d, Util2d, Transient2d, Transient3d, read1d -from .util_list import MfList -from .binaryfile import BinaryHeader, HeadFile, UcnFile, CellBudgetFile, \ - HeadUFile -from .formattedfile import FormattedHeadFile -from .modpathfile import PathlineFile, EndpointFile, TimeseriesFile -from .swroutputfile import SwrStage, SwrBudget, SwrFlow, SwrExchange, \ - SwrStructure -from .observationfile import HydmodObs, SwrObs, Mf6Obs -from .reference import SpatialReference, SpatialReferenceUnstructured, \ - crs, TemporalReference -from .mflistfile import MfListBudget, MfusgListBudget, SwtListBudget, \ - SwrListBudget, Mf6ListBudget -from .check import check, get_neighbors -from .utils_def import FlopyBinaryData, totim_to_datetime -from .flopy_io import read_fixed_var, write_fixed_var -from .zonbud import ZoneBudget, read_zbarray, write_zbarray, \ - ZoneBudgetOutput, ZBNetOutput -from .mfgrdfile import MfGrdFile -from .postprocessing import get_transmissivities -from .sfroutputfile import SfrFile -from .recarray_utils import create_empty_recarray, ra_slice -from .mtlistfile import MtListBudget -from .optionblock import OptionBlock -from .rasters import Raster +""" + the main entry point of utils + + Parameters + ---------- + + Attributes + ---------- + + Methods + ------- + + See Also + -------- + + Notes + ----- + + Examples + -------- + + """ +from .mfreadnam import parsenamefile +from .util_array import Util3d, Util2d, Transient2d, Transient3d, read1d +from .util_list import MfList +from .binaryfile import BinaryHeader, HeadFile, UcnFile, CellBudgetFile, \ + HeadUFile +from .formattedfile import FormattedHeadFile +from .modpathfile import PathlineFile, EndpointFile, TimeseriesFile +from .swroutputfile import SwrStage, SwrBudget, SwrFlow, SwrExchange, \ + SwrStructure +from .observationfile import HydmodObs, SwrObs, Mf6Obs +from .reference import SpatialReference, SpatialReferenceUnstructured, \ + 
crs, TemporalReference +from .mflistfile import MfListBudget, MfusgListBudget, SwtListBudget, \ + SwrListBudget, Mf6ListBudget +from .check import check, get_neighbors +from .utils_def import FlopyBinaryData, totim_to_datetime +from .flopy_io import read_fixed_var, write_fixed_var +from .zonbud import ZoneBudget, read_zbarray, write_zbarray, \ + ZoneBudgetOutput, ZBNetOutput +from .mfgrdfile import MfGrdFile +from .postprocessing import get_transmissivities +from .sfroutputfile import SfrFile +from .recarray_utils import create_empty_recarray, ra_slice +from .mtlistfile import MtListBudget +from .optionblock import OptionBlock +from .rasters import Raster from .gridintersect import GridIntersect, ModflowGridIndices \ No newline at end of file diff --git a/flopy/utils/binaryfile.py b/flopy/utils/binaryfile.py index a824ac0fa4..90263f59ee 100755 --- a/flopy/utils/binaryfile.py +++ b/flopy/utils/binaryfile.py @@ -1,1845 +1,1845 @@ -""" -Module to read MODFLOW binary output files. The module contains four -important classes that can be accessed by the user. - -* HeadFile (Binary head file. Can also be used for drawdown) -* HeadUFile (Binary MODFLOW-USG unstructured head file) -* UcnFile (Binary concentration file from MT3DMS) -* CellBudgetFile (Binary cell-by-cell flow file) - -""" -from __future__ import print_function -import numpy as np -import warnings -from collections import OrderedDict -from ..utils.datafile import Header, LayerFile - - -class BinaryHeader(Header): - """ - The binary_header class is a class to create headers for MODFLOW - binary files. - - Parameters - ---------- - bintype : str - is the type of file being opened (head and ucn file currently - supported) - precision : str - is the precision of the floating point data in the file - - """ - - def __init__(self, bintype=None, precision='single'): - super(BinaryHeader, self).__init__(bintype, precision) - - def set_values(self, **kwargs): - """ - Set values using kwargs - """ - ikey = ['ntrans', 'kstp', 'kper', 'ncol', 'nrow', 'ilay', 'ncpl', - 'nodes', 'm2', 'm3'] - fkey = ['pertim', 'totim'] - ckey = ['text'] - for k in ikey: - if k in kwargs.keys(): - try: - self.header[0][k] = int(kwargs[k]) - except: - msg = '{0} key not available in {1} header ' - 'dtype'.format(k, self.header_type) - print(msg) - for k in fkey: - if k in kwargs.keys(): - try: - self.header[0][k] = float(kwargs[k]) - except: - msg = '{} key not available '.format(k) + \ - 'in {} header dtype'.format(self.header_type) - print(msg) - for k in ckey: - if k in kwargs.keys(): - # Convert to upper case to be consistent case used by MODFLOW - # text strings. 
Necessary to work with HeadFile and UcnFile - # routines - ttext = kwargs[k].upper() - # trim a long string - if len(ttext) > 16: - text = ttext[0:16] - # pad a short string - elif len(ttext) < 16: - text = "{:<16}".format(ttext) - # the string is just right - else: - text = ttext - self.header[0][k] = text - else: - self.header[0][k] = 'DUMMY TEXT' - - @staticmethod - def set_dtype(bintype=None, precision='single'): - """ - Set the dtype - - """ - header = Header(filetype=bintype, precision=precision) - return header.dtype - - @staticmethod - def create(bintype=None, precision='single', **kwargs): - """ - Create a binary header - - """ - header = BinaryHeader(bintype=bintype, precision=precision) - if header.get_dtype() is not None: - header.set_values(**kwargs) - return header.get_values() - - -def binaryread_struct(file, vartype, shape=(1,), charlen=16): - """ - Read text, a scalar value, or an array of values from a binary file. - - file : file object - is an open file object - vartype : type - is the return variable type: str, numpy.int32, numpy.float32, - or numpy.float64 - shape : tuple - is the shape of the returned array (shape(1, ) returns a single - value) for example, shape = (nlay, nrow, ncol) - charlen : int - is the length of the text string. Note that string arrays - cannot be returned, only multi-character strings. Shape has no - affect on strings. - - """ - import struct - import numpy as np - - # store the mapping from type to struct format (fmt) - typefmtd = {np.int32: 'i', np.float32: 'f', np.float64: 'd'} - - # read a string variable of length charlen - if vartype == str: - result = file.read(charlen * 1) - - # read other variable types - else: - fmt = typefmtd[vartype] - # find the number of bytes for one value - numbytes = vartype(1).nbytes - # find the number of values - nval = np.core.fromnumeric.prod(shape) - fmt = str(nval) + fmt - s = file.read(numbytes * nval) - result = struct.unpack(fmt, s) - if nval == 1: - result = vartype(result[0]) - else: - result = np.array(result, dtype=vartype) - result = np.reshape(result, shape) - return result - - -def binaryread(file, vartype, shape=(1,), charlen=16): - """ - Uses numpy to read from binary file. This was found to be faster than the - struct approach and is used as the default. - - """ - - # read a string variable of length charlen - if vartype == str: - result = file.read(charlen * 1) - else: - # find the number of values - nval = np.prod(shape) - result = np.fromfile(file, vartype, nval) - if nval == 1: - result = result # [0] - else: - result = np.reshape(result, shape) - return result - - -def join_struct_arrays(arrays): - """ - Simple function that can join two numpy structured arrays. - - """ - newdtype = sum((a.dtype.descr for a in arrays), []) - newrecarray = np.empty(len(arrays[0]), dtype=newdtype) - for a in arrays: - for name in a.dtype.names: - newrecarray[name] = a[name] - return newrecarray - - -def get_headfile_precision(filename): - """ - Determine precision of a MODFLOW head file. - - Parameters - ---------- - filename : str - Name of binary MODFLOW file to determine precision. 
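binaryread above is a thin wrapper around numpy.fromfile, and join_struct_arrays merges the two budget-header record arrays field by field. A self-contained sketch of that merge on synthetic arrays (the field names and data are made up):

    import numpy as np

    a = np.array([(1, 2)], dtype=[('kstp', 'i4'), ('kper', 'i4')])
    b = np.array([(1.5,)], dtype=[('totim', 'f8')])

    # Same strategy as join_struct_arrays: concatenate the dtype
    # descriptors, then copy each field into the new record array.
    newdtype = sum((x.dtype.descr for x in (a, b)), [])
    merged = np.empty(len(a), dtype=newdtype)
    for x in (a, b):
        for name in x.dtype.names:
            merged[name] = x[name]
    print(merged)  # [(1, 2, 1.5)]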
- - Returns - ------- - result : str - Result will be unknown, single, or double - - """ - - # Set default result if neither single or double works - result = 'unknown' - - # Create string containing set of ascii characters - asciiset = ' ' - for i in range(33, 127): - asciiset += chr(i) - - # Open file, and check filesize to ensure this is not an empty file - f = open(filename, 'rb') - f.seek(0, 2) - totalbytes = f.tell() - f.seek(0, 0) # reset to beginning - assert f.tell() == 0 - if totalbytes == 0: - raise IOError('datafile error: file is empty: ' + str(filename)) - - # first try single - vartype = [('kstp', ' 1 and self.nrow * self.ncol > 10000000: - s = 'Possible error. ncol ({}) * nrow ({}) > 10,000,000 ' - s = s.format(self.ncol, self.nrow) - warnings.warn(s) - self.file.seek(0, 2) - self.totalbytes = self.file.tell() - self.file.seek(0, 0) - ipos = 0 - while ipos < self.totalbytes: - header = self._get_header() - self.recordarray.append(header) - if self.text.upper() not in header['text']: - continue - if ipos == 0: - self.times.append(header['totim']) - kstpkper = (header['kstp'], header['kper']) - self.kstpkper.append(kstpkper) - else: - totim = header['totim'] - if totim != self.times[-1]: - self.times.append(totim) - kstpkper = (header['kstp'], header['kper']) - self.kstpkper.append(kstpkper) - ipos = self.file.tell() - self.iposarray.append(ipos) - databytes = self.get_databytes(header) - self.file.seek(databytes, 1) - ipos = self.file.tell() - - # self.recordarray contains a recordarray of all the headers. - self.recordarray = np.array(self.recordarray, dtype=self.header_dtype) - self.iposarray = np.array(self.iposarray) - self.nlay = np.max(self.recordarray['ilay']) - return - - def get_databytes(self, header): - """ - - Parameters - ---------- - header : datafile.Header - header object - - Returns - ------- - databytes : int - size of the data array, in bytes, following the header - - """ - return np.int64(header['ncol']) * \ - np.int64(header['nrow']) * \ - np.int64(self.realtype(1).nbytes) - - def _read_data(self, shp): - return binaryread(self.file, self.realtype, - shape=shp) - - def _get_header(self): - """ - Read the file header - - """ - header = binaryread(self.file, self.header_dtype, (1,)) - return header[0] - - def get_ts(self, idx): - """ - Get a time series from the binary file. - - Parameters - ---------- - idx : tuple of ints, or a list of a tuple of ints - idx can be (layer, row, column) or it can be a list in the form - [(layer, row, column), (layer, row, column), ...]. The layer, - row, and column values must be zero based. - - Returns - ---------- - out : numpy array - Array has size (ntimes, ncells + 1). The first column in the - data array will contain time (totim). 
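get_databytes above is what lets the index pass seek past each layer's data block without reading it: every header is followed by exactly nrow * ncol reals. A tiny worked example of that arithmetic (the grid size is made up):

    import numpy as np

    nrow, ncol = 100, 200
    realtype = np.float32
    databytes = np.int64(ncol) * np.int64(nrow) * np.int64(realtype(1).nbytes)
    print(databytes)  # 80000 bytes skipped per single-precision layer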
- - See Also - -------- - - Notes - ----- - - The layer, row, and column values must be zero-based, and must be - within the following ranges: 0 <= k < nlay; 0 <= i < nrow; 0 <= j < ncol - - Examples - -------- - - """ - kijlist = self._build_kijlist(idx) - nstation = self._get_nstation(idx, kijlist) - - # Initialize result array and put times in first column - result = self._init_result(nstation) - - istat = 1 - for k, i, j in kijlist: - ioffset = (i * self.ncol + j) * self.realtype(1).nbytes - for irec, header in enumerate(self.recordarray): - ilay = header[ - 'ilay'] - 1 # change ilay from header to zero-based - if ilay != k: - continue - ipos = np.long(self.iposarray[irec]) - - # Calculate offset necessary to reach intended cell - self.file.seek(ipos + np.long(ioffset), 0) - - # Find the time index and then put value into result in the - # correct location. - itim = np.where(result[:, 0] == header['totim'])[0] - result[itim, istat] = binaryread(self.file, self.realtype) - istat += 1 - return result - - -class HeadFile(BinaryLayerFile): - """ - HeadFile Class. - - Parameters - ---------- - filename : string - Name of the concentration file - text : string - Name of the text string in the head file. Default is 'head' - precision : string - 'auto', 'single' or 'double'. Default is 'auto'. - verbose : bool - Write information to the screen. Default is False. - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - The HeadFile class provides simple ways to retrieve 2d and 3d - head arrays from a MODFLOW binary head file and time series - arrays for one or more cells. - - The BinaryLayerFile class is built on a record array consisting of - headers, which are record arrays of the modflow header information - (kstp, kper, pertim, totim, text, nrow, ncol, ilay) - and long integers, which are pointers to first bytes of data for - the corresponding data array. - - Examples - -------- - - >>> import flopy.utils.binaryfile as bf - >>> hdobj = bf.HeadFile('model.hds', precision='single') - >>> hdobj.list_records() - >>> rec = hdobj.get_data(kstpkper=(1, 50)) - - >>> ddnobj = bf.HeadFile('model.ddn', text='drawdown', precision='single') - >>> ddnobj.list_records() - >>> rec = ddnobj.get_data(totim=100.) - - - """ - - def __init__(self, filename, text='head', precision='auto', - verbose=False, **kwargs): - self.text = text.encode() - if precision == 'auto': - precision = get_headfile_precision(filename) - if precision == 'unknown': - s = 'Error. Precision could not be determined for {}'.format( - filename) - print(s) - raise Exception() - self.header_dtype = BinaryHeader.set_dtype(bintype='Head', - precision=precision) - super(HeadFile, self).__init__(filename, precision, verbose, kwargs) - return - - -class UcnFile(BinaryLayerFile): - """ - UcnFile Class. - - Parameters - ---------- - filename : string - Name of the concentration file - text : string - Name of the text string in the ucn file. Default is 'CONCENTRATION' - precision : string - 'auto', 'single' or 'double'. Default is 'auto'. - verbose : bool - Write information to the screen. Default is False. - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - The UcnFile class provides simple ways to retrieve 2d and 3d - concentration arrays from a MT3D binary head file and time series - arrays for one or more cells. 
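Combining the HeadFile class just shown with the inherited get_ts yields head time series for individual cells, with totim in the first column. A usage sketch, assuming a binary head file model.hds exists (the filename is hypothetical):

    import flopy.utils.binaryfile as bf

    hds = bf.HeadFile('model.hds')           # precision='auto' by default
    ts = hds.get_ts([(0, 5, 5), (0, 5, 6)])  # zero-based (layer, row, col)

    # Column 0 is totim; one column follows per requested cell.
    for row in ts:
        print(row[0], row[1:])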
- - The BinaryLayerFile class is built on a record array consisting of - headers, which are record arrays of the modflow header information - (kstp, kper, pertim, totim, text, nrow, ncol, ilay) - and long integers, which are pointers to first bytes of data for - the corresponding data array. - - Examples - -------- - - >>> import flopy.utils.binaryfile as bf - >>> ucnobj = bf.UcnFile('MT3D001.UCN', precision='single') - >>> ucnobj.list_records() - >>> rec = ucnobj.get_data(kstpkper=(1,1)) - - """ - - def __init__(self, filename, text='concentration', precision='auto', - verbose=False, **kwargs): - self.text = text.encode() - if precision == 'auto': - precision = get_headfile_precision(filename) - if precision == 'unknown': - s = 'Error. Precision could not be determined for {}'.format( - filename) - print(s) - raise Exception() - self.header_dtype = BinaryHeader.set_dtype(bintype='Ucn', - precision=precision) - super(UcnFile, self).__init__(filename, precision, verbose, kwargs) - return - - -class BudgetIndexError(Exception): - pass - - -class CellBudgetFile(object): - """ - CellBudgetFile Class. - - Parameters - ---------- - filename : string - Name of the cell budget file - precision : string - 'single' or 'double'. Default is 'single'. - verbose : bool - Write information to the screen. Default is False. - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - - Examples - -------- - - >>> import flopy.utils.binaryfile as bf - >>> cbb = bf.CellBudgetFile('mymodel.cbb') - >>> cbb.list_records() - >>> rec = cbb.get_data(kstpkper=(0,0), text='RIVER LEAKAGE') - - """ - - def __init__(self, filename, precision='auto', verbose=False, **kwargs): - self.filename = filename - self.precision = precision - self.verbose = verbose - self.file = open(self.filename, 'rb') - # Get filesize to ensure this is not an empty file - self.file.seek(0, 2) - totalbytes = self.file.tell() - self.file.seek(0, 0) # reset to beginning - assert self.file.tell() == 0 - if totalbytes == 0: - raise IOError('datafile error: file is empty: ' + str(filename)) - self.nrow = 0 - self.ncol = 0 - self.nlay = 0 - self.nper = 0 - self.times = [] - self.kstpkper = [] - self.recordarray = [] - self.iposheader = [] - self.iposarray = [] - self.textlist = [] - self.imethlist = [] - self.paknamlist = [] - self.nrecords = 0 - - self.dis = None - self.modelgrid = None - if 'model' in kwargs.keys(): - self.model = kwargs.pop('model') - self.modelgrid = self.model.modelgrid - self.dis = self.model.dis - if 'dis' in kwargs.keys(): - self.dis = kwargs.pop('dis') - self.modelgrid = self.dis.parent.modelgrid - if 'sr' in kwargs.keys(): - from ..utils import SpatialReferenceUnstructured - from ..discretization import StructuredGrid, UnstructuredGrid - sr = kwargs.pop('sr') - if isinstance(sr, SpatialReferenceUnstructured): - self.modelgrid = UnstructuredGrid(vertices=sr.verts, - iverts=sr.iverts, - xcenters=sr.xc, - ycenters=sr.yc, - ncpl=sr.ncpl) - else: - self.modelgrid = StructuredGrid(delc=sr.delc, delr=sr.delr, - xoff=sr.xll, yoff=sr.yll, - angrot=sr.rotation) - if 'modelgrid' in kwargs.keys(): - self.modelgrid = kwargs.pop('modelgrid') - if len(kwargs.keys()) > 0: - args = ','.join(kwargs.keys()) - raise Exception('LayerFile error: unrecognized kwargs: ' + args) - - if precision == 'auto': - success = self._set_precision('single') - if not success: - success = self._set_precision('double') - if not success: - s = "Budget precision could not be auto determined" - raise BudgetIndexError(s) - elif 
precision == 'single': - success = self._set_precision(precision) - elif precision == 'double': - success = self._set_precision(precision) - else: - raise Exception('Unknown precision specified: ' + precision) - - if not success: - s = "Budget file could not be read using {} " \ - "precision".format(precision) - raise Exception(s) - - return - - def __enter__(self): - return self - - def __exit__(self, *exc): - self.close() - - def __reset(self): - """ - Reset indexing lists when determining precision - """ - self.file.seek(0, 0) - self.times = [] - self.kstpkper = [] - self.recordarray = [] - self.iposheader = [] - self.iposarray = [] - self.textlist = [] - self.imethlist = [] - self.paknamlist = [] - self.nrecords = 0 - - def _set_precision(self, precision='single'): - """ - Method to set the budget precsion from a CBC file. Enables - Auto precision code to work - - Parameters - ---------- - precision : str - budget file precision (accepts 'single' or 'double') - """ - success = True - h1dt = [('kstp', 'i4'), ('kper', 'i4'), ('text', 'a16'), - ('ncol', 'i4'), ('nrow', 'i4'), ('nlay', 'i4')] - if precision == 'single': - self.realtype = np.float32 - ffmt = 'f4' - else: - self.realtype = np.float64 - ffmt = 'f8' - - h2dt0 = [('imeth', 'i4'), ('delt', ffmt), ('pertim', ffmt), - ('totim', ffmt)] - h2dt = [('imeth', 'i4'), ('delt', ffmt), ('pertim', ffmt), - ('totim', ffmt), ('modelnam', 'a16'), ('paknam', 'a16'), - ('modelnam2', 'a16'), ('paknam2', 'a16')] - self.header1_dtype = np.dtype(h1dt) - self.header2_dtype0 = np.dtype(h2dt0) - self.header2_dtype = np.dtype(h2dt) - hdt = h1dt + h2dt - self.header_dtype = np.dtype(hdt) - - try: - self._build_index() - except BudgetIndexError: - success = False - self.__reset() - - return success - - def _totim_from_kstpkper(self, kstpkper): - if self.dis is None: - return -1.0 - kstp, kper = kstpkper - perlen = self.dis.perlen.array - nstp = self.dis.nstp.array[kper] - tsmult = self.dis.tsmult.array[kper] - kper_len = np.sum(perlen[:kper]) - this_perlen = perlen[kper] - if tsmult == 1: - dt1 = this_perlen / float(nstp) - else: - dt1 = this_perlen * (tsmult - 1.0) / ((tsmult ** nstp) - 1.0) - kstp_len = [dt1] - for i in range(kstp + 1): - kstp_len.append(kstp_len[-1] * tsmult) - # kstp_len = np.array(kstp_len) - # kstp_len = kstp_len[:kstp].sum() - kstp_len = sum(kstp_len[:kstp + 1]) - return kper_len + kstp_len - - def _build_index(self): - """ - Build the ordered dictionary, which maps the header information - to the position in the binary file. 
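_totim_from_kstpkper above rebuilds elapsed time from DIS data using the standard MODFLOW geometric time-step series, dt1 = perlen * (tsmult - 1) / (tsmult**nstp - 1), with each later step tsmult times the previous one. A worked check of that formula (numbers illustrative):

    perlen, nstp, tsmult = 10.0, 4, 1.5

    dt1 = perlen * (tsmult - 1.0) / (tsmult ** nstp - 1.0)
    steps = [dt1 * tsmult ** i for i in range(nstp)]
    print([round(dt, 4) for dt in steps])  # [1.2308, 1.8462, 2.7692, 4.1538]
    print(round(sum(steps), 6))            # 10.0 -- the steps fill the period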
- """ - asciiset = ' ' - for i in range(33, 127): - asciiset += chr(i) - - header = self._get_header() - self.nrow = header["nrow"] - self.ncol = header["ncol"] - self.nlay = np.abs(header["nlay"]) - text = header['text'] - if isinstance(text, bytes): - text = text.decode() - if self.nrow < 0 or self.ncol < 0: - raise Exception("negative nrow, ncol") - self.file.seek(0, 2) - self.totalbytes = self.file.tell() - self.file.seek(0, 0) - self.recorddict = OrderedDict() - ipos = 0 - while ipos < self.totalbytes: - self.iposheader.append(ipos) - header = self._get_header() - self.nrecords += 1 - totim = header['totim'] - if totim == 0: - totim = self._totim_from_kstpkper( - (header["kstp"] - 1, header["kper"] - 1)) - header["totim"] = totim - if totim >= 0 and totim not in self.times: - self.times.append(totim) - kstpkper = (header['kstp'], header['kper']) - if kstpkper not in self.kstpkper: - self.kstpkper.append(kstpkper) - if header['text'] not in self.textlist: - # check the precision of the file using text records - try: - tlist = [header['text'], header['modelnam']] - for text in tlist: - if isinstance(text, bytes): - text = text.decode() - for t in text: - if t.upper() not in asciiset: - raise Exception() - - except: - raise BudgetIndexError("Improper precision") - self.textlist.append(header['text']) - self.imethlist.append(header['imeth']) - if header['paknam'] not in self.paknamlist: - self.paknamlist.append(header['paknam']) - ipos = self.file.tell() - - if self.verbose: - for itxt in ['kstp', 'kper', 'text', 'ncol', 'nrow', 'nlay', - 'imeth', 'delt', 'pertim', 'totim', 'modelnam', - 'paknam', 'modelnam2', 'paknam2']: - s = header[itxt] - if isinstance(s, bytes): - s = s.decode() - print(itxt + ': ' + str(s)) - print('file position: ', ipos) - if int(header['imeth']) != 5 and \ - int(header['imeth']) != 6 and \ - int(header['imeth']) != 7: - print('') - - # store record and byte position mapping - self.recorddict[ - tuple(header)] = ipos # store the position right after header2 - self.recordarray.append(header) - self.iposarray.append( - ipos) # store the position right after header2 - - # skip over the data to the next record and set ipos - self._skip_record(header) - ipos = self.file.tell() - - # convert to numpy arrays - self.recordarray = np.array(self.recordarray, dtype=self.header_dtype) - self.iposheader = np.array(self.iposheader, dtype=np.int64) - self.iposarray = np.array(self.iposarray, dtype=np.int64) - self.nper = self.recordarray["kper"].max() - return - - def _skip_record(self, header): - """ - Skip over this record, not counting header and header2. 
- - """ - nlay = abs(header['nlay']) - nrow = header['nrow'] - ncol = header['ncol'] - imeth = header['imeth'] - if imeth == 0: - nbytes = (nrow * ncol * nlay * self.realtype(1).nbytes) - elif imeth == 1: - nbytes = (nrow * ncol * nlay * self.realtype(1).nbytes) - elif imeth == 2: - nlist = binaryread(self.file, np.int32)[0] - nbytes = nlist * (np.int32(1).nbytes + self.realtype(1).nbytes) - elif imeth == 3: - nbytes = (nrow * ncol * self.realtype(1).nbytes) - nbytes += (nrow * ncol * np.int32(1).nbytes) - elif imeth == 4: - nbytes = (nrow * ncol * self.realtype(1).nbytes) - elif imeth == 5: - nauxp1 = binaryread(self.file, np.int32)[0] - naux = nauxp1 - 1 - - for i in range(naux): - temp = binaryread(self.file, str, charlen=16) - nlist = binaryread(self.file, np.int32)[0] - if self.verbose: - print('naux: ', naux) - print('nlist: ', nlist) - print('') - nbytes = nlist * (np.int32(1).nbytes + self.realtype(1).nbytes + - naux * self.realtype(1).nbytes) - elif imeth == 6: - # read rest of list data - nauxp1 = binaryread(self.file, np.int32)[0] - naux = nauxp1 - 1 - - for i in range(naux): - temp = binaryread(self.file, str, charlen=16) - nlist = binaryread(self.file, np.int32)[0] - if self.verbose: - print('naux: ', naux) - print('nlist: ', nlist) - print('') - nbytes = nlist * ( - np.int32(1).nbytes * 2 + self.realtype(1).nbytes + - naux * self.realtype(1).nbytes) - else: - raise Exception('invalid method code ' + str(imeth)) - if nbytes != 0: - self.file.seek(nbytes, 1) - return - - def _get_header(self): - """ - Read the file header - - """ - header1 = binaryread(self.file, self.header1_dtype, (1,)) - nlay = header1['nlay'] - if nlay < 0: - # fill header2 by first reading imeth, delt, pertim and totim - # and then adding modelnames and paknames if imeth = 6 - temp = binaryread(self.file, self.header2_dtype0, (1,)) - header2 = np.array([(0, 0., 0., 0., '', '', '', '')], - dtype=self.header2_dtype) - for name in temp.dtype.names: - header2[name] = temp[name] - if int(header2['imeth']) == 6: - header2['modelnam'] = binaryread(self.file, str, charlen=16) - header2['paknam'] = binaryread(self.file, str, charlen=16) - header2['modelnam2'] = binaryread(self.file, str, charlen=16) - header2['paknam2'] = binaryread(self.file, str, charlen=16) - else: - header2 = np.array([(0, 0., 0., 0., '', '', '', '')], - dtype=self.header2_dtype) - fullheader = join_struct_arrays([header1, header2]) - return fullheader[0] - - def _find_text(self, text): - """ - Determine if selected record name is in budget file - - """ - # check and make sure that text is in file - text16 = None - if text is not None: - if isinstance(text, bytes): - ttext = text.decode() - else: - ttext = text - for t in self.textlist: - if ttext.upper() in t.decode(): - text16 = t - break - if text16 is None: - errmsg = 'The specified text string is not in the budget file.' - raise Exception(errmsg) - return text16 - - def _find_paknam(self, paknam): - """ - Determine if selected record name is in budget file - - """ - # check and make sure that text is in file - paknam16 = None - if paknam is not None: - if isinstance(paknam, bytes): - tpaknam = paknam.decode() - else: - tpaknam = paknam - for t in self._unique_package_names(): - if tpaknam.upper() in t.decode(): - paknam16 = t - break - if paknam16 is None: - errmsg = 'The specified package name string is not ' + \ - 'in the budget file.' 
- raise Exception(errmsg) - return paknam16 - - def list_records(self): - """ - Print a list of all of the records in the file - """ - for rec in self.recordarray: - if isinstance(rec, bytes): - rec = rec.decode() - print(rec) - return - - def list_unique_records(self): - """ - Print a list of unique record names - """ - print('RECORD IMETH') - print(22 * '-') - for rec, imeth in zip(self.textlist, self.imethlist): - if isinstance(rec, bytes): - rec = rec.decode() - print('{:16} {:5d}'.format(rec.strip(), imeth)) - return - - def list_unique_packages(self): - """ - Print a list of unique package names - """ - for rec in self._unique_package_names(): - if isinstance(rec, bytes): - rec = rec.decode() - print(rec) - return - - def get_unique_record_names(self, decode=False): - """ - Get a list of unique record names in the file - - Parameters - ---------- - decode : bool - Optional boolean used to decode byte strings (default is False). - - Returns - ---------- - names : list of strings - List of unique text names in the binary file. - - """ - if decode: - names = [] - for text in self.textlist: - if isinstance(text, bytes): - text = text.decode() - names.append(text) - else: - names = self.textlist - return names - - def get_unique_package_names(self, decode=False): - """ - Get a list of unique package names in the file - - Parameters - ---------- - decode : bool - Optional boolean used to decode byte strings (default is False). - - Returns - ---------- - names : list of strings - List of unique package names in the binary file. - - """ - if decode: - names = [] - for text in self.paknamlist: - if isinstance(text, bytes): - text = text.decode() - names.append(text) - else: - names = self.paknamlist - return names - - def _unique_package_names(self): - """ - Get a list of unique package names in the file - - Returns - ---------- - out : list of strings - List of unique package names in the binary file. - - """ - return self.paknamlist - - def get_kstpkper(self): - """ - Get a list of unique stress periods and time steps in the file - - Returns - ---------- - out : list of (kstp, kper) tuples - List of unique kstp, kper combinations in binary file. kstp and - kper values are zero-based. - - """ - kstpkper = [] - for kstp, kper in self.kstpkper: - kstpkper.append((kstp - 1, kper - 1)) - return kstpkper - - def get_indices(self, text=None): - """ - Get a list of indices for a selected record name - - Parameters - ---------- - text : str - The text identifier for the record. Examples include - 'RIVER LEAKAGE', 'STORAGE', 'FLOW RIGHT FACE', etc. - - Returns - ---------- - out : tuple - indices of selected record name in budget file. - - """ - # check and make sure that text is in file - if text is not None: - text16 = self._find_text(text) - select_indices = np.where((self.recordarray['text'] == text16)) - if isinstance(select_indices, tuple): - select_indices = select_indices[0] - else: - select_indices = None - return select_indices - - def get_position(self, idx, header=False): - """ - Get the starting position of the data or header for a specified record - number in the binary budget file. - - Parameters - ---------- - idx : int - The zero-based record number. The first record is record 0. - header : bool - If True, the position of the start of the header data is returned. - If False, the position of the start of the data is returned - (default is False). - - Returns - ------- - ipos : int64 - The position of the start of the data in the cell budget file - or the start of the header. 
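Together, the listing methods above and get_kstpkper/get_indices/get_position make it easy to inspect a budget file before pulling any data. A usage sketch in the style of this module's Examples sections ('mymodel.cbb' is a placeholder file name):

>>> import flopy.utils.binaryfile as bf
>>> cbb = bf.CellBudgetFile('mymodel.cbb')
>>> names = cbb.get_unique_record_names(decode=True)
>>> steps = cbb.get_kstpkper()               # zero-based (kstp, kper) tuples
>>> ipos = cbb.get_position(0, header=True)  # byte offset of the first header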
- - """ - if header: - ipos = self.iposheader[idx] - else: - ipos = self.iposarray[idx] - return ipos - - def get_data(self, idx=None, kstpkper=None, totim=None, text=None, - paknam=None, full3D=False): - """ - Get data from the binary budget file. - - Parameters - ---------- - idx : int or list - The zero-based record number. The first record is record 0. - kstpkper : tuple of ints - A tuple containing the time step and stress period (kstp, kper). - The kstp and kper values are zero based. - totim : float - The simulation time. - text : str - The text identifier for the record. Examples include - 'RIVER LEAKAGE', 'STORAGE', 'FLOW RIGHT FACE', etc. - full3D : boolean - If true, then return the record as a three dimensional numpy - array, even for those list-style records written as part of a - 'COMPACT BUDGET' MODFLOW budget file. (Default is False.) - - Returns - ---------- - recordlist : list of records - A list of budget objects. The structure of the returned object - depends on the structure of the data in the cbb file. - - If full3D is True, then this method will return a numpy masked - array of size (nlay, nrow, ncol) for those list-style - 'COMPACT BUDGET' records written by MODFLOW. - - See Also - -------- - - Notes - ----- - - Examples - -------- - - """ - # trap for totim error - if totim is not None: - if len(self.times) == 0: - errmsg = '''This is an older style budget file that - does not have times in it. Use the MODFLOW - compact budget format if you want to work with - times. Or you may access this file using the - kstp and kper arguments or the idx argument.''' - raise Exception(errmsg) - - # check and make sure that text is in file - text16 = None - if text is not None: - text16 = self._find_text(text) - paknam16 = None - if paknam is not None: - paknam16 = self._find_paknam(paknam) - - if kstpkper is not None: - kstp1 = kstpkper[0] + 1 - kper1 = kstpkper[1] + 1 - if text is None and paknam is None: - select_indices = np.where( - (self.recordarray['kstp'] == kstp1) & - (self.recordarray['kper'] == kper1)) - else: - if paknam is None and text is not None: - select_indices = np.where( - (self.recordarray['kstp'] == kstp1) & - (self.recordarray['kper'] == kper1) & - (self.recordarray['text'] == text16)) - elif text is None and paknam is not None: - select_indices = np.where( - (self.recordarray['kstp'] == kstp1) & - (self.recordarray['kper'] == kper1) & - (self.recordarray['paknam'] == paknam16)) - else: - select_indices = np.where( - (self.recordarray['kstp'] == kstp1) & - (self.recordarray['kper'] == kper1) & - (self.recordarray['text'] == text16) & - (self.recordarray['paknam'] == paknam16)) - - elif totim is not None: - if text is None and paknam is None: - select_indices = np.where( - (self.recordarray['totim'] == totim)) - else: - if paknam is None and text is not None: - select_indices = np.where( - (self.recordarray['totim'] == totim) & - (self.recordarray['text'] == text16)) - elif text is None and paknam is not None: - select_indices = np.where( - (self.recordarray['totim'] == totim) & - (self.recordarray['paknam'] == paknam16)) - else: - select_indices = np.where( - (self.recordarray['totim'] == totim) & - (self.recordarray['text'] == text16) & - (self.recordarray['paknam'] == paknam16)) - - # allow for idx to be a list or a scalar - elif idx is not None: - if isinstance(idx, list): - select_indices = idx - else: - select_indices = [idx] - - # case where only text is entered - elif text is not None: - select_indices = np.where((self.recordarray['text'] == text16)) 
- - else: - raise TypeError( - "get_data() missing 1 required argument: 'kstpkper', 'totim', " - "'idx', or 'text'") - - # build and return the record list - if isinstance(select_indices, tuple): - select_indices = select_indices[0] - recordlist = [] - for idx in select_indices: - rec = self.get_record(idx, full3D=full3D) - recordlist.append(rec) - - return recordlist - - def get_ts(self, idx, text=None, times=None): - """ - Get a time series from the binary budget file. - - Parameters - ---------- - idx : tuple of ints, or a list of a tuple of ints - idx can be (layer, row, column) or it can be a list in the form - [(layer, row, column), (layer, row, column), ...]. The layer, - row, and column values must be zero based. - text : str - The text identifier for the record. Examples include - 'RIVER LEAKAGE', 'STORAGE', 'FLOW RIGHT FACE', etc. - times : iterable of floats - List of times to from which to get time series. - - Returns - ---------- - out : numpy array - Array has size (ntimes, ncells + 1). The first column in the - data array will contain time (totim). - - See Also - -------- - - Notes - ----- - - The layer, row, and column values must be zero-based, and must be - within the following ranges: 0 <= k < nlay; 0 <= i < nrow; 0 <= j < ncol - - Examples - -------- - - """ - # issue exception if text not provided - if text is None: - etxt = 'text keyword must be provided to CellBudgetFile ' + \ - 'get_ts() method.' - raise Exception(etxt) - - kijlist = self._build_kijlist(idx) - nstation = self._get_nstation(idx, kijlist) - - # Initialize result array and put times in first column - result = self._init_result(nstation) - - kk = self.get_kstpkper() - timesint = self.get_times() - if len(timesint) < 1: - if times is None: - timesint = [x + 1 for x in range(len(kk))] - else: - if isinstance(times, np.ndarray): - times = times.tolist() - if len(times) != len(kk): - etxt = 'times passed to CellBudgetFile get_ts() ' + \ - 'method must be equal to {} '.format(len(kk)) + \ - 'not {}'.format(len(times)) - raise Exception(etxt) - timesint = times - for idx, t in enumerate(timesint): - result[idx, 0] = t - - for itim, k in enumerate(kk): - try: - v = self.get_data(kstpkper=k, text=text, full3D=True) - # skip missing data - required for storage - if len(v) > 0: - v = v[0] - istat = 1 - for k, i, j in kijlist: - result[itim, istat] = v[k, i, j].copy() - istat += 1 - except ValueError: - v = self.get_data(kstpkper=k, text=text) - # skip missing data - required for storage - if len(v)> 0: - if self.modelgrid is None: - s = "A modelgrid instance must be provided during " \ - "instantiation to get IMETH=6 timeseries data" - raise AssertionError(s) - - if self.modelgrid.grid_type == 'structured': - ndx = [lrc[0] * (self.modelgrid.nrow * - self.modelgrid.ncol) + - lrc[1] * self.modelgrid.ncol + - (lrc[2] + 1) for lrc in kijlist] - else: - ndx = [lrc[0] * self.modelgrid.ncpl + - (lrc[-1] + 1) for lrc in kijlist] - - for vv in v: - field = vv.dtype.names[2] - dix = np.where(np.isin(vv['node'], ndx))[0] - if len(dix) > 0: - result[itim, 1:] = vv[field][dix] - - return result - - def _build_kijlist(self, idx): - if isinstance(idx, list): - kijlist = idx - elif isinstance(idx, tuple): - kijlist = [idx] - else: - raise Exception('Could not build kijlist from ', idx) - - # Check to make sure that k, i, j are within range, otherwise - # the seek approach won't work. Can't use k = -1, for example. - for k, i, j in kijlist: - fail = False - errmsg = 'Invalid cell index. 
Cell ' + str( - (k, i, j)) + ' not within model grid: ' + \ - str((self.nlay, self.nrow, self.ncol)) - if k < 0 or k > self.nlay - 1: - fail = True - if i < 0 or i > self.nrow - 1: - fail = True - if j < 0 or j > self.ncol - 1: - fail = True - if fail: - raise Exception(errmsg) - return kijlist - - def _get_nstation(self, idx, kijlist): - if isinstance(idx, list): - return len(kijlist) - elif isinstance(idx, tuple): - return 1 - - def _init_result(self, nstation): - # Initialize result array and put times in first column - result = np.empty((len(self.kstpkper), nstation + 1), - dtype=self.realtype) - result[:, :] = np.nan - if len(self.times) == result.shape[0]: - result[:, 0] = np.array(self.times) - return result - - def get_record(self, idx, full3D=False): - """ - Get a single data record from the budget file. - - Parameters - ---------- - idx : int - The zero-based record number. The first record is record 0. - full3D : boolean - If true, then return the record as a three dimensional numpy - array, even for those list-style records written as part of a - 'COMPACT BUDGET' MODFLOW budget file. (Default is False.) - - Returns - ---------- - record : a single data record - The structure of the returned object depends on the structure of - the data in the cbb file. Compact list data are returned as - - If full3D is True, then this method will return a numpy masked - array of size (nlay, nrow, ncol) for those list-style - 'COMPACT BUDGET' records written by MODFLOW. - - See Also - -------- - - Notes - ----- - - Examples - -------- - - """ - # idx must be an ndarray, so if it comes in as an integer then convert - if np.isscalar(idx): - idx = np.array([idx]) - - header = self.recordarray[idx] - ipos = np.long(self.iposarray[idx]) - self.file.seek(ipos, 0) - imeth = header['imeth'][0] - - t = header['text'][0] - if isinstance(t, bytes): - t = t.decode('utf-8') - s = 'Returning ' + str(t).strip() + ' as ' - - nlay = abs(header['nlay'][0]) - nrow = header['nrow'][0] - ncol = header['ncol'][0] - - # default method - if imeth == 0: - if self.verbose: - s += 'an array of shape ' + str((nlay, nrow, ncol)) - print(s) - return binaryread(self.file, self.realtype(1), - shape=(nlay, nrow, ncol)) - # imeth 1 - elif imeth == 1: - if self.verbose: - s += 'an array of shape ' + str((nlay, nrow, ncol)) - print(s) - return binaryread(self.file, self.realtype(1), - shape=(nlay, nrow, ncol)) - - # imeth 2 - elif imeth == 2: - nlist = binaryread(self.file, np.int32)[0] - dtype = np.dtype([('node', np.int32), ('q', self.realtype)]) - if self.verbose: - if full3D: - s += 'a numpy masked array of size ({},{},{})'.format(nlay, - nrow, - ncol) - else: - s += 'a numpy recarray of size (' + str(nlist) + ', 2)' - print(s) - data = binaryread(self.file, dtype, shape=(nlist,)) - if full3D: - return self.create3D(data, nlay, nrow, ncol) - else: - return data.view(np.recarray) - - # imeth 3 - elif imeth == 3: - ilayer = binaryread(self.file, np.int32, shape=(nrow, ncol)) - data = binaryread(self.file, self.realtype(1), shape=(nrow, ncol)) - if self.verbose: - if full3D: - s += 'a numpy masked array of size ({},{},{})'.format(nlay, - nrow, - ncol) - else: - s += 'a list of two 2D numpy arrays. 
' - s += 'The first is an integer layer array of shape ' + \ - str((nrow, ncol)) - s += 'The second is real data array of shape ' + \ - str((nrow, ncol)) - print(s) - if full3D: - out = np.ma.zeros((nlay, nrow, ncol), dtype=np.float32) - out.mask = True - vertical_layer = ilayer[0] - 1 # This is always the top layer - out[vertical_layer, :, :] = data - return out - else: - return [ilayer, data] - - # imeth 4 - elif imeth == 4: - if self.verbose: - s += 'a 2d numpy array of size ({},{})'.format(nrow, ncol) - print(s) - return binaryread(self.file, self.realtype(1), shape=(nrow, ncol)) - - # imeth 5 - elif imeth == 5: - nauxp1 = binaryread(self.file, np.int32)[0] - naux = nauxp1 - 1 - l = [('node', np.int32), ('q', self.realtype)] - for i in range(naux): - auxname = binaryread(self.file, str, charlen=16) - if not isinstance(auxname, str): - auxname = auxname.decode() - l.append((auxname, self.realtype)) - dtype = np.dtype(l) - nlist = binaryread(self.file, np.int32)[0] - data = binaryread(self.file, dtype, shape=(nlist,)) - if full3D: - if self.verbose: - s += 'a list array of shape ({},{},{})'.format(nlay, - nrow, - ncol) - print(s) - return self.create3D(data, nlay, nrow, ncol) - else: - if self.verbose: - s += 'a numpy recarray of size (' + \ - str(nlist) + ', {})'.format(2 + naux) - print(s) - return data.view(np.recarray) - - # imeth 6 - elif imeth == 6: - # read rest of list data - nauxp1 = binaryread(self.file, np.int32)[0] - naux = nauxp1 - 1 - l = [('node', np.int32), ('node2', np.int32), ('q', self.realtype)] - for i in range(naux): - auxname = binaryread(self.file, str, charlen=16) - if not isinstance(auxname, str): - auxname = auxname.decode() - l.append((auxname.strip(), self.realtype)) - dtype = np.dtype(l) - nlist = binaryread(self.file, np.int32)[0] - data = binaryread(self.file, dtype, shape=(nlist,)) - if self.verbose: - if full3D: - s += 'full 3D arrays not supported for ' + \ - 'imeth = {}'.format(imeth) - else: - s += 'a numpy recarray of size (' + str(nlist) + ', 2)' - print(s) - if full3D: - s += 'full 3D arrays not supported for ' + \ - 'imeth = {}'.format(imeth) - raise ValueError(s) - else: - return data.view(np.recarray) - else: - raise ValueError('invalid imeth value - {}'.format(imeth)) - - # should not reach this point - return - - def create3D(self, data, nlay, nrow, ncol): - """ - Convert a dictionary of {node: q, ...} into a numpy masked array. - In most cases this should not be called directly by the user unless - you know what you're doing. Instead, it is used as part of the - full3D keyword for get_data. - - Parameters - ---------- - data : dictionary - Dictionary with node keywords and flows (q) items. - - nlay, nrow, ncol : int - Number of layers, rows, and columns of the model grid. - - Returns - ---------- - out : numpy masked array - List contains unique simulation times (totim) in binary file. - - """ - out = np.ma.zeros((nlay * nrow * ncol), dtype=np.float32) - out.mask = True - for [node, q] in zip(data['node'], data['q']): - idx = node - 1 - out.data[idx] += q - out.mask[idx] = False - return np.ma.reshape(out, (nlay, nrow, ncol)) - - def get_times(self): - """ - Get a list of unique times in the file - - Returns - ---------- - out : list of floats - List contains unique simulation times (totim) in binary file. - - """ - return self.times - - def get_nrecords(self): - """ - Return the number of records in the file - - Returns - ------- - - out : int - Number of records in the file. 
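create3D above accumulates the compact (node, q) list into a flat (nlay * nrow * ncol) masked array using the 1-based node number and then reshapes it. The inverse mapping from node number back to a structured (layer, row, column) index follows the same convention; a small sketch:

def node_to_kij(node, nrow, ncol):
    # node is 1-based; cells are numbered layer by layer, row by row
    idx = node - 1
    k, rem = divmod(idx, nrow * ncol)
    i, j = divmod(rem, ncol)
    return k, i, j

# node_to_kij(1, 10, 20)   -> (0, 0, 0)
# node_to_kij(201, 10, 20) -> (1, 0, 0)  first cell of the second layer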
- - """ - return self.recordarray.shape[0] - - def get_residual(self, totim, scaled=False): - """ - Return an array the size of the model grid containing the flow residual - calculated from the budget terms. Residual will not be correct unless - all flow terms are written to the budget file. - - Parameters - ---------- - totim : float - Simulation time for which to calculate the residual. This value - must be precise, so it is best to get it from the get_times - method. - - scaled : bool - If True, then divide the residual by the total cell inflow - - Returns - ------- - residual : np.ndarray - The flow residual for the cell of shape (nlay, nrow, ncol) - - """ - - nlay = self.nlay - nrow = self.nrow - ncol = self.ncol - residual = np.zeros((nlay, nrow, ncol), dtype=np.float) - if scaled: - inflow = np.zeros((nlay, nrow, ncol), dtype=np.float) - select_indices = np.where((self.recordarray['totim'] == totim))[0] - - for i in select_indices: - text = self.recordarray[i]['text'].decode() - if self.verbose: - print('processing {}'.format(text)) - flow = self.get_record(idx=i, full3D=True) - if ncol > 1 and 'RIGHT FACE' in text: - residual -= flow[:, :, :] - residual[:, :, 1:] += flow[:, :, :-1] - if scaled: - idx = np.where(flow < 0.) - inflow[idx] -= flow[idx] - idx = np.where(flow > 0.) - l, r, c = idx - idx = (l, r, c + 1) - inflow[idx] += flow[idx] - elif nrow > 1 and 'FRONT FACE' in text: - residual -= flow[:, :, :] - residual[:, 1:, :] += flow[:, :-1, :] - if scaled: - idx = np.where(flow < 0.) - inflow[idx] -= flow[idx] - idx = np.where(flow > 0.) - l, r, c = idx - idx = (l, r + 1, c) - inflow[idx] += flow[idx] - elif nlay > 1 and 'LOWER FACE' in text: - residual -= flow[:, :, :] - residual[1:, :, :] += flow[:-1, :, :] - if scaled: - idx = np.where(flow < 0.) - inflow[idx] -= flow[idx] - idx = np.where(flow > 0.) - l, r, c = idx - idx = (l + 1, r, c) - inflow[idx] += flow[idx] - else: - residual += flow - if scaled: - idx = np.where(flow > 0.) - inflow[idx] += flow[idx] - - if scaled: - residual_scaled = np.zeros((nlay, nrow, ncol), dtype=np.float) - idx = (inflow > 0.) - residual_scaled[idx] = residual[idx] / inflow[idx] - return residual_scaled - - return residual - - def close(self): - """ - Close the file handle - """ - self.file.close() - return - - -class HeadUFile(BinaryLayerFile): - """ - Unstructured MODFLOW-USG HeadUFile Class. - - Parameters - ---------- - filename : string - Name of the concentration file - text : string - Name of the text string in the head file. Default is 'headu' - precision : string - 'auto', 'single' or 'double'. Default is 'auto'. - verbose : bool - Write information to the screen. Default is False. - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - The HeadUFile class provides simple ways to retrieve a list of - head arrays from a MODFLOW-USG binary head file and time series - arrays for one or more cells. - - The BinaryLayerFile class is built on a record array consisting of - headers, which are record arrays of the modflow header information - (kstp, kper, pertim, totim, text, nrow, ncol, ilay) - and long integers, which are pointers to first bytes of data for - the corresponding data array. For unstructured grids, nrow and ncol - are the starting and ending node numbers for layer, ilay. This class - overrides methods in the parent class so that the proper sized arrays - are created. 
- - When the get_data method is called for this class, a list of - one-dimensional arrays will be returned, where each array is the head - array for a layer. If the heads for a layer were not saved, then - None will be returned for that layer. - - Examples - -------- - - >>> import flopy.utils.binaryfile as bf - >>> hdobj = bf.HeadUFile('model.hds') - >>> hdobj.list_records() - >>> usgheads = hdobj.get_data(kstpkper=(1, 50)) - - - """ - - def __init__(self, filename, text='headu', precision='auto', - verbose=False, **kwargs): - """ - Class constructor - """ - self.text = text.encode() - if precision == 'auto': - precision = get_headfile_precision(filename) - if precision == 'unknown': - s = 'Error. Precision could not be determined for {}'.format( - filename) - print(s) - raise Exception() - self.header_dtype = BinaryHeader.set_dtype(bintype='Head', - precision=precision) - super(HeadUFile, self).__init__(filename, precision, verbose, kwargs) - return - - def _get_data_array(self, totim=0.): - """ - Get a list of 1D arrays for the - specified kstp and kper value or totim value. - - """ - - if totim >= 0.: - keyindices = np.where((self.recordarray['totim'] == totim))[0] - if len(keyindices) == 0: - msg = 'totim value ({}) not found in file...'.format(totim) - raise Exception(msg) - else: - raise Exception('Data not found...') - - # fill a list of 1d arrays with heads from binary file - data = self.nlay * [None] - for idx in keyindices: - ipos = self.iposarray[idx] - ilay = self.recordarray['ilay'][idx] - nstrt = self.recordarray['ncol'][idx] - nend = self.recordarray['nrow'][idx] - npl = nend - nstrt + 1 - if self.verbose: - msg = 'Byte position in file: {} for '.format(ipos) + \ - 'layer {}'.format(ilay) - print(msg) - self.file.seek(ipos, 0) - data[ilay - 1] = binaryread(self.file, self.realtype, - shape=(npl,)) - return data - - def get_databytes(self, header): - """ - - Parameters - ---------- - header : datafile.Header - header object - - Returns - ------- - databytes : int - size of the data array, in bytes, following the header - - """ - # unstructured head files contain node starting and ending indices - # for each layer - nstrt = np.int64(header['ncol']) - nend = np.int64(header['nrow']) - npl = nend - nstrt + 1 - return npl * np.int64(self.realtype(1).nbytes) - - def get_ts(self, idx): - """ - Get a time series from the binary HeadUFile (not implemented). - - Parameters - ---------- - idx : tuple of ints, or a list of a tuple of ints - idx can be (layer, row, column) or it can be a list in the form - [(layer, row, column), (layer, row, column), ...]. The layer, - row, and column values must be zero based. - - Returns - ---------- - out : numpy array - Array has size (ntimes, ncells + 1). The first column in the - data array will contain time (totim). - - See Also - -------- - - Notes - ----- - - Examples - -------- - - """ - msg = 'HeadUFile: get_ts() is not implemented' - raise NotImplementedError(msg) +""" +Module to read MODFLOW binary output files. The module contains four +important classes that can be accessed by the user. + +* HeadFile (Binary head file. 
Can also be used for drawdown) +* HeadUFile (Binary MODFLOW-USG unstructured head file) +* UcnFile (Binary concentration file from MT3DMS) +* CellBudgetFile (Binary cell-by-cell flow file) + +""" +from __future__ import print_function +import numpy as np +import warnings +from collections import OrderedDict +from ..utils.datafile import Header, LayerFile + + +class BinaryHeader(Header): + """ + The binary_header class is a class to create headers for MODFLOW + binary files. + + Parameters + ---------- + bintype : str + is the type of file being opened (head and ucn file currently + supported) + precision : str + is the precision of the floating point data in the file + + """ + + def __init__(self, bintype=None, precision='single'): + super(BinaryHeader, self).__init__(bintype, precision) + + def set_values(self, **kwargs): + """ + Set values using kwargs + """ + ikey = ['ntrans', 'kstp', 'kper', 'ncol', 'nrow', 'ilay', 'ncpl', + 'nodes', 'm2', 'm3'] + fkey = ['pertim', 'totim'] + ckey = ['text'] + for k in ikey: + if k in kwargs.keys(): + try: + self.header[0][k] = int(kwargs[k]) + except: + msg = '{0} key not available in {1} header ' + 'dtype'.format(k, self.header_type) + print(msg) + for k in fkey: + if k in kwargs.keys(): + try: + self.header[0][k] = float(kwargs[k]) + except: + msg = '{} key not available '.format(k) + \ + 'in {} header dtype'.format(self.header_type) + print(msg) + for k in ckey: + if k in kwargs.keys(): + # Convert to upper case to be consistent case used by MODFLOW + # text strings. Necessary to work with HeadFile and UcnFile + # routines + ttext = kwargs[k].upper() + # trim a long string + if len(ttext) > 16: + text = ttext[0:16] + # pad a short string + elif len(ttext) < 16: + text = "{:<16}".format(ttext) + # the string is just right + else: + text = ttext + self.header[0][k] = text + else: + self.header[0][k] = 'DUMMY TEXT' + + @staticmethod + def set_dtype(bintype=None, precision='single'): + """ + Set the dtype + + """ + header = Header(filetype=bintype, precision=precision) + return header.dtype + + @staticmethod + def create(bintype=None, precision='single', **kwargs): + """ + Create a binary header + + """ + header = BinaryHeader(bintype=bintype, precision=precision) + if header.get_dtype() is not None: + header.set_values(**kwargs) + return header.get_values() + + +def binaryread_struct(file, vartype, shape=(1,), charlen=16): + """ + Read text, a scalar value, or an array of values from a binary file. + + file : file object + is an open file object + vartype : type + is the return variable type: str, numpy.int32, numpy.float32, + or numpy.float64 + shape : tuple + is the shape of the returned array (shape(1, ) returns a single + value) for example, shape = (nlay, nrow, ncol) + charlen : int + is the length of the text string. Note that string arrays + cannot be returned, only multi-character strings. Shape has no + affect on strings. 
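set_values above upper-cases the text record and pads or trims it to exactly 16 characters, matching the convention MODFLOW itself writes; the integer and float fields are filled from keyword arguments. A sketch of building head-file header values this way (all numeric values are placeholders):

from flopy.utils.binaryfile import BinaryHeader

values = BinaryHeader.create(bintype='Head', precision='single',
                             kstp=1, kper=1, ilay=1, ncol=10, nrow=10,
                             pertim=1.0, totim=1.0, text='head')
# the text field comes back upper-cased and padded: b'HEAD            '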
+ + """ + import struct + import numpy as np + + # store the mapping from type to struct format (fmt) + typefmtd = {np.int32: 'i', np.float32: 'f', np.float64: 'd'} + + # read a string variable of length charlen + if vartype == str: + result = file.read(charlen * 1) + + # read other variable types + else: + fmt = typefmtd[vartype] + # find the number of bytes for one value + numbytes = vartype(1).nbytes + # find the number of values + nval = np.core.fromnumeric.prod(shape) + fmt = str(nval) + fmt + s = file.read(numbytes * nval) + result = struct.unpack(fmt, s) + if nval == 1: + result = vartype(result[0]) + else: + result = np.array(result, dtype=vartype) + result = np.reshape(result, shape) + return result + + +def binaryread(file, vartype, shape=(1,), charlen=16): + """ + Uses numpy to read from binary file. This was found to be faster than the + struct approach and is used as the default. + + """ + + # read a string variable of length charlen + if vartype == str: + result = file.read(charlen * 1) + else: + # find the number of values + nval = np.prod(shape) + result = np.fromfile(file, vartype, nval) + if nval == 1: + result = result # [0] + else: + result = np.reshape(result, shape) + return result + + +def join_struct_arrays(arrays): + """ + Simple function that can join two numpy structured arrays. + + """ + newdtype = sum((a.dtype.descr for a in arrays), []) + newrecarray = np.empty(len(arrays[0]), dtype=newdtype) + for a in arrays: + for name in a.dtype.names: + newrecarray[name] = a[name] + return newrecarray + + +def get_headfile_precision(filename): + """ + Determine precision of a MODFLOW head file. + + Parameters + ---------- + filename : str + Name of binary MODFLOW file to determine precision. + + Returns + ------- + result : str + Result will be unknown, single, or double + + """ + + # Set default result if neither single or double works + result = 'unknown' + + # Create string containing set of ascii characters + asciiset = ' ' + for i in range(33, 127): + asciiset += chr(i) + + # Open file, and check filesize to ensure this is not an empty file + f = open(filename, 'rb') + f.seek(0, 2) + totalbytes = f.tell() + f.seek(0, 0) # reset to beginning + assert f.tell() == 0 + if totalbytes == 0: + raise IOError('datafile error: file is empty: ' + str(filename)) + + # first try single + vartype = [('kstp', ' 1 and self.nrow * self.ncol > 10000000: + s = 'Possible error. ncol ({}) * nrow ({}) > 10,000,000 ' + s = s.format(self.ncol, self.nrow) + warnings.warn(s) + self.file.seek(0, 2) + self.totalbytes = self.file.tell() + self.file.seek(0, 0) + ipos = 0 + while ipos < self.totalbytes: + header = self._get_header() + self.recordarray.append(header) + if self.text.upper() not in header['text']: + continue + if ipos == 0: + self.times.append(header['totim']) + kstpkper = (header['kstp'], header['kper']) + self.kstpkper.append(kstpkper) + else: + totim = header['totim'] + if totim != self.times[-1]: + self.times.append(totim) + kstpkper = (header['kstp'], header['kper']) + self.kstpkper.append(kstpkper) + ipos = self.file.tell() + self.iposarray.append(ipos) + databytes = self.get_databytes(header) + self.file.seek(databytes, 1) + ipos = self.file.tell() + + # self.recordarray contains a recordarray of all the headers. 
+ self.recordarray = np.array(self.recordarray, dtype=self.header_dtype) + self.iposarray = np.array(self.iposarray) + self.nlay = np.max(self.recordarray['ilay']) + return + + def get_databytes(self, header): + """ + + Parameters + ---------- + header : datafile.Header + header object + + Returns + ------- + databytes : int + size of the data array, in bytes, following the header + + """ + return np.int64(header['ncol']) * \ + np.int64(header['nrow']) * \ + np.int64(self.realtype(1).nbytes) + + def _read_data(self, shp): + return binaryread(self.file, self.realtype, + shape=shp) + + def _get_header(self): + """ + Read the file header + + """ + header = binaryread(self.file, self.header_dtype, (1,)) + return header[0] + + def get_ts(self, idx): + """ + Get a time series from the binary file. + + Parameters + ---------- + idx : tuple of ints, or a list of a tuple of ints + idx can be (layer, row, column) or it can be a list in the form + [(layer, row, column), (layer, row, column), ...]. The layer, + row, and column values must be zero based. + + Returns + ---------- + out : numpy array + Array has size (ntimes, ncells + 1). The first column in the + data array will contain time (totim). + + See Also + -------- + + Notes + ----- + + The layer, row, and column values must be zero-based, and must be + within the following ranges: 0 <= k < nlay; 0 <= i < nrow; 0 <= j < ncol + + Examples + -------- + + """ + kijlist = self._build_kijlist(idx) + nstation = self._get_nstation(idx, kijlist) + + # Initialize result array and put times in first column + result = self._init_result(nstation) + + istat = 1 + for k, i, j in kijlist: + ioffset = (i * self.ncol + j) * self.realtype(1).nbytes + for irec, header in enumerate(self.recordarray): + ilay = header[ + 'ilay'] - 1 # change ilay from header to zero-based + if ilay != k: + continue + ipos = np.long(self.iposarray[irec]) + + # Calculate offset necessary to reach intended cell + self.file.seek(ipos + np.long(ioffset), 0) + + # Find the time index and then put value into result in the + # correct location. + itim = np.where(result[:, 0] == header['totim'])[0] + result[itim, istat] = binaryread(self.file, self.realtype) + istat += 1 + return result + + +class HeadFile(BinaryLayerFile): + """ + HeadFile Class. + + Parameters + ---------- + filename : string + Name of the concentration file + text : string + Name of the text string in the head file. Default is 'head' + precision : string + 'auto', 'single' or 'double'. Default is 'auto'. + verbose : bool + Write information to the screen. Default is False. + + Attributes + ---------- + + Methods + ------- + + See Also + -------- + + Notes + ----- + The HeadFile class provides simple ways to retrieve 2d and 3d + head arrays from a MODFLOW binary head file and time series + arrays for one or more cells. + + The BinaryLayerFile class is built on a record array consisting of + headers, which are record arrays of the modflow header information + (kstp, kper, pertim, totim, text, nrow, ncol, ilay) + and long integers, which are pointers to first bytes of data for + the corresponding data array. + + Examples + -------- + + >>> import flopy.utils.binaryfile as bf + >>> hdobj = bf.HeadFile('model.hds', precision='single') + >>> hdobj.list_records() + >>> rec = hdobj.get_data(kstpkper=(1, 50)) + + >>> ddnobj = bf.HeadFile('model.ddn', text='drawdown', precision='single') + >>> ddnobj.list_records() + >>> rec = ddnobj.get_data(totim=100.) 
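A time series for one or more cells can be pulled the same way through get_ts; the cell indices below are zero-based placeholders:

>>> ts = hdobj.get_ts([(0, 5, 5), (1, 5, 5)])
>>> ts[:, 0]   # first column is totim; one further column per cell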
+ + + """ + + def __init__(self, filename, text='head', precision='auto', + verbose=False, **kwargs): + self.text = text.encode() + if precision == 'auto': + precision = get_headfile_precision(filename) + if precision == 'unknown': + s = 'Error. Precision could not be determined for {}'.format( + filename) + print(s) + raise Exception() + self.header_dtype = BinaryHeader.set_dtype(bintype='Head', + precision=precision) + super(HeadFile, self).__init__(filename, precision, verbose, kwargs) + return + + +class UcnFile(BinaryLayerFile): + """ + UcnFile Class. + + Parameters + ---------- + filename : string + Name of the concentration file + text : string + Name of the text string in the ucn file. Default is 'CONCENTRATION' + precision : string + 'auto', 'single' or 'double'. Default is 'auto'. + verbose : bool + Write information to the screen. Default is False. + + Attributes + ---------- + + Methods + ------- + + See Also + -------- + + Notes + ----- + The UcnFile class provides simple ways to retrieve 2d and 3d + concentration arrays from a MT3D binary head file and time series + arrays for one or more cells. + + The BinaryLayerFile class is built on a record array consisting of + headers, which are record arrays of the modflow header information + (kstp, kper, pertim, totim, text, nrow, ncol, ilay) + and long integers, which are pointers to first bytes of data for + the corresponding data array. + + Examples + -------- + + >>> import flopy.utils.binaryfile as bf + >>> ucnobj = bf.UcnFile('MT3D001.UCN', precision='single') + >>> ucnobj.list_records() + >>> rec = ucnobj.get_data(kstpkper=(1,1)) + + """ + + def __init__(self, filename, text='concentration', precision='auto', + verbose=False, **kwargs): + self.text = text.encode() + if precision == 'auto': + precision = get_headfile_precision(filename) + if precision == 'unknown': + s = 'Error. Precision could not be determined for {}'.format( + filename) + print(s) + raise Exception() + self.header_dtype = BinaryHeader.set_dtype(bintype='Ucn', + precision=precision) + super(UcnFile, self).__init__(filename, precision, verbose, kwargs) + return + + +class BudgetIndexError(Exception): + pass + + +class CellBudgetFile(object): + """ + CellBudgetFile Class. + + Parameters + ---------- + filename : string + Name of the cell budget file + precision : string + 'single' or 'double'. Default is 'single'. + verbose : bool + Write information to the screen. Default is False. 
+ + Attributes + ---------- + + Methods + ------- + + See Also + -------- + + Notes + ----- + + Examples + -------- + + >>> import flopy.utils.binaryfile as bf + >>> cbb = bf.CellBudgetFile('mymodel.cbb') + >>> cbb.list_records() + >>> rec = cbb.get_data(kstpkper=(0,0), text='RIVER LEAKAGE') + + """ + + def __init__(self, filename, precision='auto', verbose=False, **kwargs): + self.filename = filename + self.precision = precision + self.verbose = verbose + self.file = open(self.filename, 'rb') + # Get filesize to ensure this is not an empty file + self.file.seek(0, 2) + totalbytes = self.file.tell() + self.file.seek(0, 0) # reset to beginning + assert self.file.tell() == 0 + if totalbytes == 0: + raise IOError('datafile error: file is empty: ' + str(filename)) + self.nrow = 0 + self.ncol = 0 + self.nlay = 0 + self.nper = 0 + self.times = [] + self.kstpkper = [] + self.recordarray = [] + self.iposheader = [] + self.iposarray = [] + self.textlist = [] + self.imethlist = [] + self.paknamlist = [] + self.nrecords = 0 + + self.dis = None + self.modelgrid = None + if 'model' in kwargs.keys(): + self.model = kwargs.pop('model') + self.modelgrid = self.model.modelgrid + self.dis = self.model.dis + if 'dis' in kwargs.keys(): + self.dis = kwargs.pop('dis') + self.modelgrid = self.dis.parent.modelgrid + if 'sr' in kwargs.keys(): + from ..utils import SpatialReferenceUnstructured + from ..discretization import StructuredGrid, UnstructuredGrid + sr = kwargs.pop('sr') + if isinstance(sr, SpatialReferenceUnstructured): + self.modelgrid = UnstructuredGrid(vertices=sr.verts, + iverts=sr.iverts, + xcenters=sr.xc, + ycenters=sr.yc, + ncpl=sr.ncpl) + else: + self.modelgrid = StructuredGrid(delc=sr.delc, delr=sr.delr, + xoff=sr.xll, yoff=sr.yll, + angrot=sr.rotation) + if 'modelgrid' in kwargs.keys(): + self.modelgrid = kwargs.pop('modelgrid') + if len(kwargs.keys()) > 0: + args = ','.join(kwargs.keys()) + raise Exception('LayerFile error: unrecognized kwargs: ' + args) + + if precision == 'auto': + success = self._set_precision('single') + if not success: + success = self._set_precision('double') + if not success: + s = "Budget precision could not be auto determined" + raise BudgetIndexError(s) + elif precision == 'single': + success = self._set_precision(precision) + elif precision == 'double': + success = self._set_precision(precision) + else: + raise Exception('Unknown precision specified: ' + precision) + + if not success: + s = "Budget file could not be read using {} " \ + "precision".format(precision) + raise Exception(s) + + return + + def __enter__(self): + return self + + def __exit__(self, *exc): + self.close() + + def __reset(self): + """ + Reset indexing lists when determining precision + """ + self.file.seek(0, 0) + self.times = [] + self.kstpkper = [] + self.recordarray = [] + self.iposheader = [] + self.iposarray = [] + self.textlist = [] + self.imethlist = [] + self.paknamlist = [] + self.nrecords = 0 + + def _set_precision(self, precision='single'): + """ + Method to set the budget precsion from a CBC file. 
Enables + Auto precision code to work + + Parameters + ---------- + precision : str + budget file precision (accepts 'single' or 'double') + """ + success = True + h1dt = [('kstp', 'i4'), ('kper', 'i4'), ('text', 'a16'), + ('ncol', 'i4'), ('nrow', 'i4'), ('nlay', 'i4')] + if precision == 'single': + self.realtype = np.float32 + ffmt = 'f4' + else: + self.realtype = np.float64 + ffmt = 'f8' + + h2dt0 = [('imeth', 'i4'), ('delt', ffmt), ('pertim', ffmt), + ('totim', ffmt)] + h2dt = [('imeth', 'i4'), ('delt', ffmt), ('pertim', ffmt), + ('totim', ffmt), ('modelnam', 'a16'), ('paknam', 'a16'), + ('modelnam2', 'a16'), ('paknam2', 'a16')] + self.header1_dtype = np.dtype(h1dt) + self.header2_dtype0 = np.dtype(h2dt0) + self.header2_dtype = np.dtype(h2dt) + hdt = h1dt + h2dt + self.header_dtype = np.dtype(hdt) + + try: + self._build_index() + except BudgetIndexError: + success = False + self.__reset() + + return success + + def _totim_from_kstpkper(self, kstpkper): + if self.dis is None: + return -1.0 + kstp, kper = kstpkper + perlen = self.dis.perlen.array + nstp = self.dis.nstp.array[kper] + tsmult = self.dis.tsmult.array[kper] + kper_len = np.sum(perlen[:kper]) + this_perlen = perlen[kper] + if tsmult == 1: + dt1 = this_perlen / float(nstp) + else: + dt1 = this_perlen * (tsmult - 1.0) / ((tsmult ** nstp) - 1.0) + kstp_len = [dt1] + for i in range(kstp + 1): + kstp_len.append(kstp_len[-1] * tsmult) + # kstp_len = np.array(kstp_len) + # kstp_len = kstp_len[:kstp].sum() + kstp_len = sum(kstp_len[:kstp + 1]) + return kper_len + kstp_len + + def _build_index(self): + """ + Build the ordered dictionary, which maps the header information + to the position in the binary file. + """ + asciiset = ' ' + for i in range(33, 127): + asciiset += chr(i) + + header = self._get_header() + self.nrow = header["nrow"] + self.ncol = header["ncol"] + self.nlay = np.abs(header["nlay"]) + text = header['text'] + if isinstance(text, bytes): + text = text.decode() + if self.nrow < 0 or self.ncol < 0: + raise Exception("negative nrow, ncol") + self.file.seek(0, 2) + self.totalbytes = self.file.tell() + self.file.seek(0, 0) + self.recorddict = OrderedDict() + ipos = 0 + while ipos < self.totalbytes: + self.iposheader.append(ipos) + header = self._get_header() + self.nrecords += 1 + totim = header['totim'] + if totim == 0: + totim = self._totim_from_kstpkper( + (header["kstp"] - 1, header["kper"] - 1)) + header["totim"] = totim + if totim >= 0 and totim not in self.times: + self.times.append(totim) + kstpkper = (header['kstp'], header['kper']) + if kstpkper not in self.kstpkper: + self.kstpkper.append(kstpkper) + if header['text'] not in self.textlist: + # check the precision of the file using text records + try: + tlist = [header['text'], header['modelnam']] + for text in tlist: + if isinstance(text, bytes): + text = text.decode() + for t in text: + if t.upper() not in asciiset: + raise Exception() + + except: + raise BudgetIndexError("Improper precision") + self.textlist.append(header['text']) + self.imethlist.append(header['imeth']) + if header['paknam'] not in self.paknamlist: + self.paknamlist.append(header['paknam']) + ipos = self.file.tell() + + if self.verbose: + for itxt in ['kstp', 'kper', 'text', 'ncol', 'nrow', 'nlay', + 'imeth', 'delt', 'pertim', 'totim', 'modelnam', + 'paknam', 'modelnam2', 'paknam2']: + s = header[itxt] + if isinstance(s, bytes): + s = s.decode() + print(itxt + ': ' + str(s)) + print('file position: ', ipos) + if int(header['imeth']) != 5 and \ + int(header['imeth']) != 6 and \ + 
int(header['imeth']) != 7: + print('') + + # store record and byte position mapping + self.recorddict[ + tuple(header)] = ipos # store the position right after header2 + self.recordarray.append(header) + self.iposarray.append( + ipos) # store the position right after header2 + + # skip over the data to the next record and set ipos + self._skip_record(header) + ipos = self.file.tell() + + # convert to numpy arrays + self.recordarray = np.array(self.recordarray, dtype=self.header_dtype) + self.iposheader = np.array(self.iposheader, dtype=np.int64) + self.iposarray = np.array(self.iposarray, dtype=np.int64) + self.nper = self.recordarray["kper"].max() + return + + def _skip_record(self, header): + """ + Skip over this record, not counting header and header2. + + """ + nlay = abs(header['nlay']) + nrow = header['nrow'] + ncol = header['ncol'] + imeth = header['imeth'] + if imeth == 0: + nbytes = (nrow * ncol * nlay * self.realtype(1).nbytes) + elif imeth == 1: + nbytes = (nrow * ncol * nlay * self.realtype(1).nbytes) + elif imeth == 2: + nlist = binaryread(self.file, np.int32)[0] + nbytes = nlist * (np.int32(1).nbytes + self.realtype(1).nbytes) + elif imeth == 3: + nbytes = (nrow * ncol * self.realtype(1).nbytes) + nbytes += (nrow * ncol * np.int32(1).nbytes) + elif imeth == 4: + nbytes = (nrow * ncol * self.realtype(1).nbytes) + elif imeth == 5: + nauxp1 = binaryread(self.file, np.int32)[0] + naux = nauxp1 - 1 + + for i in range(naux): + temp = binaryread(self.file, str, charlen=16) + nlist = binaryread(self.file, np.int32)[0] + if self.verbose: + print('naux: ', naux) + print('nlist: ', nlist) + print('') + nbytes = nlist * (np.int32(1).nbytes + self.realtype(1).nbytes + + naux * self.realtype(1).nbytes) + elif imeth == 6: + # read rest of list data + nauxp1 = binaryread(self.file, np.int32)[0] + naux = nauxp1 - 1 + + for i in range(naux): + temp = binaryread(self.file, str, charlen=16) + nlist = binaryread(self.file, np.int32)[0] + if self.verbose: + print('naux: ', naux) + print('nlist: ', nlist) + print('') + nbytes = nlist * ( + np.int32(1).nbytes * 2 + self.realtype(1).nbytes + + naux * self.realtype(1).nbytes) + else: + raise Exception('invalid method code ' + str(imeth)) + if nbytes != 0: + self.file.seek(nbytes, 1) + return + + def _get_header(self): + """ + Read the file header + + """ + header1 = binaryread(self.file, self.header1_dtype, (1,)) + nlay = header1['nlay'] + if nlay < 0: + # fill header2 by first reading imeth, delt, pertim and totim + # and then adding modelnames and paknames if imeth = 6 + temp = binaryread(self.file, self.header2_dtype0, (1,)) + header2 = np.array([(0, 0., 0., 0., '', '', '', '')], + dtype=self.header2_dtype) + for name in temp.dtype.names: + header2[name] = temp[name] + if int(header2['imeth']) == 6: + header2['modelnam'] = binaryread(self.file, str, charlen=16) + header2['paknam'] = binaryread(self.file, str, charlen=16) + header2['modelnam2'] = binaryread(self.file, str, charlen=16) + header2['paknam2'] = binaryread(self.file, str, charlen=16) + else: + header2 = np.array([(0, 0., 0., 0., '', '', '', '')], + dtype=self.header2_dtype) + fullheader = join_struct_arrays([header1, header2]) + return fullheader[0] + + def _find_text(self, text): + """ + Determine if selected record name is in budget file + + """ + # check and make sure that text is in file + text16 = None + if text is not None: + if isinstance(text, bytes): + ttext = text.decode() + else: + ttext = text + for t in self.textlist: + if ttext.upper() in t.decode(): + text16 = t + break + 
if text16 is None: + errmsg = 'The specified text string is not in the budget file.' + raise Exception(errmsg) + return text16 + + def _find_paknam(self, paknam): + """ + Determine if selected record name is in budget file + + """ + # check and make sure that text is in file + paknam16 = None + if paknam is not None: + if isinstance(paknam, bytes): + tpaknam = paknam.decode() + else: + tpaknam = paknam + for t in self._unique_package_names(): + if tpaknam.upper() in t.decode(): + paknam16 = t + break + if paknam16 is None: + errmsg = 'The specified package name string is not ' + \ + 'in the budget file.' + raise Exception(errmsg) + return paknam16 + + def list_records(self): + """ + Print a list of all of the records in the file + """ + for rec in self.recordarray: + if isinstance(rec, bytes): + rec = rec.decode() + print(rec) + return + + def list_unique_records(self): + """ + Print a list of unique record names + """ + print('RECORD IMETH') + print(22 * '-') + for rec, imeth in zip(self.textlist, self.imethlist): + if isinstance(rec, bytes): + rec = rec.decode() + print('{:16} {:5d}'.format(rec.strip(), imeth)) + return + + def list_unique_packages(self): + """ + Print a list of unique package names + """ + for rec in self._unique_package_names(): + if isinstance(rec, bytes): + rec = rec.decode() + print(rec) + return + + def get_unique_record_names(self, decode=False): + """ + Get a list of unique record names in the file + + Parameters + ---------- + decode : bool + Optional boolean used to decode byte strings (default is False). + + Returns + ---------- + names : list of strings + List of unique text names in the binary file. + + """ + if decode: + names = [] + for text in self.textlist: + if isinstance(text, bytes): + text = text.decode() + names.append(text) + else: + names = self.textlist + return names + + def get_unique_package_names(self, decode=False): + """ + Get a list of unique package names in the file + + Parameters + ---------- + decode : bool + Optional boolean used to decode byte strings (default is False). + + Returns + ---------- + names : list of strings + List of unique package names in the binary file. + + """ + if decode: + names = [] + for text in self.paknamlist: + if isinstance(text, bytes): + text = text.decode() + names.append(text) + else: + names = self.paknamlist + return names + + def _unique_package_names(self): + """ + Get a list of unique package names in the file + + Returns + ---------- + out : list of strings + List of unique package names in the binary file. + + """ + return self.paknamlist + + def get_kstpkper(self): + """ + Get a list of unique stress periods and time steps in the file + + Returns + ---------- + out : list of (kstp, kper) tuples + List of unique kstp, kper combinations in binary file. kstp and + kper values are zero-based. + + """ + kstpkper = [] + for kstp, kper in self.kstpkper: + kstpkper.append((kstp - 1, kper - 1)) + return kstpkper + + def get_indices(self, text=None): + """ + Get a list of indices for a selected record name + + Parameters + ---------- + text : str + The text identifier for the record. Examples include + 'RIVER LEAKAGE', 'STORAGE', 'FLOW RIGHT FACE', etc. + + Returns + ---------- + out : tuple + indices of selected record name in budget file. 
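get_indices pairs naturally with the idx argument of get_data when the same record is fetched repeatedly; 'STORAGE' below is one of the record names mentioned above:

>>> idx = cbb.get_indices(text='STORAGE')
>>> recs = cbb.get_data(idx=list(idx))  # fetch those records by position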
+ + """ + # check and make sure that text is in file + if text is not None: + text16 = self._find_text(text) + select_indices = np.where((self.recordarray['text'] == text16)) + if isinstance(select_indices, tuple): + select_indices = select_indices[0] + else: + select_indices = None + return select_indices + + def get_position(self, idx, header=False): + """ + Get the starting position of the data or header for a specified record + number in the binary budget file. + + Parameters + ---------- + idx : int + The zero-based record number. The first record is record 0. + header : bool + If True, the position of the start of the header data is returned. + If False, the position of the start of the data is returned + (default is False). + + Returns + ------- + ipos : int64 + The position of the start of the data in the cell budget file + or the start of the header. + + """ + if header: + ipos = self.iposheader[idx] + else: + ipos = self.iposarray[idx] + return ipos + + def get_data(self, idx=None, kstpkper=None, totim=None, text=None, + paknam=None, full3D=False): + """ + Get data from the binary budget file. + + Parameters + ---------- + idx : int or list + The zero-based record number. The first record is record 0. + kstpkper : tuple of ints + A tuple containing the time step and stress period (kstp, kper). + The kstp and kper values are zero based. + totim : float + The simulation time. + text : str + The text identifier for the record. Examples include + 'RIVER LEAKAGE', 'STORAGE', 'FLOW RIGHT FACE', etc. + full3D : boolean + If true, then return the record as a three dimensional numpy + array, even for those list-style records written as part of a + 'COMPACT BUDGET' MODFLOW budget file. (Default is False.) + + Returns + ---------- + recordlist : list of records + A list of budget objects. The structure of the returned object + depends on the structure of the data in the cbb file. + + If full3D is True, then this method will return a numpy masked + array of size (nlay, nrow, ncol) for those list-style + 'COMPACT BUDGET' records written by MODFLOW. + + See Also + -------- + + Notes + ----- + + Examples + -------- + + """ + # trap for totim error + if totim is not None: + if len(self.times) == 0: + errmsg = '''This is an older style budget file that + does not have times in it. Use the MODFLOW + compact budget format if you want to work with + times. 
Or you may access this file using the + kstp and kper arguments or the idx argument.''' + raise Exception(errmsg) + + # check and make sure that text is in file + text16 = None + if text is not None: + text16 = self._find_text(text) + paknam16 = None + if paknam is not None: + paknam16 = self._find_paknam(paknam) + + if kstpkper is not None: + kstp1 = kstpkper[0] + 1 + kper1 = kstpkper[1] + 1 + if text is None and paknam is None: + select_indices = np.where( + (self.recordarray['kstp'] == kstp1) & + (self.recordarray['kper'] == kper1)) + else: + if paknam is None and text is not None: + select_indices = np.where( + (self.recordarray['kstp'] == kstp1) & + (self.recordarray['kper'] == kper1) & + (self.recordarray['text'] == text16)) + elif text is None and paknam is not None: + select_indices = np.where( + (self.recordarray['kstp'] == kstp1) & + (self.recordarray['kper'] == kper1) & + (self.recordarray['paknam'] == paknam16)) + else: + select_indices = np.where( + (self.recordarray['kstp'] == kstp1) & + (self.recordarray['kper'] == kper1) & + (self.recordarray['text'] == text16) & + (self.recordarray['paknam'] == paknam16)) + + elif totim is not None: + if text is None and paknam is None: + select_indices = np.where( + (self.recordarray['totim'] == totim)) + else: + if paknam is None and text is not None: + select_indices = np.where( + (self.recordarray['totim'] == totim) & + (self.recordarray['text'] == text16)) + elif text is None and paknam is not None: + select_indices = np.where( + (self.recordarray['totim'] == totim) & + (self.recordarray['paknam'] == paknam16)) + else: + select_indices = np.where( + (self.recordarray['totim'] == totim) & + (self.recordarray['text'] == text16) & + (self.recordarray['paknam'] == paknam16)) + + # allow for idx to be a list or a scalar + elif idx is not None: + if isinstance(idx, list): + select_indices = idx + else: + select_indices = [idx] + + # case where only text is entered + elif text is not None: + select_indices = np.where((self.recordarray['text'] == text16)) + + else: + raise TypeError( + "get_data() missing 1 required argument: 'kstpkper', 'totim', " + "'idx', or 'text'") + + # build and return the record list + if isinstance(select_indices, tuple): + select_indices = select_indices[0] + recordlist = [] + for idx in select_indices: + rec = self.get_record(idx, full3D=full3D) + recordlist.append(rec) + + return recordlist + + def get_ts(self, idx, text=None, times=None): + """ + Get a time series from the binary budget file. + + Parameters + ---------- + idx : tuple of ints, or a list of a tuple of ints + idx can be (layer, row, column) or it can be a list in the form + [(layer, row, column), (layer, row, column), ...]. The layer, + row, and column values must be zero based. + text : str + The text identifier for the record. Examples include + 'RIVER LEAKAGE', 'STORAGE', 'FLOW RIGHT FACE', etc. + times : iterable of floats + List of times to from which to get time series. + + Returns + ---------- + out : numpy array + Array has size (ntimes, ncells + 1). The first column in the + data array will contain time (totim). + + See Also + -------- + + Notes + ----- + + The layer, row, and column values must be zero-based, and must be + within the following ranges: 0 <= k < nlay; 0 <= i < nrow; 0 <= j < ncol + + Examples + -------- + + """ + # issue exception if text not provided + if text is None: + etxt = 'text keyword must be provided to CellBudgetFile ' + \ + 'get_ts() method.' 
+
+    def get_ts(self, idx, text=None, times=None):
+        """
+        Get a time series from the binary budget file.
+
+        Parameters
+        ----------
+        idx : tuple of ints, or a list of a tuple of ints
+            idx can be (layer, row, column) or it can be a list in the form
+            [(layer, row, column), (layer, row, column), ...]. The layer,
+            row, and column values must be zero based.
+        text : str
+            The text identifier for the record. Examples include
+            'RIVER LEAKAGE', 'STORAGE', 'FLOW RIGHT FACE', etc.
+        times : iterable of floats
+            List of times from which to get time series.
+
+        Returns
+        -------
+        out : numpy array
+            Array has size (ntimes, ncells + 1). The first column in the
+            data array will contain time (totim).
+
+        See Also
+        --------
+
+        Notes
+        -----
+        The layer, row, and column values must be zero-based, and must be
+        within the following ranges: 0 <= k < nlay; 0 <= i < nrow;
+        0 <= j < ncol
+
+        Examples
+        --------
+
+        """
+        # issue exception if text not provided
+        if text is None:
+            etxt = 'text keyword must be provided to CellBudgetFile ' + \
+                   'get_ts() method.'
+            raise Exception(etxt)
+
+        kijlist = self._build_kijlist(idx)
+        nstation = self._get_nstation(idx, kijlist)
+
+        # Initialize result array and put times in first column
+        result = self._init_result(nstation)
+
+        kk = self.get_kstpkper()
+        timesint = self.get_times()
+        if len(timesint) < 1:
+            if times is None:
+                timesint = [x + 1 for x in range(len(kk))]
+            else:
+                if isinstance(times, np.ndarray):
+                    times = times.tolist()
+                if len(times) != len(kk):
+                    etxt = 'times passed to CellBudgetFile get_ts() ' + \
+                           'method must be of length {}, '.format(len(kk)) + \
+                           'not {}'.format(len(times))
+                    raise Exception(etxt)
+                timesint = times
+        for idx, t in enumerate(timesint):
+            result[idx, 0] = t
+
+        for itim, k in enumerate(kk):
+            try:
+                v = self.get_data(kstpkper=k, text=text, full3D=True)
+                # skip missing data - required for storage
+                if len(v) > 0:
+                    v = v[0]
+                    istat = 1
+                    for k, i, j in kijlist:
+                        result[itim, istat] = v[k, i, j].copy()
+                        istat += 1
+            except ValueError:
+                v = self.get_data(kstpkper=k, text=text)
+                # skip missing data - required for storage
+                if len(v) > 0:
+                    if self.modelgrid is None:
+                        s = "A modelgrid instance must be provided during " \
+                            "instantiation to get IMETH=6 timeseries data"
+                        raise AssertionError(s)
+
+                    if self.modelgrid.grid_type == 'structured':
+                        ndx = [lrc[0] * (self.modelgrid.nrow *
+                                         self.modelgrid.ncol) +
+                               lrc[1] * self.modelgrid.ncol +
+                               (lrc[2] + 1) for lrc in kijlist]
+                    else:
+                        ndx = [lrc[0] * self.modelgrid.ncpl +
+                               (lrc[-1] + 1) for lrc in kijlist]
+
+                    for vv in v:
+                        field = vv.dtype.names[2]
+                        dix = np.where(np.isin(vv['node'], ndx))[0]
+                        if len(dix) > 0:
+                            result[itim, 1:] = vv[field][dix]
+
+        return result
+
+    def _build_kijlist(self, idx):
+        if isinstance(idx, list):
+            kijlist = idx
+        elif isinstance(idx, tuple):
+            kijlist = [idx]
+        else:
+            raise Exception('Could not build kijlist from {}'.format(idx))
+
+        # Check to make sure that k, i, j are within range, otherwise
+        # the seek approach won't work. Can't use k = -1, for example.
+        for k, i, j in kijlist:
+            fail = False
+            errmsg = 'Invalid cell index. Cell ' + str(
+                (k, i, j)) + ' not within model grid: ' + \
+                     str((self.nlay, self.nrow, self.ncol))
+            if k < 0 or k > self.nlay - 1:
+                fail = True
+            if i < 0 or i > self.nrow - 1:
+                fail = True
+            if j < 0 or j > self.ncol - 1:
+                fail = True
+            if fail:
+                raise Exception(errmsg)
+        return kijlist
+
+    def _get_nstation(self, idx, kijlist):
+        if isinstance(idx, list):
+            return len(kijlist)
+        elif isinstance(idx, tuple):
+            return 1
+
+    def _init_result(self, nstation):
+        # Initialize result array and put times in first column
+        result = np.empty((len(self.kstpkper), nstation + 1),
+                          dtype=self.realtype)
+        result[:, :] = np.nan
+        if len(self.times) == result.shape[0]:
+            result[:, 0] = np.array(self.times)
+        return result
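+
+    # Example (sketch) of pulling a time series from a compact budget file;
+    # 'model.cbc' and the cell index (layer 0, row 10, column 10) are
+    # hypothetical:
+    #
+    #   >>> cbb = CellBudgetFile('model.cbc')
+    #   >>> ts = cbb.get_ts(idx=(0, 10, 10), text='FLOW RIGHT FACE')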
+
+    def get_record(self, idx, full3D=False):
+        """
+        Get a single data record from the budget file.
+
+        Parameters
+        ----------
+        idx : int
+            The zero-based record number. The first record is record 0.
+        full3D : boolean
+            If true, then return the record as a three dimensional numpy
+            array, even for those list-style records written as part of a
+            'COMPACT BUDGET' MODFLOW budget file. (Default is False.)
+
+        Returns
+        -------
+        record : a single data record
+            The structure of the returned object depends on the structure of
+            the data in the cbb file. Compact list data are returned as
+            numpy recarrays.
+
+            If full3D is True, then this method will return a numpy masked
+            array of size (nlay, nrow, ncol) for those list-style
+            'COMPACT BUDGET' records written by MODFLOW.
+
+        See Also
+        --------
+
+        Notes
+        -----
+
+        Examples
+        --------
+
+        """
+        # idx must be an ndarray, so if it comes in as an integer then convert
+        if np.isscalar(idx):
+            idx = np.array([idx])
+
+        header = self.recordarray[idx]
+        ipos = np.long(self.iposarray[idx])
+        self.file.seek(ipos, 0)
+        imeth = header['imeth'][0]
+
+        t = header['text'][0]
+        if isinstance(t, bytes):
+            t = t.decode('utf-8')
+        s = 'Returning ' + str(t).strip() + ' as '
+
+        nlay = abs(header['nlay'][0])
+        nrow = header['nrow'][0]
+        ncol = header['ncol'][0]
+
+        # default method
+        if imeth == 0:
+            if self.verbose:
+                s += 'an array of shape ' + str((nlay, nrow, ncol))
+                print(s)
+            return binaryread(self.file, self.realtype(1),
+                              shape=(nlay, nrow, ncol))
+        # imeth 1
+        elif imeth == 1:
+            if self.verbose:
+                s += 'an array of shape ' + str((nlay, nrow, ncol))
+                print(s)
+            return binaryread(self.file, self.realtype(1),
+                              shape=(nlay, nrow, ncol))
+
+        # imeth 2
+        elif imeth == 2:
+            nlist = binaryread(self.file, np.int32)[0]
+            dtype = np.dtype([('node', np.int32), ('q', self.realtype)])
+            if self.verbose:
+                if full3D:
+                    s += 'a numpy masked array of size ({},{},{})'.format(
+                        nlay, nrow, ncol)
+                else:
+                    s += 'a numpy recarray of size (' + str(nlist) + ', 2)'
+                print(s)
+            data = binaryread(self.file, dtype, shape=(nlist,))
+            if full3D:
+                return self.create3D(data, nlay, nrow, ncol)
+            else:
+                return data.view(np.recarray)
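+
+        # (The imeth value in the compact budget header selects the record
+        # layout handled below: 0 and 1 are full 3-D arrays, 2 is a node/q
+        # list, 3 is a 2-D layer-indicator array plus a 2-D data array, 4 is
+        # a 2-D array for the top layer, and 5 and 6 are node lists that may
+        # carry auxiliary variables.)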
+        # imeth 3
+        elif imeth == 3:
+            ilayer = binaryread(self.file, np.int32, shape=(nrow, ncol))
+            data = binaryread(self.file, self.realtype(1), shape=(nrow, ncol))
+            if self.verbose:
+                if full3D:
+                    s += 'a numpy masked array of size ({},{},{})'.format(
+                        nlay, nrow, ncol)
+                else:
+                    s += 'a list of two 2D numpy arrays. '
+                    s += 'The first is an integer layer array of shape ' + \
+                         str((nrow, ncol)) + '. '
+                    s += 'The second is a real data array of shape ' + \
+                         str((nrow, ncol)) + '.'
+                print(s)
+            if full3D:
+                out = np.ma.zeros((nlay, nrow, ncol), dtype=np.float32)
+                out.mask = True
+                vertical_layer = ilayer[0] - 1  # This is always the top layer
+                out[vertical_layer, :, :] = data
+                return out
+            else:
+                return [ilayer, data]
+
+        # imeth 4
+        elif imeth == 4:
+            if self.verbose:
+                s += 'a 2d numpy array of size ({},{})'.format(nrow, ncol)
+                print(s)
+            return binaryread(self.file, self.realtype(1), shape=(nrow, ncol))
+
+        # imeth 5
+        elif imeth == 5:
+            nauxp1 = binaryread(self.file, np.int32)[0]
+            naux = nauxp1 - 1
+            dtype_list = [('node', np.int32), ('q', self.realtype)]
+            for i in range(naux):
+                auxname = binaryread(self.file, str, charlen=16)
+                if not isinstance(auxname, str):
+                    auxname = auxname.decode()
+                dtype_list.append((auxname, self.realtype))
+            dtype = np.dtype(dtype_list)
+            nlist = binaryread(self.file, np.int32)[0]
+            data = binaryread(self.file, dtype, shape=(nlist,))
+            if full3D:
+                if self.verbose:
+                    s += 'a numpy masked array of size ({},{},{})'.format(
+                        nlay, nrow, ncol)
+                    print(s)
+                return self.create3D(data, nlay, nrow, ncol)
+            else:
+                if self.verbose:
+                    s += 'a numpy recarray of size (' + \
+                         str(nlist) + ', {})'.format(2 + naux)
+                    print(s)
+                return data.view(np.recarray)
+
+        # imeth 6
+        elif imeth == 6:
+            # read rest of list data
+            nauxp1 = binaryread(self.file, np.int32)[0]
+            naux = nauxp1 - 1
+            dtype_list = [('node', np.int32), ('node2', np.int32),
+                          ('q', self.realtype)]
+            for i in range(naux):
+                auxname = binaryread(self.file, str, charlen=16)
+                if not isinstance(auxname, str):
+                    auxname = auxname.decode()
+                dtype_list.append((auxname.strip(), self.realtype))
+            dtype = np.dtype(dtype_list)
+            nlist = binaryread(self.file, np.int32)[0]
+            data = binaryread(self.file, dtype, shape=(nlist,))
+            if full3D:
+                s += 'full 3D arrays not supported for ' + \
+                     'imeth = {}'.format(imeth)
+                if self.verbose:
+                    print(s)
+                raise ValueError(s)
+            else:
+                if self.verbose:
+                    s += 'a numpy recarray of size (' + \
+                         str(nlist) + ', {})'.format(3 + naux)
+                    print(s)
+                return data.view(np.recarray)
+        else:
+            raise ValueError('invalid imeth value - {}'.format(imeth))
+
+        # should not reach this point
+        return
+
+    def create3D(self, data, nlay, nrow, ncol):
+        """
+        Convert a recarray with 'node' and 'q' fields into a numpy masked
+        array. In most cases this should not be called directly by the user
+        unless you know what you're doing. Instead, it is used as part of
+        the full3D keyword for get_data.
+
+        Parameters
+        ----------
+        data : numpy recarray
+            Record array with 'node' (one-based node number) and 'q' (flow)
+            fields.
+
+        nlay, nrow, ncol : int
+            Number of layers, rows, and columns of the model grid.
+
+        Returns
+        -------
+        out : numpy masked array
+            Masked array of shape (nlay, nrow, ncol) containing the flows;
+            cells without a budget entry remain masked.
+
+        """
+        out = np.ma.zeros((nlay * nrow * ncol), dtype=np.float32)
+        out.mask = True
+        for [node, q] in zip(data['node'], data['q']):
+            idx = node - 1
+            out.data[idx] += q
+            out.mask[idx] = False
+        return np.ma.reshape(out, (nlay, nrow, ncol))
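+
+    # Example (sketch) of the unraveling performed by create3D: node numbers
+    # are one based, so on a (nlay, nrow, ncol) = (2, 3, 4) grid node 1 maps
+    # to cell (0, 0, 0) and node 24 maps to cell (1, 2, 3); any cell that
+    # receives no entry stays masked.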
+ + """ + return self.recordarray.shape[0] + + def get_residual(self, totim, scaled=False): + """ + Return an array the size of the model grid containing the flow residual + calculated from the budget terms. Residual will not be correct unless + all flow terms are written to the budget file. + + Parameters + ---------- + totim : float + Simulation time for which to calculate the residual. This value + must be precise, so it is best to get it from the get_times + method. + + scaled : bool + If True, then divide the residual by the total cell inflow + + Returns + ------- + residual : np.ndarray + The flow residual for the cell of shape (nlay, nrow, ncol) + + """ + + nlay = self.nlay + nrow = self.nrow + ncol = self.ncol + residual = np.zeros((nlay, nrow, ncol), dtype=np.float) + if scaled: + inflow = np.zeros((nlay, nrow, ncol), dtype=np.float) + select_indices = np.where((self.recordarray['totim'] == totim))[0] + + for i in select_indices: + text = self.recordarray[i]['text'].decode() + if self.verbose: + print('processing {}'.format(text)) + flow = self.get_record(idx=i, full3D=True) + if ncol > 1 and 'RIGHT FACE' in text: + residual -= flow[:, :, :] + residual[:, :, 1:] += flow[:, :, :-1] + if scaled: + idx = np.where(flow < 0.) + inflow[idx] -= flow[idx] + idx = np.where(flow > 0.) + l, r, c = idx + idx = (l, r, c + 1) + inflow[idx] += flow[idx] + elif nrow > 1 and 'FRONT FACE' in text: + residual -= flow[:, :, :] + residual[:, 1:, :] += flow[:, :-1, :] + if scaled: + idx = np.where(flow < 0.) + inflow[idx] -= flow[idx] + idx = np.where(flow > 0.) + l, r, c = idx + idx = (l, r + 1, c) + inflow[idx] += flow[idx] + elif nlay > 1 and 'LOWER FACE' in text: + residual -= flow[:, :, :] + residual[1:, :, :] += flow[:-1, :, :] + if scaled: + idx = np.where(flow < 0.) + inflow[idx] -= flow[idx] + idx = np.where(flow > 0.) + l, r, c = idx + idx = (l + 1, r, c) + inflow[idx] += flow[idx] + else: + residual += flow + if scaled: + idx = np.where(flow > 0.) + inflow[idx] += flow[idx] + + if scaled: + residual_scaled = np.zeros((nlay, nrow, ncol), dtype=np.float) + idx = (inflow > 0.) + residual_scaled[idx] = residual[idx] / inflow[idx] + return residual_scaled + + return residual + + def close(self): + """ + Close the file handle + """ + self.file.close() + return + + +class HeadUFile(BinaryLayerFile): + """ + Unstructured MODFLOW-USG HeadUFile Class. + + Parameters + ---------- + filename : string + Name of the concentration file + text : string + Name of the text string in the head file. Default is 'headu' + precision : string + 'auto', 'single' or 'double'. Default is 'auto'. + verbose : bool + Write information to the screen. Default is False. + + Attributes + ---------- + + Methods + ------- + + See Also + -------- + + Notes + ----- + The HeadUFile class provides simple ways to retrieve a list of + head arrays from a MODFLOW-USG binary head file and time series + arrays for one or more cells. + + The BinaryLayerFile class is built on a record array consisting of + headers, which are record arrays of the modflow header information + (kstp, kper, pertim, totim, text, nrow, ncol, ilay) + and long integers, which are pointers to first bytes of data for + the corresponding data array. For unstructured grids, nrow and ncol + are the starting and ending node numbers for layer, ilay. This class + overrides methods in the parent class so that the proper sized arrays + are created. 
+
+class HeadUFile(BinaryLayerFile):
+    """
+    Unstructured MODFLOW-USG HeadUFile Class.
+
+    Parameters
+    ----------
+    filename : string
+        Name of the head file
+    text : string
+        Name of the text string in the head file. Default is 'headu'
+    precision : string
+        'auto', 'single' or 'double'. Default is 'auto'.
+    verbose : bool
+        Write information to the screen. Default is False.
+
+    Attributes
+    ----------
+
+    Methods
+    -------
+
+    See Also
+    --------
+
+    Notes
+    -----
+    The HeadUFile class provides simple ways to retrieve a list of
+    head arrays from a MODFLOW-USG binary head file and time series
+    arrays for one or more cells.
+
+    The BinaryLayerFile class is built on a record array consisting of
+    headers, which are record arrays of the modflow header information
+    (kstp, kper, pertim, totim, text, nrow, ncol, ilay)
+    and long integers, which are pointers to first bytes of data for
+    the corresponding data array. For unstructured grids, nrow and ncol
+    are the starting and ending node numbers for layer, ilay. This class
+    overrides methods in the parent class so that the proper sized arrays
+    are created.
+
+    When the get_data method is called for this class, a list of
+    one-dimensional arrays will be returned, where each array is the head
+    array for a layer. If the heads for a layer were not saved, then
+    None will be returned for that layer.
+
+    Examples
+    --------
+
+    >>> import flopy.utils.binaryfile as bf
+    >>> hdobj = bf.HeadUFile('model.hds')
+    >>> hdobj.list_records()
+    >>> usgheads = hdobj.get_data(kstpkper=(1, 50))
+
+    """
+
+    def __init__(self, filename, text='headu', precision='auto',
+                 verbose=False, **kwargs):
+        """
+        Class constructor
+        """
+        self.text = text.encode()
+        if precision == 'auto':
+            precision = get_headfile_precision(filename)
+        if precision == 'unknown':
+            s = 'Error. Precision could not be determined for {}'.format(
+                filename)
+            raise Exception(s)
+        self.header_dtype = BinaryHeader.set_dtype(bintype='Head',
+                                                   precision=precision)
+        super(HeadUFile, self).__init__(filename, precision, verbose, kwargs)
+        return
+
+    def _get_data_array(self, totim=0.):
+        """
+        Get a list of 1D arrays for the specified totim value.
+
+        """
+
+        if totim >= 0.:
+            keyindices = np.where((self.recordarray['totim'] == totim))[0]
+            if len(keyindices) == 0:
+                msg = 'totim value ({}) not found in file...'.format(totim)
+                raise Exception(msg)
+        else:
+            raise Exception('Data not found...')
+
+        # fill a list of 1d arrays with heads from binary file
+        data = self.nlay * [None]
+        for idx in keyindices:
+            ipos = self.iposarray[idx]
+            ilay = self.recordarray['ilay'][idx]
+            nstrt = self.recordarray['ncol'][idx]
+            nend = self.recordarray['nrow'][idx]
+            npl = nend - nstrt + 1
+            if self.verbose:
+                msg = 'Byte position in file: {} for '.format(ipos) + \
+                      'layer {}'.format(ilay)
+                print(msg)
+            self.file.seek(ipos, 0)
+            data[ilay - 1] = binaryread(self.file, self.realtype,
+                                        shape=(npl,))
+        return data
+
+    def get_databytes(self, header):
+        """
+
+        Parameters
+        ----------
+        header : datafile.Header
+            header object
+
+        Returns
+        -------
+        databytes : int
+            size of the data array, in bytes, following the header
+
+        """
+        # unstructured head files contain node starting and ending indices
+        # for each layer
+        nstrt = np.int64(header['ncol'])
+        nend = np.int64(header['nrow'])
+        npl = nend - nstrt + 1
+        return npl * np.int64(self.realtype(1).nbytes)
+
+    def get_ts(self, idx):
+        """
+        Get a time series from the binary HeadUFile (not implemented).
+
+        Parameters
+        ----------
+        idx : tuple of ints, or a list of a tuple of ints
+            idx can be (layer, row, column) or it can be a list in the form
+            [(layer, row, column), (layer, row, column), ...]. The layer,
+            row, and column values must be zero based.
+
+        Returns
+        -------
+        out : numpy array
+            Array has size (ntimes, ncells + 1). The first column in the
+            data array will contain time (totim).
+
+        See Also
+        --------
+
+        Notes
+        -----
+
+        Examples
+        --------
+
+        """
+        msg = 'HeadUFile: get_ts() is not implemented'
+        raise NotImplementedError(msg)
diff --git a/flopy/utils/datafile.py b/flopy/utils/datafile.py
index a8e778ad71..783cebfa21 100755
--- a/flopy/utils/datafile.py
+++ b/flopy/utils/datafile.py
@@ -1,558 +1,558 @@
-"""
-Module to read MODFLOW output files. The module contains shared
-abstract classes that should not be directly accessed.
- -""" -from __future__ import print_function -import numpy as np -import flopy.utils -from ..discretization.structuredgrid import StructuredGrid - - -class Header(object): - """ - The header class is an abstract base class to create headers for MODFLOW files - """ - - def __init__(self, filetype=None, precision='single'): - floattype = 'f4' - if precision == 'double': - floattype = 'f8' - self.header_types = ['head', 'drawdown', 'ucn', 'vardis', 'vardisv', - 'vardisu'] - if filetype is None: - self.header_type = None - else: - if isinstance(filetype, bytes): - filetype = filetype.decode() - self.header_type = filetype.lower() - if self.header_type in self.header_types: - if self.header_type == 'head': - self.dtype = np.dtype([('kstp', 'i4'), ('kper', 'i4'), - ('pertim', floattype), - ('totim', floattype), - ('text', 'a16'), - ('ncol', 'i4'), ('nrow', 'i4'), - ('ilay', 'i4')]) - elif self.header_type == 'drawdown': - self.dtype = np.dtype([('kstp', 'i4'), ('kper', 'i4'), - ('pertim', floattype), - ('totim', floattype), - ('text', 'a16'), - ('ncol', 'i4'), ('nrow', 'i4'), - ('ilay', 'i4')]) - elif self.header_type == 'ucn': - self.dtype = np.dtype( - [('ntrans', 'i4'), ('kstp', 'i4'), ('kper', 'i4'), - ('totim', floattype), ('text', 'a16'), - ('ncol', 'i4'), ('nrow', 'i4'), ('ilay', 'i4')]) - elif self.header_type == 'vardis': - self.dtype = np.dtype([('kstp', 'i4'), ('kper', 'i4'), - ('pertim', floattype), - ('totim', floattype), - ('text', 'a16'), - ('ncol', 'i4'), ('nrow', 'i4'), - ('ilay', 'i4')]) - elif self.header_type == 'vardisv': - self.dtype = np.dtype([('kstp', 'i4'), ('kper', 'i4'), - ('pertim', floattype), - ('totim', floattype), - ('text', 'a16'), - ('ncpl', 'i4'), ('ilay', 'i4'), - ('m3', 'i4')]) - elif self.header_type == 'vardisu': - self.dtype = np.dtype([('kstp', 'i4'), ('kper', 'i4'), - ('pertim', floattype), - ('totim', floattype), - ('text', 'a16'), - ('nodes', 'i4'), ('m2', 'i4'), - ('m3', 'i4')]) - - self.header = np.ones(1, self.dtype) - else: - self.dtype = None - self.header = None - msg = 'Specified {} '.format(self.header_type) + \ - 'type is not available. Available types are:' - print(msg) - for idx, t in enumerate(self.header_types): - print(' {0} {1}'.format(idx + 1, t)) - return - - def get_dtype(self): - """ - Return the dtype - """ - return self.dtype - - def get_names(self): - """ - Return the dtype names - """ - return self.dtype.names - - def get_values(self): - """ - Return the header values - """ - if self.header is None: - return None - else: - return self.header[0] - - -class LayerFile(object): - """ - The LayerFile class is the abstract base class from which specific derived - classes are formed. LayerFile This class should not be instantiated - directly. 
- - """ - - def __init__(self, filename, precision, verbose, kwargs): - self.filename = filename - self.precision = precision - self.verbose = verbose - self.file = open(self.filename, 'rb') - # Get filesize to ensure this is not an empty file - self.file.seek(0, 2) - totalbytes = self.file.tell() - self.file.seek(0, 0) # reset to beginning - assert self.file.tell() == 0 - if totalbytes == 0: - raise IOError('datafile error: file is empty: ' + str(filename)) - self.nrow = 0 - self.ncol = 0 - self.nlay = 0 - self.times = [] - self.kstpkper = [] - self.recordarray = [] - self.iposarray = [] - - if precision == 'single': - self.realtype = np.float32 - elif precision == 'double': - self.realtype = np.float64 - else: - raise Exception('Unknown precision specified: ' + precision) - - self.model = None - self.dis = None - self.mg = None - if 'model' in kwargs.keys(): - self.model = kwargs.pop('model') - self.mg = self.model.modelgrid - self.dis = self.model.dis - if 'dis' in kwargs.keys(): - self.dis = kwargs.pop('dis') - self.mg = self.dis.parent.modelgrid - if "modelgrid" in kwargs.keys(): - self.mg = kwargs.pop('modelgrid') - if len(kwargs.keys()) > 0: - args = ','.join(kwargs.keys()) - raise Exception('LayerFile error: unrecognized kwargs: ' + args) - - # read through the file and build the pointer index - self._build_index() - - # now that we read the data and know nrow and ncol, - # we can make a generic sr if needed - if self.mg is None: - self.mg = StructuredGrid(delc=np.ones((self.nrow,)), - delr=np.ones(self.ncol, ), - xoff=0.0, yoff=0.0, - angrot=0.0) - return - - def to_shapefile(self, filename, kstpkper=None, totim=None, mflay=None, - attrib_name='lf_data'): - """ - Export model output data to a shapefile at a specific location - in LayerFile instance. - - Parameters - ---------- - filename : str - Shapefile name to write - kstpkper : tuple of ints - A tuple containing the time step and stress period (kstp, kper). - These are zero-based kstp and kper values. - totim : float - The simulation time. - mflay : integer - MODFLOW zero-based layer number to return. If None, then layer 1 - will be written - attrib_name : str - Base name of attribute columns. (default is 'lf_data') - - Returns - ---------- - None - - See Also - -------- - - Notes - ----- - - Examples - -------- - >>> import flopy - >>> hdobj = flopy.utils.HeadFile('test.hds') - >>> times = hdobj.get_times() - >>> hdobj.to_shapefile('test_heads_sp6.shp', totim=times[-1]) - """ - - plotarray = np.atleast_3d(self.get_data(kstpkper=kstpkper, - totim=totim, mflay=mflay) - .transpose()).transpose() - if mflay != None: - attrib_dict = { - attrib_name + '{}'.format(mflay): plotarray[0, :, :]} - else: - attrib_dict = {} - for k in range(plotarray.shape[0]): - name = attrib_name + '{}'.format(k) - attrib_dict[name] = plotarray[k] - - from ..export.shapefile_utils import write_grid_shapefile - write_grid_shapefile(filename, self.mg, attrib_dict) - - def plot(self, axes=None, kstpkper=None, totim=None, mflay=None, - filename_base=None, **kwargs): - """ - Plot 3-D model output data in a specific location - in LayerFile instance - - Parameters - ---------- - axes : list of matplotlib.pyplot.axis - List of matplotlib.pyplot.axis that will be used to plot - data for each layer. If axes=None axes will be generated. - (default is None) - kstpkper : tuple of ints - A tuple containing the time step and stress period (kstp, kper). - These are zero-based kstp and kper values. - totim : float - The simulation time. 
- mflay : int - MODFLOW zero-based layer number to return. If None, then all - all layers will be included. (default is None) - filename_base : str - Base file name that will be used to automatically generate file - names for output image files. Plots will be exported as image - files if file_name_base is not None. (default is None) - **kwargs : dict - pcolor : bool - Boolean used to determine if matplotlib.pyplot.pcolormesh - plot will be plotted. (default is True) - colorbar : bool - Boolean used to determine if a color bar will be added to - the matplotlib.pyplot.pcolormesh. Only used if pcolor=True. - (default is False) - contour : bool - Boolean used to determine if matplotlib.pyplot.contour - plot will be plotted. (default is False) - clabel : bool - Boolean used to determine if matplotlib.pyplot.clabel - will be plotted. Only used if contour=True. (default is False) - grid : bool - Boolean used to determine if the model grid will be plotted - on the figure. (default is False) - masked_values : list - List of unique values to be excluded from the plot. - file_extension : str - Valid matplotlib.pyplot file extension for savefig(). Only used - if filename_base is not None. (default is 'png') - - Returns - ---------- - None - - See Also - -------- - - Notes - ----- - - Examples - -------- - >>> import flopy - >>> hdobj = flopy.utils.HeadFile('test.hds') - >>> times = hdobj.get_times() - >>> hdobj.plot(totim=times[-1]) - - """ - - if 'file_extension' in kwargs: - fext = kwargs.pop('file_extension') - fext = fext.replace('.', '') - else: - fext = 'png' - - masked_values = kwargs.pop("masked_values", []) - if self.model is not None: - if hasattr(self.model, 'bas6') and self.model.bas6 is not None: - masked_values.append(self.model.bas6.hnoflo) - kwargs["masked_values"] = masked_values - - filenames = None - if filename_base is not None: - if mflay is not None: - i0 = int(mflay) - if i0 + 1 >= self.nlay: - i0 = self.nlay - 1 - i1 = i0 + 1 - else: - i0 = 0 - i1 = self.nlay - filenames = ['{}_Layer{}.{}'.format(filename_base, k + 1, fext) - for k in range(i0, i1)] - - # make sure we have a (lay,row,col) shape plotarray - plotarray = np.atleast_3d(self.get_data(kstpkper=kstpkper, - totim=totim, mflay=mflay) - .transpose()).transpose() - - from flopy.plot.plotutil import PlotUtilities - - return PlotUtilities._plot_array_helper(plotarray, - model=self.model, - axes=axes, - filenames=filenames, - mflay=mflay, - modelgrid=self.mg, - **kwargs) - - def _build_index(self): - """ - Build the recordarray and iposarray, which maps the header information - to the position in the formatted file. - """ - e = 'Abstract method _build_index called in LayerFile. ' + \ - 'This method needs to be overridden.' - raise Exception(e) - - def list_records(self): - """ - Print a list of all of the records in the file - obj.list_records() - - """ - for header in self.recordarray: - print(header) - return - - def _get_data_array(self, totim=0): - """ - Get the three dimensional data array for the - specified kstp and kper value or totim value. 
- - """ - - if totim >= 0.: - keyindices = np.where((self.recordarray['totim'] == totim))[0] - if len(keyindices) == 0: - msg = 'totim value ({}) not found in file...'.format(totim) - raise Exception(msg) - else: - raise Exception('Data not found...') - - # initialize head with nan and then fill it - idx = keyindices[0] - nrow = self.recordarray['nrow'][idx] - ncol = self.recordarray['ncol'][idx] - data = np.empty((self.nlay, nrow, ncol), dtype=self.realtype) - data[:, :, :] = np.nan - for idx in keyindices: - ipos = self.iposarray[idx] - ilay = self.recordarray['ilay'][idx] - if self.verbose: - msg = 'Byte position in file: {} for '.format(ipos) + \ - 'layer {}'.format(ilay) - print(msg) - self.file.seek(ipos, 0) - nrow = self.recordarray['nrow'][idx] - ncol = self.recordarray['ncol'][idx] - shp = (nrow, ncol) - data[ilay - 1] = self._read_data(shp) - return data - - def get_times(self): - """ - Get a list of unique times in the file - - Returns - ---------- - out : list of floats - List contains unique simulation times (totim) in binary file. - - """ - return self.times - - def get_kstpkper(self): - """ - Get a list of unique stress periods and time steps in the file - - Returns - ---------- - out : list of (kstp, kper) tuples - List of unique kstp, kper combinations in binary file. kstp and - kper values are presently zero-based. - - """ - kstpkper = [] - for kstp, kper in self.kstpkper: - kstpkper.append((kstp - 1, kper - 1)) - return kstpkper - - def get_data(self, kstpkper=None, idx=None, totim=None, mflay=None): - """ - Get data from the file for the specified conditions. - - Parameters - ---------- - idx : int - The zero-based record number. The first record is record 0. - kstpkper : tuple of ints - A tuple containing the time step and stress period (kstp, kper). - These are zero-based kstp and kper values. - totim : float - The simulation time. - mflay : integer - MODFLOW zero-based layer number to return. If None, then all - all layers will be included. (Default is None.) - - Returns - ---------- - data : numpy array - Array has size (nlay, nrow, ncol) if mflay is None or it has size - (nrow, ncol) if mlay is specified. - - See Also - -------- - - Notes - ----- - if both kstpkper and totim are None, will return the last entry - Examples - -------- - - """ - # One-based kstp and kper for pulling out of recarray - if kstpkper is not None: - kstp1 = kstpkper[0] + 1 - kper1 = kstpkper[1] + 1 - idx = np.where( - (self.recordarray['kstp'] == kstp1) & - (self.recordarray['kper'] == kper1)) - if idx[0].shape[0] == 0: - raise Exception("get_data() error: kstpkper not found:{0}". - format(kstpkper)) - totim1 = self.recordarray[idx]["totim"][0] - elif totim is not None: - totim1 = totim - elif idx is not None: - totim1 = self.recordarray['totim'][idx] - else: - totim1 = self.times[-1] - - data = self._get_data_array(totim1) - if mflay is None: - return data - else: - return data[mflay, :, :] - - def get_alldata(self, mflay=None, nodata=-9999): - """ - Get all of the data from the file. - - Parameters - ---------- - mflay : integer - MODFLOW zero-based layer number to return. If None, then all - all layers will be included. (Default is None.) - - nodata : float - The nodata value in the data array. All array values that have the - nodata value will be assigned np.nan. - - Returns - ---------- - data : numpy array - Array has size (ntimes, nlay, nrow, ncol) if mflay is None or it - has size (ntimes, nrow, ncol) if mlay is specified. 
- - See Also - -------- - - Notes - ----- - - Examples - -------- - - """ - rv = [] - for totim in self.times: - h = self.get_data(totim=totim, mflay=mflay) - rv.append(h) - rv = np.array(rv) - rv[rv == nodata] = np.nan - return rv - - def _read_data(self, shp): - """ - Read data from file - - """ - e = 'Abstract method _read_data called in LayerFile. ' + \ - 'This method needs to be overridden.' - raise Exception(e) - - def _build_kijlist(self, idx): - if isinstance(idx, list): - kijlist = idx - elif isinstance(idx, tuple): - kijlist = [idx] - else: - raise Exception('Could not build kijlist from ', idx) - - # Check to make sure that k, i, j are within range, otherwise - # the seek approach won't work. Can't use k = -1, for example. - for k, i, j in kijlist: - fail = False - errmsg = 'Invalid cell index. Cell ' + str( - (k, i, j)) + ' not within model grid: ' + \ - str((self.nlay, self.nrow, self.ncol)) - if k < 0 or k > self.nlay - 1: - fail = True - if i < 0 or i > self.nrow - 1: - fail = True - if j < 0 or j > self.ncol - 1: - fail = True - if fail: - raise Exception(errmsg) - return kijlist - - def _get_nstation(self, idx, kijlist): - if isinstance(idx, list): - return len(kijlist) - elif isinstance(idx, tuple): - return 1 - - def _init_result(self, nstation): - # Initialize result array and put times in first column - result = np.empty((len(self.times), nstation + 1), - dtype=self.realtype) - result[:, :] = np.nan - result[:, 0] = np.array(self.times) - return result - - def close(self): - """ - Close the file handle. - - """ - self.file.close() - return +""" +Module to read MODFLOW output files. The module contains shared +abstract classes that should not be directly accessed. + +""" +from __future__ import print_function +import numpy as np +import flopy.utils +from ..discretization.structuredgrid import StructuredGrid + + +class Header(object): + """ + The header class is an abstract base class to create headers for MODFLOW files + """ + + def __init__(self, filetype=None, precision='single'): + floattype = 'f4' + if precision == 'double': + floattype = 'f8' + self.header_types = ['head', 'drawdown', 'ucn', 'vardis', 'vardisv', + 'vardisu'] + if filetype is None: + self.header_type = None + else: + if isinstance(filetype, bytes): + filetype = filetype.decode() + self.header_type = filetype.lower() + if self.header_type in self.header_types: + if self.header_type == 'head': + self.dtype = np.dtype([('kstp', 'i4'), ('kper', 'i4'), + ('pertim', floattype), + ('totim', floattype), + ('text', 'a16'), + ('ncol', 'i4'), ('nrow', 'i4'), + ('ilay', 'i4')]) + elif self.header_type == 'drawdown': + self.dtype = np.dtype([('kstp', 'i4'), ('kper', 'i4'), + ('pertim', floattype), + ('totim', floattype), + ('text', 'a16'), + ('ncol', 'i4'), ('nrow', 'i4'), + ('ilay', 'i4')]) + elif self.header_type == 'ucn': + self.dtype = np.dtype( + [('ntrans', 'i4'), ('kstp', 'i4'), ('kper', 'i4'), + ('totim', floattype), ('text', 'a16'), + ('ncol', 'i4'), ('nrow', 'i4'), ('ilay', 'i4')]) + elif self.header_type == 'vardis': + self.dtype = np.dtype([('kstp', 'i4'), ('kper', 'i4'), + ('pertim', floattype), + ('totim', floattype), + ('text', 'a16'), + ('ncol', 'i4'), ('nrow', 'i4'), + ('ilay', 'i4')]) + elif self.header_type == 'vardisv': + self.dtype = np.dtype([('kstp', 'i4'), ('kper', 'i4'), + ('pertim', floattype), + ('totim', floattype), + ('text', 'a16'), + ('ncpl', 'i4'), ('ilay', 'i4'), + ('m3', 'i4')]) + elif self.header_type == 'vardisu': + self.dtype = np.dtype([('kstp', 'i4'), ('kper', 'i4'), + 
+                                           ('pertim', floattype),
+                                           ('totim', floattype),
+                                           ('text', 'a16'),
+                                           ('nodes', 'i4'), ('m2', 'i4'),
+                                           ('m3', 'i4')])
+
+                self.header = np.ones(1, self.dtype)
+        else:
+            self.dtype = None
+            self.header = None
+            msg = 'Specified {} '.format(self.header_type) + \
+                  'type is not available. Available types are:'
+            print(msg)
+            for idx, t in enumerate(self.header_types):
+                print('  {0} {1}'.format(idx + 1, t))
+        return
+
+    def get_dtype(self):
+        """
+        Return the dtype
+        """
+        return self.dtype
+
+    def get_names(self):
+        """
+        Return the dtype names
+        """
+        return self.dtype.names
+
+    def get_values(self):
+        """
+        Return the header values
+        """
+        if self.header is None:
+            return None
+        else:
+            return self.header[0]
+
+
+class LayerFile(object):
+    """
+    The LayerFile class is the abstract base class from which specific derived
+    classes are formed. This class should not be instantiated directly.
+
+    """
+
+    def __init__(self, filename, precision, verbose, kwargs):
+        self.filename = filename
+        self.precision = precision
+        self.verbose = verbose
+        self.file = open(self.filename, 'rb')
+        # Get filesize to ensure this is not an empty file
+        self.file.seek(0, 2)
+        totalbytes = self.file.tell()
+        self.file.seek(0, 0)  # reset to beginning
+        assert self.file.tell() == 0
+        if totalbytes == 0:
+            raise IOError('datafile error: file is empty: ' + str(filename))
+        self.nrow = 0
+        self.ncol = 0
+        self.nlay = 0
+        self.times = []
+        self.kstpkper = []
+        self.recordarray = []
+        self.iposarray = []
+
+        if precision == 'single':
+            self.realtype = np.float32
+        elif precision == 'double':
+            self.realtype = np.float64
+        else:
+            raise Exception('Unknown precision specified: ' + precision)
+
+        self.model = None
+        self.dis = None
+        self.mg = None
+        if 'model' in kwargs.keys():
+            self.model = kwargs.pop('model')
+            self.mg = self.model.modelgrid
+            self.dis = self.model.dis
+        if 'dis' in kwargs.keys():
+            self.dis = kwargs.pop('dis')
+            self.mg = self.dis.parent.modelgrid
+        if "modelgrid" in kwargs.keys():
+            self.mg = kwargs.pop('modelgrid')
+        if len(kwargs.keys()) > 0:
+            args = ','.join(kwargs.keys())
+            raise Exception('LayerFile error: unrecognized kwargs: ' + args)
+
+        # read through the file and build the pointer index
+        self._build_index()
+
+        # now that we read the data and know nrow and ncol,
+        # we can make a generic grid if needed
+        if self.mg is None:
+            self.mg = StructuredGrid(delc=np.ones((self.nrow,)),
+                                     delr=np.ones(self.ncol, ),
+                                     xoff=0.0, yoff=0.0,
+                                     angrot=0.0)
+        return
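+
+    # Example (sketch) of the keyword arguments consumed above: a derived
+    # reader such as flopy.utils.HeadFile can be georeferenced by passing
+    # one of 'model', 'dis', or 'modelgrid', e.g. with a hypothetical
+    # Modflow model 'ml':
+    #
+    #   >>> hds = flopy.utils.HeadFile('test.hds', model=ml)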
+
+    def to_shapefile(self, filename, kstpkper=None, totim=None, mflay=None,
+                     attrib_name='lf_data'):
+        """
+        Export model output data to a shapefile at a specific location
+        in LayerFile instance.
+
+        Parameters
+        ----------
+        filename : str
+            Shapefile name to write
+        kstpkper : tuple of ints
+            A tuple containing the time step and stress period (kstp, kper).
+            These are zero-based kstp and kper values.
+        totim : float
+            The simulation time.
+        mflay : integer
+            MODFLOW zero-based layer number to return. If None, then all
+            layers will be written
+        attrib_name : str
+            Base name of attribute columns. (default is 'lf_data')
+
+        Returns
+        -------
+        None
+
+        See Also
+        --------
+
+        Notes
+        -----
+
+        Examples
+        --------
+        >>> import flopy
+        >>> hdobj = flopy.utils.HeadFile('test.hds')
+        >>> times = hdobj.get_times()
+        >>> hdobj.to_shapefile('test_heads_sp6.shp', totim=times[-1])
+        """
+
+        plotarray = np.atleast_3d(self.get_data(kstpkper=kstpkper,
+                                                totim=totim, mflay=mflay)
+                                  .transpose()).transpose()
+        if mflay is not None:
+            attrib_dict = {
+                attrib_name + '{}'.format(mflay): plotarray[0, :, :]}
+        else:
+            attrib_dict = {}
+            for k in range(plotarray.shape[0]):
+                name = attrib_name + '{}'.format(k)
+                attrib_dict[name] = plotarray[k]
+
+        from ..export.shapefile_utils import write_grid_shapefile
+        write_grid_shapefile(filename, self.mg, attrib_dict)
+
+    def plot(self, axes=None, kstpkper=None, totim=None, mflay=None,
+             filename_base=None, **kwargs):
+        """
+        Plot 3-D model output data in a specific location
+        in LayerFile instance
+
+        Parameters
+        ----------
+        axes : list of matplotlib.pyplot.axis
+            List of matplotlib.pyplot.axis that will be used to plot
+            data for each layer. If axes=None axes will be generated.
+            (default is None)
+        kstpkper : tuple of ints
+            A tuple containing the time step and stress period (kstp, kper).
+            These are zero-based kstp and kper values.
+        totim : float
+            The simulation time.
+        mflay : int
+            MODFLOW zero-based layer number to return. If None, then all
+            layers will be included. (default is None)
+        filename_base : str
+            Base file name that will be used to automatically generate file
+            names for output image files. Plots will be exported as image
+            files if filename_base is not None. (default is None)
+        **kwargs : dict
+            pcolor : bool
+                Boolean used to determine if matplotlib.pyplot.pcolormesh
+                plot will be plotted. (default is True)
+            colorbar : bool
+                Boolean used to determine if a color bar will be added to
+                the matplotlib.pyplot.pcolormesh. Only used if pcolor=True.
+                (default is False)
+            contour : bool
+                Boolean used to determine if matplotlib.pyplot.contour
+                plot will be plotted. (default is False)
+            clabel : bool
+                Boolean used to determine if matplotlib.pyplot.clabel
+                will be plotted. Only used if contour=True. (default is False)
+            grid : bool
+                Boolean used to determine if the model grid will be plotted
+                on the figure. (default is False)
+            masked_values : list
+                List of unique values to be excluded from the plot.
+            file_extension : str
+                Valid matplotlib.pyplot file extension for savefig(). Only
+                used if filename_base is not None.
+                (default is 'png')
+
+        Returns
+        -------
+        None
+
+        See Also
+        --------
+
+        Notes
+        -----
+
+        Examples
+        --------
+        >>> import flopy
+        >>> hdobj = flopy.utils.HeadFile('test.hds')
+        >>> times = hdobj.get_times()
+        >>> hdobj.plot(totim=times[-1])
+
+        """
+
+        if 'file_extension' in kwargs:
+            fext = kwargs.pop('file_extension')
+            fext = fext.replace('.', '')
+        else:
+            fext = 'png'
+
+        masked_values = kwargs.pop("masked_values", [])
+        if self.model is not None:
+            if hasattr(self.model, 'bas6') and self.model.bas6 is not None:
+                masked_values.append(self.model.bas6.hnoflo)
+        kwargs["masked_values"] = masked_values
+
+        filenames = None
+        if filename_base is not None:
+            if mflay is not None:
+                i0 = int(mflay)
+                if i0 + 1 >= self.nlay:
+                    i0 = self.nlay - 1
+                i1 = i0 + 1
+            else:
+                i0 = 0
+                i1 = self.nlay
+            filenames = ['{}_Layer{}.{}'.format(filename_base, k + 1, fext)
+                         for k in range(i0, i1)]
+
+        # make sure we have a (lay,row,col) shape plotarray
+        plotarray = np.atleast_3d(self.get_data(kstpkper=kstpkper,
+                                                totim=totim, mflay=mflay)
+                                  .transpose()).transpose()
+
+        from flopy.plot.plotutil import PlotUtilities
+
+        return PlotUtilities._plot_array_helper(plotarray,
+                                                model=self.model,
+                                                axes=axes,
+                                                filenames=filenames,
+                                                mflay=mflay,
+                                                modelgrid=self.mg,
+                                                **kwargs)
+
+    def _build_index(self):
+        """
+        Build the recordarray and iposarray, which map the header information
+        to the position in the formatted file.
+        """
+        e = 'Abstract method _build_index called in LayerFile. ' + \
+            'This method needs to be overridden.'
+        raise Exception(e)
+
+    def list_records(self):
+        """
+        Print a list of all of the records in the file
+        obj.list_records()
+
+        """
+        for header in self.recordarray:
+            print(header)
+        return
+
+    def _get_data_array(self, totim=0):
+        """
+        Get the three dimensional data array for the
+        specified totim value.
+
+        """
+
+        if totim >= 0.:
+            keyindices = np.where((self.recordarray['totim'] == totim))[0]
+            if len(keyindices) == 0:
+                msg = 'totim value ({}) not found in file...'.format(totim)
+                raise Exception(msg)
+        else:
+            raise Exception('Data not found...')
+
+        # initialize head with nan and then fill it
+        idx = keyindices[0]
+        nrow = self.recordarray['nrow'][idx]
+        ncol = self.recordarray['ncol'][idx]
+        data = np.empty((self.nlay, nrow, ncol), dtype=self.realtype)
+        data[:, :, :] = np.nan
+        for idx in keyindices:
+            ipos = self.iposarray[idx]
+            ilay = self.recordarray['ilay'][idx]
+            if self.verbose:
+                msg = 'Byte position in file: {} for '.format(ipos) + \
+                      'layer {}'.format(ilay)
+                print(msg)
+            self.file.seek(ipos, 0)
+            nrow = self.recordarray['nrow'][idx]
+            ncol = self.recordarray['ncol'][idx]
+            shp = (nrow, ncol)
+            data[ilay - 1] = self._read_data(shp)
+        return data
+
+    def get_times(self):
+        """
+        Get a list of unique times in the file
+
+        Returns
+        -------
+        out : list of floats
+            List contains unique simulation times (totim) in binary file.
+
+        """
+        return self.times
+
+    def get_kstpkper(self):
+        """
+        Get a list of unique stress periods and time steps in the file
+
+        Returns
+        -------
+        out : list of (kstp, kper) tuples
+            List of unique kstp, kper combinations in binary file. kstp and
+            kper values are presently zero-based.
+
+        """
+        kstpkper = []
+        for kstp, kper in self.kstpkper:
+            kstpkper.append((kstp - 1, kper - 1))
+        return kstpkper
+
+    def get_data(self, kstpkper=None, idx=None, totim=None, mflay=None):
+        """
+        Get data from the file for the specified conditions.
+
+        Parameters
+        ----------
+        idx : int
+            The zero-based record number. The first record is record 0.
+        kstpkper : tuple of ints
+            A tuple containing the time step and stress period (kstp, kper).
+            These are zero-based kstp and kper values.
+        totim : float
+            The simulation time.
+        mflay : integer
+            MODFLOW zero-based layer number to return. If None, then all
+            layers will be included. (Default is None.)
+
+        Returns
+        -------
+        data : numpy array
+            Array has size (nlay, nrow, ncol) if mflay is None or it has size
+            (nrow, ncol) if mflay is specified.
+
+        See Also
+        --------
+
+        Notes
+        -----
+        If both kstpkper and totim are None, the last entry will be returned.
+
+        Examples
+        --------
+
+        """
+        # One-based kstp and kper for pulling out of recarray
+        if kstpkper is not None:
+            kstp1 = kstpkper[0] + 1
+            kper1 = kstpkper[1] + 1
+            idx = np.where(
+                (self.recordarray['kstp'] == kstp1) &
+                (self.recordarray['kper'] == kper1))
+            if idx[0].shape[0] == 0:
+                raise Exception("get_data() error: kstpkper not found:{0}".
+                                format(kstpkper))
+            totim1 = self.recordarray[idx]["totim"][0]
+        elif totim is not None:
+            totim1 = totim
+        elif idx is not None:
+            totim1 = self.recordarray['totim'][idx]
+        else:
+            totim1 = self.times[-1]
+
+        data = self._get_data_array(totim1)
+        if mflay is None:
+            return data
+        else:
+            return data[mflay, :, :]
+
+    def get_alldata(self, mflay=None, nodata=-9999):
+        """
+        Get all of the data from the file.
+
+        Parameters
+        ----------
+        mflay : integer
+            MODFLOW zero-based layer number to return. If None, then all
+            layers will be included. (Default is None.)
+
+        nodata : float
+            The nodata value in the data array. All array values that have the
+            nodata value will be assigned np.nan.
+
+        Returns
+        -------
+        data : numpy array
+            Array has size (ntimes, nlay, nrow, ncol) if mflay is None or it
+            has size (ntimes, nrow, ncol) if mflay is specified.
+
+        See Also
+        --------
+
+        Notes
+        -----
+
+        Examples
+        --------
+
+        """
+        rv = []
+        for totim in self.times:
+            h = self.get_data(totim=totim, mflay=mflay)
+            rv.append(h)
+        rv = np.array(rv)
+        rv[rv == nodata] = np.nan
+        return rv
+
+    def _read_data(self, shp):
+        """
+        Read data from file
+
+        """
+        e = 'Abstract method _read_data called in LayerFile. ' + \
+            'This method needs to be overridden.'
+        raise Exception(e)
+
+    def _build_kijlist(self, idx):
+        if isinstance(idx, list):
+            kijlist = idx
+        elif isinstance(idx, tuple):
+            kijlist = [idx]
+        else:
+            raise Exception('Could not build kijlist from {}'.format(idx))
+
+        # Check to make sure that k, i, j are within range, otherwise
+        # the seek approach won't work. Can't use k = -1, for example.
+        for k, i, j in kijlist:
+            fail = False
+            errmsg = 'Invalid cell index. Cell ' + str(
+                (k, i, j)) + ' not within model grid: ' + \
+                     str((self.nlay, self.nrow, self.ncol))
+            if k < 0 or k > self.nlay - 1:
+                fail = True
+            if i < 0 or i > self.nrow - 1:
+                fail = True
+            if j < 0 or j > self.ncol - 1:
+                fail = True
+            if fail:
+                raise Exception(errmsg)
+        return kijlist
+
+    def _get_nstation(self, idx, kijlist):
+        if isinstance(idx, list):
+            return len(kijlist)
+        elif isinstance(idx, tuple):
+            return 1
+
+    def _init_result(self, nstation):
+        # Initialize result array and put times in first column
+        result = np.empty((len(self.times), nstation + 1),
+                          dtype=self.realtype)
+        result[:, :] = np.nan
+        result[:, 0] = np.array(self.times)
+        return result
+
+    def close(self):
+        """
+        Close the file handle.
+ + """ + self.file.close() + return diff --git a/flopy/utils/datautil.py b/flopy/utils/datautil.py index 1431c0dcb9..5fd2ccbc0f 100644 --- a/flopy/utils/datautil.py +++ b/flopy/utils/datautil.py @@ -1,703 +1,703 @@ -import os -import numpy as np - - -def clean_name(name): - # remove bad characters - clean_string = name.replace(' ', '_') - clean_string = clean_string.replace('-', '_') - # remove anything after a parenthesis - index = clean_string.find('(') - if index != -1: - clean_string = clean_string[0:index] - return clean_string - - -def find_keyword(arr_line, keyword_dict): - # convert to lower case - arr_line_lower = [] - for word in arr_line: - # integers and floats are not keywords - if not DatumUtil.is_int(word) and not DatumUtil.is_float(word): - arr_line_lower.append(word.lower()) - # look for constants in order of most words to least words - key = '' - for num_words in range(len(arr_line_lower), -1, -1): - key = tuple(arr_line_lower[0:num_words]) - if len(key) > 0 and key in keyword_dict: - return key - return None - - -def max_tuple_abs_size(some_tuple): - max_size = 0 - for item in some_tuple: - item_abs = abs(item) - if item_abs > max_size: - max_size = item_abs - return max_size - - -class DatumUtil(object): - @staticmethod - def is_int(str): - try: - int(str) - return True - except TypeError: - return False - except ValueError: - return False - - @staticmethod - def is_float(str): - try: - float(str) - return True - except TypeError: - return False - except ValueError: - return False - - @staticmethod - def is_basic_type(obj): - if isinstance(obj, str) or isinstance(obj, int) or \ - isinstance(obj, float): - return True - return False - - -class PyListUtil(object): - """ - Class contains miscellaneous methods to work with and compare python lists - - Parameters - ---------- - path : string - file path to read/write to - max_error : float - maximum acceptable error when doing a compare of floating point numbers - - Methods - ------- - is_empty_list : (current_list : list) : boolean - determines if an n-dimensional list is empty - con_convert : (data : string, data_type : type that has conversion - operation) : boolean - returns true if data can be converted into data_type - max_multi_dim_list_size : (current_list : list) : boolean - determines the max number of items in a multi-dimensional list - 'current_list' - first_item : (current_list : list) : variable - returns the first item in the list 'current_list' - next_item : (current_list : list) : variable - returns the next item in the list 'current_list' - array_comp : (first_array : list, second_array : list) : boolean - compares two lists, returns true if they are identical (with max_error) - spilt_data_line : (line : string) : list - splits a string apart (using split) and then cleans up the results - dealing with various MODFLOW input file releated delimiters. returns - the delimiter type used. 
- clean_numeric : (text : string) : string - returns a cleaned up version of 'text' with only numeric characters - save_array_diff : (first_array : list, second_array : list, - first_array_name : string, second_array_name : string) - saves lists 'first_array' and 'second_array' to files first_array_name - and second_array_name and then saves the difference of the two - arrays to 'debug_array_diff.txt' - save_array(filename : string, multi_array : list) - saves 'multi_array' to the file 'filename' - """ - numeric_chars = {'0': 0, '1': 0, '2': 0, '3': 0, '4': 0, '5': 0, - '6': 0, '7': 0, '8': 0, '9': 0, '.': 0, '-': 0} - quote_list = {"'", '"'} - delimiter_list = {',': 1} - delimiter_used = None - line_num = 0 - consistent_delim = False - - def __init__(self, path=None, max_error=0.01): - self.max_error = max_error - if path: - self.path = path - else: - self.path = os.getcwd() - - @staticmethod - def has_one_item(current_list): - if not isinstance(current_list, list) and not isinstance(current_list, - np.ndarray): - return True - if len(current_list) != 1: - return False - if (isinstance(current_list[0], list) or - isinstance(current_list, np.ndarray)) and \ - len(current_list[0] != 0): - return False - return True - - @staticmethod - def is_empty_list(current_list): - if not isinstance(current_list, list): - return not current_list - - for item in current_list: - if isinstance(item, list): - # still in a list of lists, recurse - if not PyListUtil.is_empty_list(item): - return False - else: - return False - - return True - - @staticmethod - def max_multi_dim_list_size(current_list): - max_length = -1 - for item in current_list: - if len(item) > max_length: - max_length = len(item) - return max_length - - @staticmethod - def first_item(current_list): - if not isinstance(current_list, list) and not isinstance\ - (current_list, np.ndarray): - return current_list - - for item in current_list: - if isinstance(item, list) or isinstance(item, np.ndarray): - # still in a list of lists, recurse - return PyListUtil.first_item(item) - else: - return item - - @staticmethod - def next_item(current_list, new_list=True, nesting_change=0, - end_of_list=True): - # returns the next item in a nested list along with other information: - # (, , , - # - if not isinstance(current_list, list) and \ - not isinstance(current_list, np.ndarray): - yield (current_list, end_of_list, new_list, nesting_change) - else: - list_size = 1 - for item in current_list: - if isinstance(item, list) or isinstance(current_list, - np.ndarray): - # still in a list of lists, recurse - for item in PyListUtil.next_item(item, list_size == 1, - nesting_change + 1, - list_size == - len(current_list)): - yield item - nesting_change = -(nesting_change + 1) - else: - yield (item, list_size == len(current_list), - list_size == 1, nesting_change) - nesting_change = 0 - list_size += 1 - - @staticmethod - def next_list(current_list): - if not isinstance(current_list[0], list) and not \ - isinstance(current_list[0], np.ndarray): - yield current_list - else: - for lst in current_list: - if isinstance(lst[0], list) or isinstance(lst[0], np.ndarray): - for lst in PyListUtil.next_list(lst): - yield lst - else: - yield lst - - def array_comp(self, first_array, second_array): - diff = first_array - second_array - max = np.max(np.abs(diff)) - if max > self.max_error: - return False - return True - - @staticmethod - def reset_delimiter_used(): - PyListUtil.delimiter_used = None - PyListUtil.line_num = 0 - PyListUtil.consistent_delim = True - - @staticmethod 
- def split_data_line(line, external_file=False, delimiter_conf_length=15): - if PyListUtil.line_num > delimiter_conf_length and \ - PyListUtil.consistent_delim: - # consistent delimiter has been found. continue using that - # delimiter without doing further checks - if PyListUtil.delimiter_used is None: - comment_split = line.strip().split('#', 1) - clean_line = comment_split[0].strip().split() - else: - comment_split = line.strip().split('#', 1) - clean_line = comment_split[0].strip().split( - PyListUtil.delimiter_used) - if len(comment_split) > 1: - clean_line.append('#') - clean_line.append(comment_split[1]) - else: - # compare against the default split option without comments split - comment_split = line.strip().split('#', 1) - clean_line = comment_split[0].strip().split() - if len(comment_split) > 1: - clean_line.append('#') - clean_line.append(comment_split[1]) - # try different delimiters and use the one the breaks the data - # apart the most - max_split_size = len(clean_line) - max_split_type = None - for delimiter in PyListUtil.delimiter_list: - comment_split = line.strip().split('#') - alt_split = comment_split[0].strip().split(delimiter) - if len(comment_split) > 1: - alt_split.append('#') - alt_split.append(comment_split[1]) - alt_split_len = len(alt_split) - if alt_split_len > max_split_size: - max_split_size = len(alt_split) - max_split_type = delimiter - elif alt_split_len == max_split_size: - if max_split_type not in PyListUtil.delimiter_list or \ - PyListUtil.delimiter_list[delimiter] < \ - PyListUtil.delimiter_list[max_split_type]: - max_split_size = len(alt_split) - max_split_type = delimiter - - if max_split_type is not None: - clean_line = line.strip().split(max_split_type) - if PyListUtil.line_num == 0: - PyListUtil.delimiter_used = max_split_type - elif PyListUtil.delimiter_used != max_split_type: - PyListUtil.consistent_delim = False - PyListUtil.line_num += 1 - - arr_fixed_line = [] - index = 0 - # loop through line to fix quotes and delimiters - while index < len(clean_line): - item = clean_line[index] - if item and item not in PyListUtil.delimiter_list: - if item and item[0] in PyListUtil.quote_list: - # starts with a quote, handle quoted text - if item[-1] in PyListUtil.quote_list: - arr_fixed_line.append(item[1:-1]) - else: - arr_fixed_line.append(item[1:]) - # loop until trailing quote found - while index < len(clean_line): - index += 1 - if index < len(clean_line): - item = clean_line[index] - if item[-1] in PyListUtil.quote_list: - arr_fixed_line[-1] = \ - '{} {}'.format(arr_fixed_line[-1], - item[:-1]) - break - else: - arr_fixed_line[-1] = \ - '{} {}'.format(arr_fixed_line[-1], - item) - else: - # no quote, just append - arr_fixed_line.append(item) - index += 1 - - return arr_fixed_line - - @staticmethod - def clean_numeric(text): - if isinstance(text, str): - # remove all non-numeric text from leading and trailing positions - # of text - if text: - while text and ( - text[0] not in PyListUtil.numeric_chars or text[-1] - not in PyListUtil.numeric_chars): - if text[0] not in PyListUtil.numeric_chars: - text = text[1:] - if text and text[-1] not in PyListUtil.numeric_chars: - text = text[:-1] - return text - - def save_array_diff(self, first_array, second_array, first_array_name, - second_array_name): - try: - diff = first_array - second_array - self.save_array(first_array_name, first_array) - self.save_array(second_array_name, second_array) - self.save_array('debug_array_diff.txt', diff) - except: - print("An error occurred while outputting array 
differences.") - return False - return True - - # Saves an array with up to three dimensions - def save_array(self, filename, multi_array): - file_path = os.path.join(self.path, filename) - with open(file_path, 'w') as outfile: - outfile.write('{}\n'.format(str(multi_array.shape))) - if len(multi_array.shape) == 4: - for slice in multi_array: - for second_slice in slice: - for third_slice in second_slice: - for item in third_slice: - outfile.write(' {:10.3e}'.format(item)) - outfile.write('\n') - outfile.write('\n') - outfile.write('\n') - elif len(multi_array.shape) == 3: - for slice in multi_array: - np.savetxt(outfile, slice, fmt='%10.3e') - outfile.write('\n') - else: - np.savetxt(outfile, multi_array, fmt='%10.3e') - - -class MultiList(): - """ - Class for storing objects in an n-dimensional list which can be iterated - through as a single list. - - Parameters - ---------- - mdlist : list - multi-dimensional list to initialize the multi-list. either mdlist - or both shape and callback must be specified - shape : tuple - shape of the multi-list - callback : method - callback method that takes a location in the multi-list (tuple) and - returns an object to be stored at that location in the multi-list - - Methods - ------- - increment_dimension : (dimension, callback) - increments the size of one of the two dimensions of the multi-list - build_list : (callback) - builds a multi-list of shape self.list_shape, constructing objects - for the list using the supplied callback method - first_item : () : object - gets the first entry in the multi-list - get_total_size : () : int - returns the total number of entries in the multi-list - in_shape : (indexes) : boolean - returns whether a tuple of indexes are valid indexes for the shape of - the multi-list - inc_shape_idx : (indexes) : tuple - given a tuple of indexes pointing to an entry in the multi-list, - returns a tuple of indexes pointing to the next entry in the multi-list - first_index : () : tuple - returns a tuple of indexes pointing to the first entry in the - multi-list - indexes : (start_indexes=None, end_indexes=None) : iter(tuple) - returns an iterator that iterates from the location in the - multi-list defined by start_indexes to the location in the - multi-list defined by end_indexes - elements : () : iter(object) - returns an iterator that iterates over each object stored in the - multi-list - """ - - def __init__(self, mdlist=None, shape=None, callback=None): - if mdlist is not None: - self.multi_dim_list = mdlist - self.list_shape = MultiList._calc_shape(mdlist) - elif shape is not None: - self.list_shape = shape - self.multi_dim_list = [] - if callback is not None: - self.build_list(callback) - else: - raise Exception('MultiList requires either a mdlist or a shape ' - 'at initialization.') - - def __getitem__(self, k): - if isinstance(k, list) or isinstance(k, tuple): - item_ptr = self.multi_dim_list - for index in k: - item_ptr = item_ptr[index] - return item_ptr - else: - return self.multi_dim_list[k] - - @staticmethod - def _calc_shape(current_list): - shape = [] - if isinstance(current_list, list): - shape.append(len(current_list)) - sub_list = current_list[0] - if isinstance(sub_list, list): - shape += MultiList._calc_shape(sub_list) - elif isinstance(current_list, np.ndarray): - shape.append(current_list.shape[0]) - else: - return 1 - return tuple(shape) - - def increment_dimension(self, dimension, callback): - # ONLY SUPPORTS 1 OR 2 DIMENSIONAL MULTI-LISTS - # TODO: REWRITE TO SUPPORT N-DIMENSIONAL MULTI-LISTS - if 
len(self.list_shape) > 2: - raise Exception('Increment_dimension currently only supports 1 ' - 'or 2 dimensional multi-lists') - if len(self.list_shape) == 1: - self.multi_dim_list.append(callback(len(self.list_shape))) - self.list_shape = (self.list_shape[0] + 1,) - else: - if dimension == 1: - new_row_idx = len(self.multi_dim_list) - self.multi_dim_list.append([]) - for index in range(0, self.list_shape[1]): - self.multi_dim_list[-1].append(callback((new_row_idx, - index))) - self.list_shape = (self.list_shape[0] + 1, self.list_shape[1]) - elif dimension == 2: - new_col_idx = len(self.multi_dim_list[0]) - for index in range(0, self.list_shape[0]): - self.multi_dim_list[index].append(callback((index, - new_col_idx))) - self.list_shape = (self.list_shape[0], self.list_shape[1] + 1) - else: - raise Exception('For two dimensional lists "dimension" must ' - 'be 1 or 2.') - - def build_list(self, callback): - entry_points = [(self.multi_dim_list, self.first_index())] - shape_len = len(self.list_shape) - # loop through each dimension - for index, shape_size in enumerate(self.list_shape): - new_entry_points = [] - # loop through locations to add to the list - for entry_point in entry_points: - # loop through the size of current dimension - for val in range(0, shape_size): - if index < (shape_len - 1): - # this is a multi-dimensional multi-list, build out - # first dimension - entry_point[0].append([]) - if entry_point[1] is None: - new_location = (len(entry_point) - 1,) - else: - new_location = ((len(entry_point[0]) - 1), val) - new_entry_points.append((entry_point[0][-1], - new_location)) - else: - entry_point[0].append(callback(entry_point[1])) - entry_points = new_entry_points - - def first_item(self): - return PyListUtil.first_item(self.multi_dim_list) - - def get_total_size(self): - shape_size = 1 - for item in self.list_shape: - if item is None: - return 0 - else: - shape_size *= item - return shape_size - - def in_shape(self, indexes): - for index, item in zip(indexes, self.list_shape): - if index > item: - return False - return True - - def inc_shape_idx(self, indexes): - new_indexes = [] - incremented = False - for index, item in zip(indexes, self.list_shape): - if index == item: - new_indexes.append(0) - elif incremented: - new_indexes.append(index) - else: - incremented = True - new_indexes.append(index + 1) - if not incremented: - new_indexes[-1] += 1 - return tuple(new_indexes) - - def first_index(self): - first_index = [] - for index in self.list_shape: - first_index.append(0) - return tuple(first_index) - - def nth_index(self, n): - index = None - aii = ArrayIndexIter(self.list_shape, True) - index_num = 0 - while index_num <= n: - index = aii.next() - index_num += 1 - return index - - def indexes(self, start_indexes=None, end_indexes=None): - aii = ArrayIndexIter(self.list_shape, True) - if start_indexes is not None: - aii.current_location = list(start_indexes) - aii.current_index = len(aii.current_location) - 1 - if end_indexes is not None: - aii.end_location = list(end_indexes) - return aii - - def elements(self): - return MultiListIter(self.multi_dim_list, False) - - -class ArrayIndexIter(object): - def __init__(self, array_shape, index_as_tuple=False): - self.array_shape = array_shape - self.current_location = [] - self.end_location = [] - self.first_item = True - self.index_as_tuple = index_as_tuple - for item in array_shape: - self.current_location.append(0) - self.end_location.append(item) - self.current_index = len(self.current_location) - 1 - - def __iter__(self): - 
return self - - def __next__(self): - if self.first_item: - self.first_item = False - if self.current_location[self.current_index] < \ - self.end_location[self.current_index]: - if len(self.current_location) > 1 or self.index_as_tuple: - return tuple(self.current_location) - else: - return self.current_location[0] - while self.current_index >= 0: - location = self.current_location[self.current_index] - if location < self.end_location[self.current_index] - 1: - self.current_location[self.current_index] += 1 - self.current_index = len(self.current_location) - 1 - if len(self.current_location) > 1 or self.index_as_tuple: - return tuple(self.current_location) - else: - return self.current_location[0] - else: - self.current_location[self.current_index] = 0 - self.current_index -= 1 - raise StopIteration() - - next = __next__ # Python 2 support - - -class MultiListIter(object): - def __init__(self, multi_list, detailed_info=False, iter_leaf_lists=False): - self.multi_list = multi_list - self.detailed_info = detailed_info - if iter_leaf_lists: - self.val_iter = PyListUtil.next_list(self.multi_list) - else: - self.val_iter = PyListUtil.next_item(self.multi_list) - - def __iter__(self): - return self - - def __next__(self): - next_val = next(self.val_iter) - if self.detailed_info: - return next_val - else: - return next_val[0] - - next = __next__ # Python 2 support - - -class ConstIter(object): - def __init__(self, value): - self.value = value - - def __iter__(self): - return self - - def __next__(self): - return self.value - - next = __next__ # Python 2 support - - -class FileIter(object): - def __init__(self, file_path): - self.eof = False - try: - self._fd = open(file_path, 'r') - except: - self.eof = True - self._current_data = None - self._data_index = 0 - self._next_line() - - def __iter__(self): - return self - - def __next__(self): - if self.eof: - raise StopIteration() - else: - while self._current_data is not None and \ - self._data_index >= len(self._current_data): - self._next_line() - self._data_index = 0 - if self.eof: - raise StopIteration() - self._data_index += 1 - return self._current_data[self._data_index - 1] - - def close(self): - self._fd.close() - - def _next_line(self): - if self.eof: - return - data_line = self._fd.readline() - if data_line is None: - self.eof = True - return - self._current_data = PyListUtil.split_data_line(data_line) - - next = __next__ # Python 2 support - - -class NameIter(object): - def __init__(self, name, first_not_numbered=True): - self.name = name - self.iter_num = -1 - self.first_not_numbered = first_not_numbered - - def __iter__(self): - return self - - def __next__(self): - self.iter_num += 1 - if self.iter_num == 0 and self.first_not_numbered: - return self.name - else: - return '{}_{}'.format(self.name, self.iter_num) - - next = __next__ # Python 2 support - - -class PathIter(object): - def __init__(self, path, first_not_numbered=True): - self.path = path - self.name_iter = NameIter(path[-1], first_not_numbered) - - def __iter__(self): - return self - - def __next__(self): - return self.path[0:-1] + (self.name_iter.__next__(),) - - next = __next__ # Python 2 support +import os +import numpy as np + + +def clean_name(name): + # remove bad characters + clean_string = name.replace(' ', '_') + clean_string = clean_string.replace('-', '_') + # remove anything after a parenthesis + index = clean_string.find('(') + if index != -1: + clean_string = clean_string[0:index] + return clean_string + + +def find_keyword(arr_line, keyword_dict): + # convert 
to lower case
+    arr_line_lower = []
+    for word in arr_line:
+        # integers and floats are not keywords
+        if not DatumUtil.is_int(word) and not DatumUtil.is_float(word):
+            arr_line_lower.append(word.lower())
+    # look for constants in order of most words to least words
+    key = ''
+    for num_words in range(len(arr_line_lower), -1, -1):
+        key = tuple(arr_line_lower[0:num_words])
+        if len(key) > 0 and key in keyword_dict:
+            return key
+    return None
+
+
+def max_tuple_abs_size(some_tuple):
+    max_size = 0
+    for item in some_tuple:
+        item_abs = abs(item)
+        if item_abs > max_size:
+            max_size = item_abs
+    return max_size
+
+
+class DatumUtil(object):
+    @staticmethod
+    def is_int(str):
+        try:
+            int(str)
+            return True
+        except TypeError:
+            return False
+        except ValueError:
+            return False
+
+    @staticmethod
+    def is_float(str):
+        try:
+            float(str)
+            return True
+        except TypeError:
+            return False
+        except ValueError:
+            return False
+
+    @staticmethod
+    def is_basic_type(obj):
+        if isinstance(obj, str) or isinstance(obj, int) or \
+                isinstance(obj, float):
+            return True
+        return False
+
+
+class PyListUtil(object):
+    """
+    Class contains miscellaneous methods to work with and compare python lists
+
+    Parameters
+    ----------
+    path : string
+        file path to read/write to
+    max_error : float
+        maximum acceptable error when doing a compare of floating point numbers
+
+    Methods
+    -------
+    is_empty_list : (current_list : list) : boolean
+        determines if an n-dimensional list is empty
+    con_convert : (data : string, data_type : type that has conversion
+      operation) : boolean
+        returns true if data can be converted into data_type
+    max_multi_dim_list_size : (current_list : list) : boolean
+        determines the max number of items in a multi-dimensional list
+        'current_list'
+    first_item : (current_list : list) : variable
+        returns the first item in the list 'current_list'
+    next_item : (current_list : list) : variable
+        returns the next item in the list 'current_list'
+    array_comp : (first_array : list, second_array : list) : boolean
+        compares two lists, returns true if they are identical (with max_error)
+    split_data_line : (line : string) : list
+        splits a string apart (using split) and then cleans up the results
+        dealing with various MODFLOW input file related delimiters.
+        returns the resulting list of strings
+    clean_numeric : (text : string) : string
+        returns a cleaned up version of 'text' with only numeric characters
+    save_array_diff : (first_array : list, second_array : list,
+      first_array_name : string, second_array_name : string)
+        saves lists 'first_array' and 'second_array' to files first_array_name
+        and second_array_name and then saves the difference of the two
+        arrays to 'debug_array_diff.txt'
+    save_array(filename : string, multi_array : list)
+        saves 'multi_array' to the file 'filename'
+    """
+    numeric_chars = {'0': 0, '1': 0, '2': 0, '3': 0, '4': 0, '5': 0,
+                     '6': 0, '7': 0, '8': 0, '9': 0, '.': 0, '-': 0}
+    quote_list = {"'", '"'}
+    delimiter_list = {',': 1}
+    delimiter_used = None
+    line_num = 0
+    consistent_delim = False
+
+    def __init__(self, path=None, max_error=0.01):
+        self.max_error = max_error
+        if path:
+            self.path = path
+        else:
+            self.path = os.getcwd()
+
+    @staticmethod
+    def has_one_item(current_list):
+        if not isinstance(current_list, list) and \
+                not isinstance(current_list, np.ndarray):
+            return True
+        if len(current_list) != 1:
+            return False
+        # a single entry that is itself a non-empty list or array does not
+        # count as one item
+        if (isinstance(current_list[0], list) or
+                isinstance(current_list[0], np.ndarray)) and \
+                len(current_list[0]) != 0:
+            return False
+        return True
+
+    @staticmethod
+    def is_empty_list(current_list):
+        if not isinstance(current_list, list):
+            return not current_list
+
+        for item in current_list:
+            if isinstance(item, list):
+                # still in a list of lists, recurse
+                if not PyListUtil.is_empty_list(item):
+                    return False
+            else:
+                return False
+
+        return True
+
+    @staticmethod
+    def max_multi_dim_list_size(current_list):
+        max_length = -1
+        for item in current_list:
+            if len(item) > max_length:
+                max_length = len(item)
+        return max_length
+
+    @staticmethod
+    def first_item(current_list):
+        if not isinstance(current_list, list) and \
+                not isinstance(current_list, np.ndarray):
+            return current_list
+
+        for item in current_list:
+            if isinstance(item, list) or isinstance(item, np.ndarray):
+                # still in a list of lists, recurse
+                return PyListUtil.first_item(item)
+            else:
+                return item
+
+    @staticmethod
+    def next_item(current_list, new_list=True, nesting_change=0,
+                  end_of_list=True):
+        # returns the next item in a nested list along with other information:
+        # (item, end_of_list, new_list, nesting_change)
+        if not isinstance(current_list, list) and \
+                not isinstance(current_list, np.ndarray):
+            yield (current_list, end_of_list, new_list, nesting_change)
+        else:
+            list_size = 1
+            for item in current_list:
+                if isinstance(item, list) or isinstance(current_list,
+                                                        np.ndarray):
+                    # still in a list of lists, recurse
+                    for item in PyListUtil.next_item(item, list_size == 1,
+                                                     nesting_change + 1,
+                                                     list_size ==
+                                                     len(current_list)):
+                        yield item
+                    nesting_change = -(nesting_change + 1)
+                else:
+                    yield (item, list_size == len(current_list),
+                           list_size == 1, nesting_change)
+                    nesting_change = 0
+                list_size += 1
+
+    @staticmethod
+    def next_list(current_list):
+        if not isinstance(current_list[0], list) and \
+                not isinstance(current_list[0], np.ndarray):
+            yield current_list
+        else:
+            for lst in current_list:
+                if isinstance(lst[0], list) or isinstance(lst[0], np.ndarray):
+                    for sub_list in PyListUtil.next_list(lst):
+                        yield sub_list
+                else:
+                    yield lst
+
+    def array_comp(self, first_array, second_array):
+        diff = first_array - second_array
+        max_diff = np.max(np.abs(diff))
+        if max_diff > self.max_error:
+            return False
+        return True
+
+    @staticmethod
+    def reset_delimiter_used():
+        PyListUtil.delimiter_used = None
+        PyListUtil.line_num = 0
+        PyListUtil.consistent_delim = True
+
+    @staticmethod
+ def split_data_line(line, external_file=False, delimiter_conf_length=15): + if PyListUtil.line_num > delimiter_conf_length and \ + PyListUtil.consistent_delim: + # consistent delimiter has been found. continue using that + # delimiter without doing further checks + if PyListUtil.delimiter_used is None: + comment_split = line.strip().split('#', 1) + clean_line = comment_split[0].strip().split() + else: + comment_split = line.strip().split('#', 1) + clean_line = comment_split[0].strip().split( + PyListUtil.delimiter_used) + if len(comment_split) > 1: + clean_line.append('#') + clean_line.append(comment_split[1]) + else: + # compare against the default split option without comments split + comment_split = line.strip().split('#', 1) + clean_line = comment_split[0].strip().split() + if len(comment_split) > 1: + clean_line.append('#') + clean_line.append(comment_split[1]) + # try different delimiters and use the one the breaks the data + # apart the most + max_split_size = len(clean_line) + max_split_type = None + for delimiter in PyListUtil.delimiter_list: + comment_split = line.strip().split('#') + alt_split = comment_split[0].strip().split(delimiter) + if len(comment_split) > 1: + alt_split.append('#') + alt_split.append(comment_split[1]) + alt_split_len = len(alt_split) + if alt_split_len > max_split_size: + max_split_size = len(alt_split) + max_split_type = delimiter + elif alt_split_len == max_split_size: + if max_split_type not in PyListUtil.delimiter_list or \ + PyListUtil.delimiter_list[delimiter] < \ + PyListUtil.delimiter_list[max_split_type]: + max_split_size = len(alt_split) + max_split_type = delimiter + + if max_split_type is not None: + clean_line = line.strip().split(max_split_type) + if PyListUtil.line_num == 0: + PyListUtil.delimiter_used = max_split_type + elif PyListUtil.delimiter_used != max_split_type: + PyListUtil.consistent_delim = False + PyListUtil.line_num += 1 + + arr_fixed_line = [] + index = 0 + # loop through line to fix quotes and delimiters + while index < len(clean_line): + item = clean_line[index] + if item and item not in PyListUtil.delimiter_list: + if item and item[0] in PyListUtil.quote_list: + # starts with a quote, handle quoted text + if item[-1] in PyListUtil.quote_list: + arr_fixed_line.append(item[1:-1]) + else: + arr_fixed_line.append(item[1:]) + # loop until trailing quote found + while index < len(clean_line): + index += 1 + if index < len(clean_line): + item = clean_line[index] + if item[-1] in PyListUtil.quote_list: + arr_fixed_line[-1] = \ + '{} {}'.format(arr_fixed_line[-1], + item[:-1]) + break + else: + arr_fixed_line[-1] = \ + '{} {}'.format(arr_fixed_line[-1], + item) + else: + # no quote, just append + arr_fixed_line.append(item) + index += 1 + + return arr_fixed_line + + @staticmethod + def clean_numeric(text): + if isinstance(text, str): + # remove all non-numeric text from leading and trailing positions + # of text + if text: + while text and ( + text[0] not in PyListUtil.numeric_chars or text[-1] + not in PyListUtil.numeric_chars): + if text[0] not in PyListUtil.numeric_chars: + text = text[1:] + if text and text[-1] not in PyListUtil.numeric_chars: + text = text[:-1] + return text + + def save_array_diff(self, first_array, second_array, first_array_name, + second_array_name): + try: + diff = first_array - second_array + self.save_array(first_array_name, first_array) + self.save_array(second_array_name, second_array) + self.save_array('debug_array_diff.txt', diff) + except: + print("An error occurred while outputting array 
differences.") + return False + return True + + # Saves an array with up to three dimensions + def save_array(self, filename, multi_array): + file_path = os.path.join(self.path, filename) + with open(file_path, 'w') as outfile: + outfile.write('{}\n'.format(str(multi_array.shape))) + if len(multi_array.shape) == 4: + for slice in multi_array: + for second_slice in slice: + for third_slice in second_slice: + for item in third_slice: + outfile.write(' {:10.3e}'.format(item)) + outfile.write('\n') + outfile.write('\n') + outfile.write('\n') + elif len(multi_array.shape) == 3: + for slice in multi_array: + np.savetxt(outfile, slice, fmt='%10.3e') + outfile.write('\n') + else: + np.savetxt(outfile, multi_array, fmt='%10.3e') + + +class MultiList(): + """ + Class for storing objects in an n-dimensional list which can be iterated + through as a single list. + + Parameters + ---------- + mdlist : list + multi-dimensional list to initialize the multi-list. either mdlist + or both shape and callback must be specified + shape : tuple + shape of the multi-list + callback : method + callback method that takes a location in the multi-list (tuple) and + returns an object to be stored at that location in the multi-list + + Methods + ------- + increment_dimension : (dimension, callback) + increments the size of one of the two dimensions of the multi-list + build_list : (callback) + builds a multi-list of shape self.list_shape, constructing objects + for the list using the supplied callback method + first_item : () : object + gets the first entry in the multi-list + get_total_size : () : int + returns the total number of entries in the multi-list + in_shape : (indexes) : boolean + returns whether a tuple of indexes are valid indexes for the shape of + the multi-list + inc_shape_idx : (indexes) : tuple + given a tuple of indexes pointing to an entry in the multi-list, + returns a tuple of indexes pointing to the next entry in the multi-list + first_index : () : tuple + returns a tuple of indexes pointing to the first entry in the + multi-list + indexes : (start_indexes=None, end_indexes=None) : iter(tuple) + returns an iterator that iterates from the location in the + multi-list defined by start_indexes to the location in the + multi-list defined by end_indexes + elements : () : iter(object) + returns an iterator that iterates over each object stored in the + multi-list + """ + + def __init__(self, mdlist=None, shape=None, callback=None): + if mdlist is not None: + self.multi_dim_list = mdlist + self.list_shape = MultiList._calc_shape(mdlist) + elif shape is not None: + self.list_shape = shape + self.multi_dim_list = [] + if callback is not None: + self.build_list(callback) + else: + raise Exception('MultiList requires either a mdlist or a shape ' + 'at initialization.') + + def __getitem__(self, k): + if isinstance(k, list) or isinstance(k, tuple): + item_ptr = self.multi_dim_list + for index in k: + item_ptr = item_ptr[index] + return item_ptr + else: + return self.multi_dim_list[k] + + @staticmethod + def _calc_shape(current_list): + shape = [] + if isinstance(current_list, list): + shape.append(len(current_list)) + sub_list = current_list[0] + if isinstance(sub_list, list): + shape += MultiList._calc_shape(sub_list) + elif isinstance(current_list, np.ndarray): + shape.append(current_list.shape[0]) + else: + return 1 + return tuple(shape) + + def increment_dimension(self, dimension, callback): + # ONLY SUPPORTS 1 OR 2 DIMENSIONAL MULTI-LISTS + # TODO: REWRITE TO SUPPORT N-DIMENSIONAL MULTI-LISTS + if 
len(self.list_shape) > 2: + raise Exception('Increment_dimension currently only supports 1 ' + 'or 2 dimensional multi-lists') + if len(self.list_shape) == 1: + self.multi_dim_list.append(callback(len(self.list_shape))) + self.list_shape = (self.list_shape[0] + 1,) + else: + if dimension == 1: + new_row_idx = len(self.multi_dim_list) + self.multi_dim_list.append([]) + for index in range(0, self.list_shape[1]): + self.multi_dim_list[-1].append(callback((new_row_idx, + index))) + self.list_shape = (self.list_shape[0] + 1, self.list_shape[1]) + elif dimension == 2: + new_col_idx = len(self.multi_dim_list[0]) + for index in range(0, self.list_shape[0]): + self.multi_dim_list[index].append(callback((index, + new_col_idx))) + self.list_shape = (self.list_shape[0], self.list_shape[1] + 1) + else: + raise Exception('For two dimensional lists "dimension" must ' + 'be 1 or 2.') + + def build_list(self, callback): + entry_points = [(self.multi_dim_list, self.first_index())] + shape_len = len(self.list_shape) + # loop through each dimension + for index, shape_size in enumerate(self.list_shape): + new_entry_points = [] + # loop through locations to add to the list + for entry_point in entry_points: + # loop through the size of current dimension + for val in range(0, shape_size): + if index < (shape_len - 1): + # this is a multi-dimensional multi-list, build out + # first dimension + entry_point[0].append([]) + if entry_point[1] is None: + new_location = (len(entry_point) - 1,) + else: + new_location = ((len(entry_point[0]) - 1), val) + new_entry_points.append((entry_point[0][-1], + new_location)) + else: + entry_point[0].append(callback(entry_point[1])) + entry_points = new_entry_points + + def first_item(self): + return PyListUtil.first_item(self.multi_dim_list) + + def get_total_size(self): + shape_size = 1 + for item in self.list_shape: + if item is None: + return 0 + else: + shape_size *= item + return shape_size + + def in_shape(self, indexes): + for index, item in zip(indexes, self.list_shape): + if index > item: + return False + return True + + def inc_shape_idx(self, indexes): + new_indexes = [] + incremented = False + for index, item in zip(indexes, self.list_shape): + if index == item: + new_indexes.append(0) + elif incremented: + new_indexes.append(index) + else: + incremented = True + new_indexes.append(index + 1) + if not incremented: + new_indexes[-1] += 1 + return tuple(new_indexes) + + def first_index(self): + first_index = [] + for index in self.list_shape: + first_index.append(0) + return tuple(first_index) + + def nth_index(self, n): + index = None + aii = ArrayIndexIter(self.list_shape, True) + index_num = 0 + while index_num <= n: + index = aii.next() + index_num += 1 + return index + + def indexes(self, start_indexes=None, end_indexes=None): + aii = ArrayIndexIter(self.list_shape, True) + if start_indexes is not None: + aii.current_location = list(start_indexes) + aii.current_index = len(aii.current_location) - 1 + if end_indexes is not None: + aii.end_location = list(end_indexes) + return aii + + def elements(self): + return MultiListIter(self.multi_dim_list, False) + + +class ArrayIndexIter(object): + def __init__(self, array_shape, index_as_tuple=False): + self.array_shape = array_shape + self.current_location = [] + self.end_location = [] + self.first_item = True + self.index_as_tuple = index_as_tuple + for item in array_shape: + self.current_location.append(0) + self.end_location.append(item) + self.current_index = len(self.current_location) - 1 + + def __iter__(self): + 
return self + + def __next__(self): + if self.first_item: + self.first_item = False + if self.current_location[self.current_index] < \ + self.end_location[self.current_index]: + if len(self.current_location) > 1 or self.index_as_tuple: + return tuple(self.current_location) + else: + return self.current_location[0] + while self.current_index >= 0: + location = self.current_location[self.current_index] + if location < self.end_location[self.current_index] - 1: + self.current_location[self.current_index] += 1 + self.current_index = len(self.current_location) - 1 + if len(self.current_location) > 1 or self.index_as_tuple: + return tuple(self.current_location) + else: + return self.current_location[0] + else: + self.current_location[self.current_index] = 0 + self.current_index -= 1 + raise StopIteration() + + next = __next__ # Python 2 support + + +class MultiListIter(object): + def __init__(self, multi_list, detailed_info=False, iter_leaf_lists=False): + self.multi_list = multi_list + self.detailed_info = detailed_info + if iter_leaf_lists: + self.val_iter = PyListUtil.next_list(self.multi_list) + else: + self.val_iter = PyListUtil.next_item(self.multi_list) + + def __iter__(self): + return self + + def __next__(self): + next_val = next(self.val_iter) + if self.detailed_info: + return next_val + else: + return next_val[0] + + next = __next__ # Python 2 support + + +class ConstIter(object): + def __init__(self, value): + self.value = value + + def __iter__(self): + return self + + def __next__(self): + return self.value + + next = __next__ # Python 2 support + + +class FileIter(object): + def __init__(self, file_path): + self.eof = False + try: + self._fd = open(file_path, 'r') + except: + self.eof = True + self._current_data = None + self._data_index = 0 + self._next_line() + + def __iter__(self): + return self + + def __next__(self): + if self.eof: + raise StopIteration() + else: + while self._current_data is not None and \ + self._data_index >= len(self._current_data): + self._next_line() + self._data_index = 0 + if self.eof: + raise StopIteration() + self._data_index += 1 + return self._current_data[self._data_index - 1] + + def close(self): + self._fd.close() + + def _next_line(self): + if self.eof: + return + data_line = self._fd.readline() + if data_line is None: + self.eof = True + return + self._current_data = PyListUtil.split_data_line(data_line) + + next = __next__ # Python 2 support + + +class NameIter(object): + def __init__(self, name, first_not_numbered=True): + self.name = name + self.iter_num = -1 + self.first_not_numbered = first_not_numbered + + def __iter__(self): + return self + + def __next__(self): + self.iter_num += 1 + if self.iter_num == 0 and self.first_not_numbered: + return self.name + else: + return '{}_{}'.format(self.name, self.iter_num) + + next = __next__ # Python 2 support + + +class PathIter(object): + def __init__(self, path, first_not_numbered=True): + self.path = path + self.name_iter = NameIter(path[-1], first_not_numbered) + + def __iter__(self): + return self + + def __next__(self): + return self.path[0:-1] + (self.name_iter.__next__(),) + + next = __next__ # Python 2 support diff --git a/flopy/utils/flopy_io.py b/flopy/utils/flopy_io.py index ab227bd846..c7677916ae 100755 --- a/flopy/utils/flopy_io.py +++ b/flopy/utils/flopy_io.py @@ -1,486 +1,486 @@ -""" -Module for input/output utilities -""" -import os -import sys -import numpy as np - -try: - import pandas as pd -except: - pd = False - - -def _fmt_string(array, float_format='{}'): - """ - makes a 
formatting string for a rec-array; - given a desired float_format. - - Parameters - ---------- - array : np.recarray - float_format : str - formatter for floating point variable - - Returns - ------- - fmt_string : str - formatting string for writing output - """ - fmt_string = '' - for field in array.dtype.descr: - vtype = field[1][1].lower() - if vtype == 'i': - fmt_string += '{:.0f} ' - elif vtype == 'f': - fmt_string += '{} '.format(float_format) - elif vtype == 'o': - fmt_string += '{} ' - elif vtype == 's': - raise Exception("MfList error: 'str' type found in dtype." + \ - " This gives unpredictable results when " + \ - "recarray to file - change to 'object' type") - else: - raise Exception("MfList.fmt_string error: unknown vtype " + \ - "in dtype:" + vtype) - return fmt_string - - -def line_strip(line): - """ - Remove comments and replace commas from input text - for a free formatted modflow input file - - Parameters - ---------- - line : str - a line of text from a modflow input file - - Returns - ------- - str : line with comments removed and commas replaced - """ - for comment_flag in [';', '#', '!!']: - line = line.split(comment_flag)[0] - line = line.strip() - return line.replace(',', ' ') - - -def get_next_line(f): - """ - Get the next line from a file that is not a blank line - - Parameters - ---------- - f : filehandle - filehandle to a open file - - Returns - ------- - line : string - next non-empty line in a open file - - - """ - while True: - line = f.readline().rstrip() - if len(line) > 0: - break - return line - - -def line_parse(line): - """ - Convert a line of text into to a list of values. This handles the - case where a free formatted MODFLOW input file may have commas in - it. - """ - line = line_strip(line) - return line.split() - - -def pop_item(line, dtype=str): - if len(line) > 0: - if dtype == str: - return line.pop(0) - elif dtype == float: - return float(line.pop(0)) - elif dtype == int: - # handle strings like this: - # '-10.' - return int(float(line.pop(0))) - return dtype(0) - - -def write_fixed_var(v, length=10, ipos=None, free=False, comment=None): - """ - - Parameters - ---------- - v : list, int, float, bool, or numpy array - list, int, float, bool, or numpy array containing the data to be - written to a string. - length : int - length of each column for fixed column widths. (default is 10) - ipos : list, int, or numpy array - user-provided column widths. (default is None) - free : bool - boolean indicating if a free format string should be generated. - length and ipos are not used if free is True. 
(default is False) - comment : str - comment string to add to the end of the string - - Returns - ------- - out : str - fixed or free format string generated using user-provided data - - """ - if isinstance(v, np.ndarray): - v = v.tolist() - elif isinstance(v, int) or isinstance(v, float) or isinstance(v, bool): - v = [v] - ncol = len(v) - # construct ipos if it was not passed - if ipos is None: - ipos = [] - for i in range(ncol): - ipos.append(length) - else: - if isinstance(ipos, np.ndarray): - ipos = ipos.flatten().tolist() - elif isinstance(ipos, int): - ipos = [ipos] - if len(ipos) < ncol: - err = 'user provided ipos length ({})'.format(len(ipos)) + \ - 'should be greater than or equal ' + \ - 'to the length of v ({})'.format(ncol) - raise Exception(err) - out = '' - for n in range(ncol): - if free: - write_fmt = '{} ' - else: - if isinstance(v[n], (float, np.float, np.float32, np.float64)): - width = ipos[n] - 6 - vmin, vmax = 10**-width, 10**width - if abs(v[n]) < vmin or abs(v[n]) > vmax: - ctype = 'g' - else: - ctype = '.{}f'.format(width) - elif isinstance(v[n], (int, np.int, np.int32, np.int64)): - ctype = 'd' - else: - ctype = '' - write_fmt = '{{:>{}{}}}'.format(ipos[n],ctype) - out += write_fmt.format(v[n]) - if comment is not None: - out += ' # {}'.format(comment) - out += '\n' - return out - - -def read_fixed_var(line, ncol=1, length=10, ipos=None, free=False): - """ - Parse a fixed format line using user provided data - - Parameters - ---------- - line : str - text string to parse. - ncol : int - number of columns to parse from line. (default is 1) - length : int - length of each column for fixed column widths. (default is 10) - ipos : list, int, or numpy array - user-provided column widths. (default is None) - free : bool - boolean indicating if sting is free format. ncol, length, and - ipos are not used if free is True. (default is False) - - Returns - ------- - out : list - padded list containing data parsed from the passed text string - - """ - if free: - out = line.rstrip().split() - else: - # construct ipos if it was not passed - if ipos is None: - ipos = [] - for i in range(ncol): - ipos.append(length) - else: - if isinstance(ipos, np.ndarray): - ipos = ipos.flatten().tolist() - elif isinstance(ipos, int): - ipos = [ipos] - ncol = len(ipos) - line = line.rstrip() - out = [] - istart = 0 - for ivar in range(ncol): - istop = istart + ipos[ivar] - try: - txt = line[istart:istop] - if len(txt.strip()) > 0: - out.append(txt) - else: - out.append(0) - except: - break - istart = istop - return out - - -def flux_to_wel(cbc_file, text, precision="single", model=None, verbose=False): - """ - Convert flux in a binary cell budget file to a wel instance - - Parameters - ---------- - cbc_file : (str) cell budget file name - text : (str) text string of the desired flux type (e.g. "drains") - precision : (optional str) precision of the cell budget file - model : (optional) BaseModel instance. If passed, a new ModflowWel - instance will be added to model - verbose : bool flag passed to CellBudgetFile - - Returns - ------- - flopy.modflow.ModflowWel instance - - """ - from . 
import CellBudgetFile as CBF - from .util_list import MfList - from ..modflow import Modflow, ModflowWel - cbf = CBF(cbc_file, precision=precision, verbose=verbose) - - # create a empty numpy array of shape (time,layer,row,col) - m4d = np.zeros((cbf.nper, cbf.nlay, cbf.nrow, cbf.ncol), dtype=np.float32) - m4d[:] = np.NaN - - # process the records in the cell budget file - iper = -1 - for kstpkper in cbf.kstpkper: - - kstpkper = (kstpkper[0] - 1, kstpkper[1] - 1) - kper = kstpkper[1] - # if we haven't visited this kper yet - if kper != iper: - arr = cbf.get_data(kstpkper=kstpkper, text=text, full3D=True) - if len(arr) > 0: - arr = arr[0] - print(arr.max(), arr.min(), arr.sum()) - # masked where zero - arr[np.where(arr == 0.0)] = np.NaN - m4d[iper + 1] = arr - iper += 1 - - # model wasn't passed, then create a generic model - if model is None: - model = Modflow("test") - # if model doesn't have a wel package, then make a generic one... - # need this for the from_m4d method - if model.wel is None: - ModflowWel(model) - - # get the stress_period_data dict {kper:np recarray} - sp_data = MfList.from_4d(model, "WEL", {"flux": m4d}) - - wel = ModflowWel(model, stress_period_data=sp_data) - return wel - - -def loadtxt(file, delimiter=' ', dtype=None, skiprows=0, use_pandas=True, - **kwargs): - """ - Use pandas if it is available to load a text file - (significantly faster than n.loadtxt or genfromtxt see - http://stackoverflow.com/questions/18259393/numpy-loading-csv-too-slow-compared-to-matlab) - - Parameters - ---------- - file : file or str - File, filename, or generator to read. - delimiter : str, optional - The string used to separate values. By default, this is any whitespace. - dtype : data-type, optional - Data-type of the resulting array - skiprows : int, optional - Skip the first skiprows lines; default: 0. - use_pandas : bool - If true, the much faster pandas.read_csv method is used. - kwargs : dict - Keyword arguments passed to numpy.loadtxt or pandas.read_csv. - - Returns - ------- - ra : np.recarray - Numpy record array of file contents. - """ - # test if pandas should be used, if available - if use_pandas: - if pd: - if delimiter.isspace(): - kwargs['delim_whitespace'] = True - if isinstance(dtype, np.dtype) and 'names' not in kwargs: - kwargs['names'] = dtype.names - - # if use_pandas and pd then use pandas - if use_pandas and pd: - df = pd.read_csv(file, dtype=dtype, skiprows=skiprows, **kwargs) - return df.to_records(index=False) - # default use of numpy - else: - return np.loadtxt(file, dtype=dtype, skiprows=skiprows, **kwargs) - - -def get_url_text(url, error_msg=None): - """ - Get text from a url. - """ - from urllib.request import urlopen - try: - urlobj = urlopen(url) - text = urlobj.read().decode() - return text - except: - e = sys.exc_info() - print(e) - if error_msg is not None: - print(error_msg) - return - - -def ulstrd(f, nlist, ra, model, sfac_columns, ext_unit_dict): - """ - Read a list and allow for open/close, binary, external, sfac, etc. - - Parameters - ---------- - f : file handle - file handle for where the list is being read from - nlist : int - size of the list (number of rows) to read - ra : np.recarray - A record array of the correct size that will be filled with the list - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to - which this package will be added. 
- sfac_columns : list - A list of strings containing the column names to scale by sfac - ext_unit_dict : dictionary, optional - If the list in the file is specified using EXTERNAL, - then in this case ext_unit_dict is required, which can be - constructed using the function - :class:`flopy.utils.mfreadnam.parsenamefile`. - - Returns - ------- - - """ - - # initialize variables - line = f.readline() - sfac = 1. - binary = False - ncol = len(ra.dtype.names) - line_list = line.strip().split() - close_the_file = False - file_handle = f - mode = 'r' - - # check for external - if line.strip().lower().startswith('external'): - inunit = int(line_list[1]) - errmsg = 'Could not find a file for unit {}'.format(inunit) - if ext_unit_dict is not None: - if inunit in ext_unit_dict: - namdata = ext_unit_dict[inunit] - file_handle = namdata.filehandle - else: - raise IOError(errmsg) - else: - raise IOError(errmsg) - if namdata.filetype == 'DATA(BINARY)': - binary = True - if not binary: - line = file_handle.readline() - - # or check for open/close - elif line.strip().lower().startswith('open/close'): - raw = line.strip().split() - fname = raw[1] - if '/' in fname: - raw = fname.split('/') - elif '\\' in fname: - raw = fname.split('\\') - else: - raw = [fname] - fname = os.path.join(*raw) - oc_filename = os.path.join(model.model_ws, fname) - msg = 'Package.load() error: open/close filename ' + \ - oc_filename + ' not found' - assert os.path.exists(oc_filename), msg - if '(binary)' in line.lower(): - binary = True - mode = 'rb' - file_handle = open(oc_filename, mode) - close_the_file = True - if not binary: - line = file_handle.readline() - - # check for scaling factor - if not binary: - line_list = line.strip().split() - if line.strip().lower().startswith('sfac'): - sfac = float(line_list[1]) - line = file_handle.readline() - - # fast binary read fromfile - if binary: - dtype2 = [] - for name in ra.dtype.names: - dtype2.append((name, np.float32)) - dtype2 = np.dtype(dtype2) - d = np.fromfile(file_handle, dtype=dtype2, count=nlist) - ra = np.array(d, dtype=ra.dtype) - ra = ra.view(np.recarray) - - # else, read ascii - else: - - for ii in range(nlist): - - # first line was already read - if ii != 0: - line = file_handle.readline() - - if model.free_format_input: - # whitespace separated - t = line.strip().split() - if len(t) < ncol: - t = t + (ncol - len(t)) * [0.0] - else: - t = t[:ncol] - t = tuple(t) - ra[ii] = t - else: - # fixed format - t = read_fixed_var(line, ncol=ncol) - t = tuple(t) - ra[ii] = t - - # scale the data and check - for column_name in sfac_columns: - ra[column_name] *= sfac - if 'auxsfac' in ra.dtype.names: - ra[column_name] *= ra['auxsfac'] - - if close_the_file: - file_handle.close() - - return ra +""" +Module for input/output utilities +""" +import os +import sys +import numpy as np + +try: + import pandas as pd +except: + pd = False + + +def _fmt_string(array, float_format='{}'): + """ + makes a formatting string for a rec-array; + given a desired float_format. + + Parameters + ---------- + array : np.recarray + float_format : str + formatter for floating point variable + + Returns + ------- + fmt_string : str + formatting string for writing output + """ + fmt_string = '' + for field in array.dtype.descr: + vtype = field[1][1].lower() + if vtype == 'i': + fmt_string += '{:.0f} ' + elif vtype == 'f': + fmt_string += '{} '.format(float_format) + elif vtype == 'o': + fmt_string += '{} ' + elif vtype == 's': + raise Exception("MfList error: 'str' type found in dtype." 
+ \
+                            " This gives unpredictable results when " + \
+                            "writing the recarray to file - change to "
+                            "'object' type")
+        else:
+            raise Exception("MfList.fmt_string error: unknown vtype " + \
+                            "in dtype:" + vtype)
+    return fmt_string
+
+
+def line_strip(line):
+    """
+    Remove comments and replace commas from input text
+    for a free formatted modflow input file
+
+    Parameters
+    ----------
+    line : str
+        a line of text from a modflow input file
+
+    Returns
+    -------
+    str : line with comments removed and commas replaced
+    """
+    for comment_flag in [';', '#', '!!']:
+        line = line.split(comment_flag)[0]
+    line = line.strip()
+    return line.replace(',', ' ')
+
+
+def get_next_line(f):
+    """
+    Get the next line from a file that is not a blank line
+
+    Parameters
+    ----------
+    f : filehandle
+        filehandle to an open file
+
+    Returns
+    -------
+    line : string
+        next non-empty line in an open file
+
+    """
+    while True:
+        line = f.readline().rstrip()
+        if len(line) > 0:
+            break
+    return line
+
+
+def line_parse(line):
+    """
+    Convert a line of text into a list of values. This handles the
+    case where a free formatted MODFLOW input file may have commas in
+    it.
+    """
+    line = line_strip(line)
+    return line.split()
+
+
+def pop_item(line, dtype=str):
+    if len(line) > 0:
+        if dtype == str:
+            return line.pop(0)
+        elif dtype == float:
+            return float(line.pop(0))
+        elif dtype == int:
+            # handle strings like this:
+            # '-10.'
+            return int(float(line.pop(0)))
+    return dtype(0)
+
+
+def write_fixed_var(v, length=10, ipos=None, free=False, comment=None):
+    """
+
+    Parameters
+    ----------
+    v : list, int, float, bool, or numpy array
+        list, int, float, bool, or numpy array containing the data to be
+        written to a string.
+    length : int
+        length of each column for fixed column widths. (default is 10)
+    ipos : list, int, or numpy array
+        user-provided column widths. (default is None)
+    free : bool
+        boolean indicating if a free format string should be generated.
+        length and ipos are not used if free is True. (default is False)
+    comment : str
+        comment string to add to the end of the string
+
+    Returns
+    -------
+    out : str
+        fixed or free format string generated using user-provided data
+
+    """
+    if isinstance(v, np.ndarray):
+        v = v.tolist()
+    elif isinstance(v, int) or isinstance(v, float) or isinstance(v, bool):
+        v = [v]
+    ncol = len(v)
+    # construct ipos if it was not passed
+    if ipos is None:
+        ipos = []
+        for i in range(ncol):
+            ipos.append(length)
+    else:
+        if isinstance(ipos, np.ndarray):
+            ipos = ipos.flatten().tolist()
+        elif isinstance(ipos, int):
+            ipos = [ipos]
+        if len(ipos) < ncol:
+            err = 'user provided ipos length ({}) '.format(len(ipos)) + \
+                  'should be greater than or equal ' + \
+                  'to the length of v ({})'.format(ncol)
+            raise Exception(err)
+    out = ''
+    for n in range(ncol):
+        if free:
+            write_fmt = '{} '
+        else:
+            if isinstance(v[n], (float, np.float, np.float32, np.float64)):
+                width = ipos[n] - 6
+                vmin, vmax = 10**-width, 10**width
+                if abs(v[n]) < vmin or abs(v[n]) > vmax:
+                    ctype = 'g'
+                else:
+                    ctype = '.{}f'.format(width)
+            elif isinstance(v[n], (int, np.int, np.int32, np.int64)):
+                ctype = 'd'
+            else:
+                ctype = ''
+            write_fmt = '{{:>{}{}}}'.format(ipos[n], ctype)
+        out += write_fmt.format(v[n])
+    if comment is not None:
+        out += ' # {}'.format(comment)
+    out += '\n'
+    return out
+
+
+def read_fixed_var(line, ncol=1, length=10, ipos=None, free=False):
+    """
+    Parse a fixed format line using user provided data
+
+    Parameters
+    ----------
+    line : str
+        text string to parse.
+    ncol : int
+        number of columns to parse from line. (default is 1)
+    length : int
+        length of each column for fixed column widths. (default is 10)
+    ipos : list, int, or numpy array
+        user-provided column widths. (default is None)
+    free : bool
+        boolean indicating if string is free format. ncol, length, and
+        ipos are not used if free is True. (default is False)
+
+    Returns
+    -------
+    out : list
+        padded list containing data parsed from the passed text string
+
+    """
+    if free:
+        out = line.rstrip().split()
+    else:
+        # construct ipos if it was not passed
+        if ipos is None:
+            ipos = []
+            for i in range(ncol):
+                ipos.append(length)
+        else:
+            if isinstance(ipos, np.ndarray):
+                ipos = ipos.flatten().tolist()
+            elif isinstance(ipos, int):
+                ipos = [ipos]
+            ncol = len(ipos)
+        line = line.rstrip()
+        out = []
+        istart = 0
+        for ivar in range(ncol):
+            istop = istart + ipos[ivar]
+            try:
+                txt = line[istart:istop]
+                if len(txt.strip()) > 0:
+                    out.append(txt)
+                else:
+                    out.append(0)
+            except:
+                break
+            istart = istop
+    return out
+
+
+def flux_to_wel(cbc_file, text, precision="single", model=None, verbose=False):
+    """
+    Convert flux in a binary cell budget file to a wel instance
+
+    Parameters
+    ----------
+    cbc_file : (str) cell budget file name
+    text : (str) text string of the desired flux type (e.g. "drains")
+    precision : (optional str) precision of the cell budget file
+    model : (optional) BaseModel instance. If passed, a new ModflowWel
+        instance will be added to model
+    verbose : bool flag passed to CellBudgetFile
+
+    Returns
+    -------
+    flopy.modflow.ModflowWel instance
+
+    """
+    from . import CellBudgetFile as CBF
+    from .util_list import MfList
+    from ..modflow import Modflow, ModflowWel
+    cbf = CBF(cbc_file, precision=precision, verbose=verbose)
+
+    # create an empty numpy array of shape (time, layer, row, col)
+    m4d = np.zeros((cbf.nper, cbf.nlay, cbf.nrow, cbf.ncol), dtype=np.float32)
+    m4d[:] = np.NaN
+
+    # process the records in the cell budget file
+    iper = -1
+    for kstpkper in cbf.kstpkper:
+
+        kstpkper = (kstpkper[0] - 1, kstpkper[1] - 1)
+        kper = kstpkper[1]
+        # if we haven't visited this kper yet
+        if kper != iper:
+            arr = cbf.get_data(kstpkper=kstpkper, text=text, full3D=True)
+            if len(arr) > 0:
+                arr = arr[0]
+                print(arr.max(), arr.min(), arr.sum())
+                # mask where zero
+                arr[np.where(arr == 0.0)] = np.NaN
+                m4d[iper + 1] = arr
+            iper += 1
+
+    # if a model was not passed, create a generic model
+    if model is None:
+        model = Modflow("test")
+    # if the model does not have a wel package, make a generic one...
+    # need this for the from_m4d method
+    if model.wel is None:
+        ModflowWel(model)
+
+    # get the stress_period_data dict {kper: np recarray}
+    sp_data = MfList.from_4d(model, "WEL", {"flux": m4d})
+
+    wel = ModflowWel(model, stress_period_data=sp_data)
+    return wel
+
+
+def loadtxt(file, delimiter=' ', dtype=None, skiprows=0, use_pandas=True,
+            **kwargs):
+    """
+    Use pandas if it is available to load a text file
+    (significantly faster than np.loadtxt or np.genfromtxt; see
+    http://stackoverflow.com/questions/18259393/numpy-loading-csv-too-slow-compared-to-matlab)
+
+    Parameters
+    ----------
+    file : file or str
+        File, filename, or generator to read.
+    delimiter : str, optional
+        The string used to separate values. By default, this is any whitespace.
+    dtype : data-type, optional
+        Data-type of the resulting array
+    skiprows : int, optional
+        Skip the first skiprows lines; default: 0.
+    use_pandas : bool
+        If True, the much faster pandas.read_csv method is used.
+ kwargs : dict + Keyword arguments passed to numpy.loadtxt or pandas.read_csv. + + Returns + ------- + ra : np.recarray + Numpy record array of file contents. + """ + # test if pandas should be used, if available + if use_pandas: + if pd: + if delimiter.isspace(): + kwargs['delim_whitespace'] = True + if isinstance(dtype, np.dtype) and 'names' not in kwargs: + kwargs['names'] = dtype.names + + # if use_pandas and pd then use pandas + if use_pandas and pd: + df = pd.read_csv(file, dtype=dtype, skiprows=skiprows, **kwargs) + return df.to_records(index=False) + # default use of numpy + else: + return np.loadtxt(file, dtype=dtype, skiprows=skiprows, **kwargs) + + +def get_url_text(url, error_msg=None): + """ + Get text from a url. + """ + from urllib.request import urlopen + try: + urlobj = urlopen(url) + text = urlobj.read().decode() + return text + except: + e = sys.exc_info() + print(e) + if error_msg is not None: + print(error_msg) + return + + +def ulstrd(f, nlist, ra, model, sfac_columns, ext_unit_dict): + """ + Read a list and allow for open/close, binary, external, sfac, etc. + + Parameters + ---------- + f : file handle + file handle for where the list is being read from + nlist : int + size of the list (number of rows) to read + ra : np.recarray + A record array of the correct size that will be filled with the list + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to + which this package will be added. + sfac_columns : list + A list of strings containing the column names to scale by sfac + ext_unit_dict : dictionary, optional + If the list in the file is specified using EXTERNAL, + then in this case ext_unit_dict is required, which can be + constructed using the function + :class:`flopy.utils.mfreadnam.parsenamefile`. + + Returns + ------- + + """ + + # initialize variables + line = f.readline() + sfac = 1. 
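+    # The code below branches on how the list is supplied: records read
+    # directly from this file, an EXTERNAL unit number, or an OPEN/CLOSE
+    # file, with an optional SFAC scale factor and a fast numpy fromfile
+    # read when the data are binary.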
+ binary = False + ncol = len(ra.dtype.names) + line_list = line.strip().split() + close_the_file = False + file_handle = f + mode = 'r' + + # check for external + if line.strip().lower().startswith('external'): + inunit = int(line_list[1]) + errmsg = 'Could not find a file for unit {}'.format(inunit) + if ext_unit_dict is not None: + if inunit in ext_unit_dict: + namdata = ext_unit_dict[inunit] + file_handle = namdata.filehandle + else: + raise IOError(errmsg) + else: + raise IOError(errmsg) + if namdata.filetype == 'DATA(BINARY)': + binary = True + if not binary: + line = file_handle.readline() + + # or check for open/close + elif line.strip().lower().startswith('open/close'): + raw = line.strip().split() + fname = raw[1] + if '/' in fname: + raw = fname.split('/') + elif '\\' in fname: + raw = fname.split('\\') + else: + raw = [fname] + fname = os.path.join(*raw) + oc_filename = os.path.join(model.model_ws, fname) + msg = 'Package.load() error: open/close filename ' + \ + oc_filename + ' not found' + assert os.path.exists(oc_filename), msg + if '(binary)' in line.lower(): + binary = True + mode = 'rb' + file_handle = open(oc_filename, mode) + close_the_file = True + if not binary: + line = file_handle.readline() + + # check for scaling factor + if not binary: + line_list = line.strip().split() + if line.strip().lower().startswith('sfac'): + sfac = float(line_list[1]) + line = file_handle.readline() + + # fast binary read fromfile + if binary: + dtype2 = [] + for name in ra.dtype.names: + dtype2.append((name, np.float32)) + dtype2 = np.dtype(dtype2) + d = np.fromfile(file_handle, dtype=dtype2, count=nlist) + ra = np.array(d, dtype=ra.dtype) + ra = ra.view(np.recarray) + + # else, read ascii + else: + + for ii in range(nlist): + + # first line was already read + if ii != 0: + line = file_handle.readline() + + if model.free_format_input: + # whitespace separated + t = line.strip().split() + if len(t) < ncol: + t = t + (ncol - len(t)) * [0.0] + else: + t = t[:ncol] + t = tuple(t) + ra[ii] = t + else: + # fixed format + t = read_fixed_var(line, ncol=ncol) + t = tuple(t) + ra[ii] = t + + # scale the data and check + for column_name in sfac_columns: + ra[column_name] *= sfac + if 'auxsfac' in ra.dtype.names: + ra[column_name] *= ra['auxsfac'] + + if close_the_file: + file_handle.close() + + return ra diff --git a/flopy/utils/formattedfile.py b/flopy/utils/formattedfile.py index a6462d19da..f7d16bad63 100644 --- a/flopy/utils/formattedfile.py +++ b/flopy/utils/formattedfile.py @@ -1,390 +1,390 @@ -""" -Module to read MODFLOW formatted output files. The module contains one -important classes that can be accessed by the user. - -* FormattedHeadFile (Formatted head file. Can also be used for drawdown) - -""" - -import numpy as np -from ..utils.datafile import Header, LayerFile - - -def is_int(s): - try: - int(s) - return True - except ValueError: - return False - - -def is_float(s): - try: - float(s) - return True - except ValueError: - return False - - -class FormattedHeader(Header): - """ - The TextHeader class is a class to read in headers from MODFLOW - formatted files. - - Parameters - ---------- - text_ident is the text string in the header that identifies the type - of data (eg. 
'head') precision is the precision of the floating point - data in the file - """ - - def __init__(self, text_ident, precision='single'): - Header.__init__(self, text_ident, precision) - self.format_string = '' - self.text_ident = text_ident - - def read_header(self, text_file): - """ - Read header information from a formatted file - - Parameters - ---------- - text_file is an open file object currently at the beginning of - the header - - Returns - ---------- - out : numpy array of header information - also stores the header's format string as self.format_string - - """ - - header_text = text_file.readline().decode('ascii') - arrheader = header_text.split() - - # Verify header exists and is in the expected format - if len(arrheader) >= 5 and arrheader[ - 4].upper() != self.text_ident.upper(): - raise Exception( - 'Expected header not found. Make sure the file being processed includes headers ' + - '(LABEL output control option): ' + header_text) - if len(arrheader) != 9 or not is_int(arrheader[0]) or not is_int( - arrheader[1]) or not is_float(arrheader[2]) \ - or not is_float(arrheader[3]) or not is_int( - arrheader[5]) or not is_int(arrheader[6]) or not is_int( - arrheader[7]): - raise Exception( - 'Unexpected format for FHDTextHeader: ' + header_text) - - headerinfo = np.empty([8], dtype=self.dtype) - headerinfo['kstp'] = int(arrheader[0]) - headerinfo['kper'] = int(arrheader[1]) - headerinfo['pertim'] = float(arrheader[2]) - headerinfo['totim'] = float(arrheader[3]) - headerinfo['text'] = arrheader[4] - headerinfo['ncol'] = int(arrheader[5]) - headerinfo['nrow'] = int(arrheader[6]) - headerinfo['ilay'] = int(arrheader[7]) - - self.format_string = arrheader[8] - - return headerinfo - - -class FormattedLayerFile(LayerFile): - """ - The FormattedLayerFile class is the super class from which specific derived - classes are formed. This class should not be instantiated directly - - """ - - def __init__(self, filename, precision, verbose, kwargs): - super(FormattedLayerFile, self).__init__(filename, precision, verbose, - kwargs) - return - - def _build_index(self): - """ - Build the recordarray and iposarray, which maps the header information - to the position in the formatted file. - """ - self.kstpkper # array of time step/stress periods with data available - self.recordarray # array of data headers - self.iposarray # array of seek positions for each record - self.nlay # Number of model layers - - # Get total file size - self.file.seek(0, 2) - self.totalbytes = self.file.tell() - self.file.seek(0, 0) - - # Process first header - self.header = self._get_text_header() - header_info = self.header.read_header(self.file)[0] - - self.nrow = header_info['nrow'] - self.ncol = header_info['ncol'] - - ipos = self.file.tell() - self._store_record(header_info, ipos) - - # Process enough data to calculate seek distance between headers - self._col_data_size = self._get_data_size(header_info) - self._data_size = self._col_data_size * self.nrow - - # While more data in file - while ipos + self._data_size < self.totalbytes: - # Seek and get next header - self.file.seek(ipos + self._data_size) - header_info = self.header.read_header(self.file)[0] - ipos = self.file.tell() - self._store_record(header_info, ipos) - - # self.recordarray contains a recordarray of all the headers. 
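
For reference, the LABEL-style record that read_header parses above is nine
whitespace-separated fields. A minimal sketch of the same parse, using a
hypothetical header line:

    header = '1 1 1.0 1.0 HEAD 15 10 1 (1PE13.5)'
    kstp, kper, pertim, totim, text, ncol, nrow, ilay, fmt = header.split()
    # kstp, kper, ncol, nrow, and ilay are parsed as ints, pertim and totim
    # as floats; fmt is retained as the header's format_string
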
- self.recordarray = np.array(self.recordarray, self.header.get_dtype()) - self.iposarray = np.array(self.iposarray) - self.nlay = np.max(self.recordarray['ilay']) - return - - def _store_record(self, header, ipos): - """ - Store file header information in various formats for quick retrieval - - """ - self.recordarray.append(header) - self.iposarray.append(ipos) # store the position right after header2 - totim = header['totim'] - if totim > 0 and totim not in self.times: - self.times.append(totim) - kstpkper = (header['kstp'], header['kper']) - if kstpkper not in self.kstpkper: - self.kstpkper.append(kstpkper) - - def _get_text_header(self): - """ - Return a text header object containing header formatting information - - """ - raise Exception( - 'Abstract method _get_text_header called in FormattedLayerFile. ' + - 'This method needs to be overridden.') - - def _read_data(self, shp): - """ - Read 2-D data from file - - """ - - nrow, ncol = shp - current_row = 0 - current_col = 0 - result = np.empty((nrow, ncol), self.realtype) - # Loop until all data retrieved or eof - while ( - current_row < nrow or current_col < ncol) and self.file.tell() != self.totalbytes: - line = self.file.readline() - - # Read data into 2-D array - arrline = line.split() - for val in arrline: - if not is_float(val): - raise Exception( - 'Invalid data encountered while reading data file.' + - ' Unable to convert data to float.') - result[current_row, current_col] = float(val) - current_col += 1 - if current_col >= ncol: - current_row += 1 - if current_row < nrow: - current_col = 0 - - if current_row < nrow - 1 or current_col < ncol - 1: - raise Exception('Unexpected end of file while reading data.') - - return result - - def _read_val(self, i): - """ - Read ith data value from file - - """ - current_col = 0 - result = None - # Loop until data retrieved or eof - while ( - current_col < self.ncol - 1 or self.file.tell() == self.totalbytes) and current_col <= i: - line = self.file.readline() - arrline = line.split() - for val in arrline: - if not is_float(val): - raise Exception( - 'Invalid data encountered while reading data file.' + - ' Unable to convert data to float.') - result = float(val) - current_col = current_col + 1 - if current_col > i: - break - - if (current_col < self.ncol - 1) and (current_col < i): - raise Exception('Unexpected end of file while reading data.') - - return result - - def get_ts(self, idx): - """ - Get a time series from the formatted file. - - Parameters - ---------- - idx : tuple of ints, or a list of a tuple of ints - idx can be (layer, row, column) or it can be a list in the form - [(layer, row, column), (layer, row, column), ...]. The layer, - row, and column values must be zero based. - - Returns - ---------- - out : numpy array - Array has size (ntimes, ncells + 1). The first column in the - data array will contain time (totim). 
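
A short usage sketch for get_ts (the file name is hypothetical;
FormattedHeadFile is the concrete subclass defined below):

    hds = FormattedHeadFile('model.fhd')
    ts = hds.get_ts((0, 20, 15))  # zero-based layer 0, row 20, column 15
    # ts[:, 0] holds totim and ts[:, 1] holds the head at that cell
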
- - See Also - -------- - - Notes - ----- - - The layer, row, and column values must be zero-based, and must be - within the following ranges: 0 <= k < nlay; 0 <= i < nrow; 0 <= j < ncol - - Examples - -------- - - """ - kijlist = self._build_kijlist(idx) - nstation = self._get_nstation(idx, kijlist) - - # Initialize result array and put times in first column - result = self._init_result(nstation) - - istat = 1 - for k, i, j in kijlist: - ioffset_col = (i * self._col_data_size) - for irec, header in enumerate(self.recordarray): - # change ilay from header to zero-based - ilay = header['ilay'] - 1 - if ilay != k: - continue - ipos = self.iposarray[irec] - - # Calculate offset necessary to reach intended column - self.file.seek(ipos + ioffset_col, 0) - - # Find the time index and then put value into result in the - # correct location. - itim = np.where(result[:, 0] == header['totim'])[0] - result[itim, istat] = self._read_val(j) - istat += 1 - return result - - def close(self): - """ - Close the file handle. - - """ - self.file.close() - return - - -class FormattedHeadFile(FormattedLayerFile): - """ - FormattedHeadFile Class. - - Parameters - ---------- - filename : string - Name of the formatted head file - text : string - Name of the text string in the formatted head file. Default is 'head' - precision : string - 'single' or 'double'. Default is 'single'. - verbose : bool - Write information to the screen. Default is False. - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - The FormattedHeadFile class provides simple ways to retrieve 2d and 3d - head arrays from a MODFLOW formatted head file and time series - arrays for one or more cells. - - The FormattedHeadFile class is built on a record array consisting of - headers, which are record arrays of the modflow header information - (kstp, kper, pertim, totim, text, nrow, ncol, ilay) - and long integers, which are pointers to first bytes of data for - the corresponding data array. - - FormattedHeadFile can only read formatted head files containing headers. - Use the LABEL option in the output control file to generate head files - with headers. - - Examples - -------- - - >>> import flopy.utils.formattedfile as ff - >>> hdobj = ff.FormattedHeadFile('model.fhd', precision='single') - >>> hdobj.list_records() - >>> rec = hdobj.get_data(kstpkper=(1, 50)) - >>> rec2 = ddnobj.get_data(totim=100.) - - - """ - - def __init__(self, filename, text='head', precision='single', - verbose=False, **kwargs): - self.text = text - super(FormattedHeadFile, self).__init__(filename, precision, verbose, - kwargs) - return - - def _get_text_header(self): - """ - Return a text header object containing header formatting information - - """ - return FormattedHeader(self.text, self.precision) - - def _get_data_size(self, header): - """ - Calculate the size of the data set in terms of a seek distance - - """ - start_pos = self.file.tell() - data_count = 0 - # Loop through data until at end of column - while data_count < header['ncol']: - column_data = self.file.readline() - arr_column_data = column_data.split() - data_count += len(arr_column_data) - - if data_count != header['ncol']: - e = 'Unexpected data formatting in head file. 
Expected ' + \
-                '{:d} columns, '.format(header['ncol']) + \
-                'but found {:d}.'.format(data_count)
-            raise Exception(e)
-
-        # Calculate seek distance based on data size
-        stop_pos = self.file.tell()
-        data_seek_distance = stop_pos - start_pos
-
-        # Return to last file position
-        self.file.seek(start_pos)
-
-        return data_seek_distance
+"""
+Module to read MODFLOW formatted output files. The module contains one
+important class that can be accessed by the user.
+
+* FormattedHeadFile (Formatted head file. Can also be used for drawdown)
+
+"""
+
+import numpy as np
+from ..utils.datafile import Header, LayerFile
+
+
+def is_int(s):
+    try:
+        int(s)
+        return True
+    except ValueError:
+        return False
+
+
+def is_float(s):
+    try:
+        float(s)
+        return True
+    except ValueError:
+        return False
+
+
+class FormattedHeader(Header):
+    """
+    The FormattedHeader class reads headers from MODFLOW
+    formatted files.
+
+    Parameters
+    ----------
+    text_ident is the text string in the header that identifies the type
+    of data (e.g. 'head'). precision is the precision of the floating
+    point data in the file.
+    """
+
+    def __init__(self, text_ident, precision='single'):
+        Header.__init__(self, text_ident, precision)
+        self.format_string = ''
+        self.text_ident = text_ident
+
+    def read_header(self, text_file):
+        """
+        Read header information from a formatted file
+
+        Parameters
+        ----------
+        text_file is an open file object currently at the beginning of
+        the header
+
+        Returns
+        ----------
+        out : numpy array of header information
+        also stores the header's format string as self.format_string
+
+        """
+
+        header_text = text_file.readline().decode('ascii')
+        arrheader = header_text.split()
+
+        # Verify header exists and is in the expected format
+        if len(arrheader) >= 5 and arrheader[
+                4].upper() != self.text_ident.upper():
+            raise Exception(
+                'Expected header not found. Make sure the file being processed includes headers ' +
+                '(LABEL output control option): ' + header_text)
+        if len(arrheader) != 9 or not is_int(arrheader[0]) or not is_int(
+                arrheader[1]) or not is_float(arrheader[2]) \
+                or not is_float(arrheader[3]) or not is_int(
+                arrheader[5]) or not is_int(arrheader[6]) or not is_int(
+                arrheader[7]):
+            raise Exception(
+                'Unexpected format for FHDTextHeader: ' + header_text)
+
+        headerinfo = np.empty([8], dtype=self.dtype)
+        headerinfo['kstp'] = int(arrheader[0])
+        headerinfo['kper'] = int(arrheader[1])
+        headerinfo['pertim'] = float(arrheader[2])
+        headerinfo['totim'] = float(arrheader[3])
+        headerinfo['text'] = arrheader[4]
+        headerinfo['ncol'] = int(arrheader[5])
+        headerinfo['nrow'] = int(arrheader[6])
+        headerinfo['ilay'] = int(arrheader[7])
+
+        self.format_string = arrheader[8]
+
+        return headerinfo
+
+
+class FormattedLayerFile(LayerFile):
+    """
+    The FormattedLayerFile class is the superclass from which specific
+    derived classes are formed. This class should not be instantiated
+    directly.
+
+    """
+
+    def __init__(self, filename, precision, verbose, kwargs):
+        super(FormattedLayerFile, self).__init__(filename, precision, verbose,
+                                                 kwargs)
+        return
+
+    def _build_index(self):
+        """
+        Build the recordarray and iposarray, which map the header information
+        to the position in the formatted file.
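To make the nine-field header layout that `read_header` validates concrete, here is a hand-rolled parse of an illustrative header line. The numeric values and the trailing Fortran format string are invented; real headers of this kind are written when the LABEL output control option is active.

```python
# Illustrative formatted-file header: kstp, kper, pertim, totim, text,
# ncol, nrow, ilay, format_string (all values invented).
header_text = '  1  1  1.00000E+00  1.00000E+00  HEAD  10  10  1  (10(1X1PE13.5))'
fields = header_text.split()
assert len(fields) == 9 and fields[4].upper() == 'HEAD'
kstp, kper = int(fields[0]), int(fields[1])
pertim, totim = float(fields[2]), float(fields[3])
ncol, nrow, ilay = int(fields[5]), int(fields[6]), int(fields[7])
format_string = fields[8]
```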
+ """ + self.kstpkper # array of time step/stress periods with data available + self.recordarray # array of data headers + self.iposarray # array of seek positions for each record + self.nlay # Number of model layers + + # Get total file size + self.file.seek(0, 2) + self.totalbytes = self.file.tell() + self.file.seek(0, 0) + + # Process first header + self.header = self._get_text_header() + header_info = self.header.read_header(self.file)[0] + + self.nrow = header_info['nrow'] + self.ncol = header_info['ncol'] + + ipos = self.file.tell() + self._store_record(header_info, ipos) + + # Process enough data to calculate seek distance between headers + self._col_data_size = self._get_data_size(header_info) + self._data_size = self._col_data_size * self.nrow + + # While more data in file + while ipos + self._data_size < self.totalbytes: + # Seek and get next header + self.file.seek(ipos + self._data_size) + header_info = self.header.read_header(self.file)[0] + ipos = self.file.tell() + self._store_record(header_info, ipos) + + # self.recordarray contains a recordarray of all the headers. + self.recordarray = np.array(self.recordarray, self.header.get_dtype()) + self.iposarray = np.array(self.iposarray) + self.nlay = np.max(self.recordarray['ilay']) + return + + def _store_record(self, header, ipos): + """ + Store file header information in various formats for quick retrieval + + """ + self.recordarray.append(header) + self.iposarray.append(ipos) # store the position right after header2 + totim = header['totim'] + if totim > 0 and totim not in self.times: + self.times.append(totim) + kstpkper = (header['kstp'], header['kper']) + if kstpkper not in self.kstpkper: + self.kstpkper.append(kstpkper) + + def _get_text_header(self): + """ + Return a text header object containing header formatting information + + """ + raise Exception( + 'Abstract method _get_text_header called in FormattedLayerFile. ' + + 'This method needs to be overridden.') + + def _read_data(self, shp): + """ + Read 2-D data from file + + """ + + nrow, ncol = shp + current_row = 0 + current_col = 0 + result = np.empty((nrow, ncol), self.realtype) + # Loop until all data retrieved or eof + while ( + current_row < nrow or current_col < ncol) and self.file.tell() != self.totalbytes: + line = self.file.readline() + + # Read data into 2-D array + arrline = line.split() + for val in arrline: + if not is_float(val): + raise Exception( + 'Invalid data encountered while reading data file.' + + ' Unable to convert data to float.') + result[current_row, current_col] = float(val) + current_col += 1 + if current_col >= ncol: + current_row += 1 + if current_row < nrow: + current_col = 0 + + if current_row < nrow - 1 or current_col < ncol - 1: + raise Exception('Unexpected end of file while reading data.') + + return result + + def _read_val(self, i): + """ + Read ith data value from file + + """ + current_col = 0 + result = None + # Loop until data retrieved or eof + while ( + current_col < self.ncol - 1 or self.file.tell() == self.totalbytes) and current_col <= i: + line = self.file.readline() + arrline = line.split() + for val in arrline: + if not is_float(val): + raise Exception( + 'Invalid data encountered while reading data file.' 
+ + ' Unable to convert data to float.') + result = float(val) + current_col = current_col + 1 + if current_col > i: + break + + if (current_col < self.ncol - 1) and (current_col < i): + raise Exception('Unexpected end of file while reading data.') + + return result + + def get_ts(self, idx): + """ + Get a time series from the formatted file. + + Parameters + ---------- + idx : tuple of ints, or a list of a tuple of ints + idx can be (layer, row, column) or it can be a list in the form + [(layer, row, column), (layer, row, column), ...]. The layer, + row, and column values must be zero based. + + Returns + ---------- + out : numpy array + Array has size (ntimes, ncells + 1). The first column in the + data array will contain time (totim). + + See Also + -------- + + Notes + ----- + + The layer, row, and column values must be zero-based, and must be + within the following ranges: 0 <= k < nlay; 0 <= i < nrow; 0 <= j < ncol + + Examples + -------- + + """ + kijlist = self._build_kijlist(idx) + nstation = self._get_nstation(idx, kijlist) + + # Initialize result array and put times in first column + result = self._init_result(nstation) + + istat = 1 + for k, i, j in kijlist: + ioffset_col = (i * self._col_data_size) + for irec, header in enumerate(self.recordarray): + # change ilay from header to zero-based + ilay = header['ilay'] - 1 + if ilay != k: + continue + ipos = self.iposarray[irec] + + # Calculate offset necessary to reach intended column + self.file.seek(ipos + ioffset_col, 0) + + # Find the time index and then put value into result in the + # correct location. + itim = np.where(result[:, 0] == header['totim'])[0] + result[itim, istat] = self._read_val(j) + istat += 1 + return result + + def close(self): + """ + Close the file handle. + + """ + self.file.close() + return + + +class FormattedHeadFile(FormattedLayerFile): + """ + FormattedHeadFile Class. + + Parameters + ---------- + filename : string + Name of the formatted head file + text : string + Name of the text string in the formatted head file. Default is 'head' + precision : string + 'single' or 'double'. Default is 'single'. + verbose : bool + Write information to the screen. Default is False. + + Attributes + ---------- + + Methods + ------- + + See Also + -------- + + Notes + ----- + The FormattedHeadFile class provides simple ways to retrieve 2d and 3d + head arrays from a MODFLOW formatted head file and time series + arrays for one or more cells. + + The FormattedHeadFile class is built on a record array consisting of + headers, which are record arrays of the modflow header information + (kstp, kper, pertim, totim, text, nrow, ncol, ilay) + and long integers, which are pointers to first bytes of data for + the corresponding data array. + + FormattedHeadFile can only read formatted head files containing headers. + Use the LABEL option in the output control file to generate head files + with headers. + + Examples + -------- + + >>> import flopy.utils.formattedfile as ff + >>> hdobj = ff.FormattedHeadFile('model.fhd', precision='single') + >>> hdobj.list_records() + >>> rec = hdobj.get_data(kstpkper=(1, 50)) + >>> rec2 = ddnobj.get_data(totim=100.) 
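The last line of the docstring example above uses `ddnobj` without defining it; presumably a drawdown counterpart to `hdobj` was intended, since the class notes say the reader also works for drawdown files. A hedged sketch of that missing setup (the file name is invented):

```python
import flopy.utils.formattedfile as ff

# 'drawdown' is the text identifier expected in the file's headers;
# 'model.ddn' is an invented file name.
ddnobj = ff.FormattedHeadFile('model.ddn', text='drawdown')
rec2 = ddnobj.get_data(totim=100.)
ddnobj.close()
```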
+ + + """ + + def __init__(self, filename, text='head', precision='single', + verbose=False, **kwargs): + self.text = text + super(FormattedHeadFile, self).__init__(filename, precision, verbose, + kwargs) + return + + def _get_text_header(self): + """ + Return a text header object containing header formatting information + + """ + return FormattedHeader(self.text, self.precision) + + def _get_data_size(self, header): + """ + Calculate the size of the data set in terms of a seek distance + + """ + start_pos = self.file.tell() + data_count = 0 + # Loop through data until at end of column + while data_count < header['ncol']: + column_data = self.file.readline() + arr_column_data = column_data.split() + data_count += len(arr_column_data) + + if data_count != header['ncol']: + e = 'Unexpected data formatting in head file. Expected ' + \ + '{:d} columns, '.format(header['ncol']) + \ + 'but found {:d}.'.format(data_count) + raise Exception(e) + + # Calculate seek distance based on data size + stop_pos = self.file.tell() + data_seek_distance = stop_pos - start_pos + + # Return to last file position + self.file.seek(start_pos) + + return data_seek_distance diff --git a/flopy/utils/gridintersect.py b/flopy/utils/gridintersect.py index 1df9aec0b5..a280b33443 100644 --- a/flopy/utils/gridintersect.py +++ b/flopy/utils/gridintersect.py @@ -1,1477 +1,1477 @@ -import numpy as np -try: - import matplotlib.pyplot as plt -except ModuleNotFoundError: - plt = None - -from .geometry import transform - -try: - from shapely.geometry import (MultiPoint, Point, Polygon, box, - GeometryCollection) - from shapely.strtree import STRtree - from shapely.affinity import translate, rotate - from shapely.prepared import prep - shply = True -except ModuleNotFoundError: - shply = False - - -def parse_shapely_ix_result(collection, ix_result, shptyps=None): - """Recursive function for parsing shapely intersection results. Returns a - list of shapely shapes matching shptyp. - - Parameters - ---------- - collection : list - state variable for storing result, generally - an empty list - ix_result : shapely.geometry type - any shapely intersection result - shptyp : str, list of str, or None, optional - if None (default), return all types of shapes. - if str, return shapes of that type, if list of str, - return all types in list - - Returns - ------- - collection : list - list containing shapely geometries of type shptyp - """ - # convert shptyps to list if needed - if isinstance(shptyps, str): - shptyps = [shptyps] - elif shptyps is None: - shptyps = [None] - - # if empty - if ix_result.is_empty: - return collection - # base case: geom_type is partial or exact match to shptyp - elif ix_result.geom_type in shptyps: - collection.append(ix_result) - return collection - # recursion for collections - elif hasattr(ix_result, "geoms"): - for ishp in ix_result: - parse_shapely_ix_result(collection, ishp, shptyps=shptyps) - # if collecting all types - elif shptyps[0] is None: - return collection.append(ix_result) - return collection - - -class GridIntersect: - """Class for intersecting shapely shapes (Point, Linestring, Polygon, or - their Multi variants) with MODFLOW grids. Contains optimized search - routines for structured grids. - - Notes - ----- - - The STR-tree query is based on the bounding box of the shape or - collection, if the bounding box of the shape covers nearly the entire - grid, the query won't be able to limit the search space much, resulting - in slower performance. 
Therefore, it can sometimes be faster to - intersect each individual shape in a collection than it is to intersect - with the whole collection at once. - - Building the STR-tree can take a while for large grids. Once built the - intersect routines (for individual shapes) should be pretty fast. It - is possible to perform intersects without building the STR-tree by - setting `rtree=False`. - - The optimized routines for structured grids will often outperform - the shapely routines because of the reduced overhead of building and - parsing the STR-tree. However, for polygons the STR-tree implementation - is often faster than the optimized structured routines, especially - for larger grids. - """ - - def __init__(self, mfgrid, method=None, rtree=True): - """Intersect shapes (Point, Linestring, Polygon) with a modflow grid. - - Parameters - ---------- - mfgrid : flopy modflowgrid - MODFLOW grid as implemented in flopy - method : str, optional - default is None, which determines intersection method based on - the grid type. Options are either 'vertex' which uses shapely - interesection operations or 'structured' which uses optimized - methods that only work for structured grids - rtree : bool, optional - whether to build an STR-Tree, default is True. If False no - STR-tree is built (which saves some time), but intersects will - loop through all model gridcells (which is generally slower). - Only read when `method='vertex'`. - """ - if not shply: - msg = ("Shapely is needed for grid intersect operations! " - "Please install shapely if you need to use grid intersect " - "functionality.") - raise ModuleNotFoundError(msg) - - self.mfgrid = mfgrid - if method is None: - # determine method from grid_type - self.method = self.mfgrid.grid_type - else: - # set method - self.method = method - self.rtree = rtree - - if self.method == "vertex": - # set method to get gridshapes depending on grid type - self._set_method_get_gridshapes() - - # build STR-tree if specified - if self.rtree: - self.strtree = STRtree(self._get_gridshapes()) - - # set interesection methods - self.intersect_point = self._intersect_point_shapely - self.intersect_linestring = self._intersect_linestring_shapely - self.intersect_polygon = self._intersect_polygon_shapely - - elif self.method == "structured" and mfgrid.grid_type == "structured": - self.intersect_point = self._intersect_point_structured - self.intersect_linestring = self._intersect_linestring_structured - self.intersect_polygon = self._intersect_polygon_structured - - else: - raise NotImplementedError( - "Method '{0}' not recognized!".format) - - def _set_method_get_gridshapes(self): - """internal method, set self._get_gridshapes to the certain method for - obtaining gridcells.""" - # Set method for obtaining grid shapes - if self.mfgrid.grid_type == "structured": - self._get_gridshapes = self._rect_grid_to_shape_generator - elif self.mfgrid.grid_type == "vertex": - self._get_gridshapes = self._vtx_grid_to_shape_generator - elif self.mfgrid.grid_type == "unstructured": - raise NotImplementedError() - - def _rect_grid_to_shape_generator(self): - """internal method, generator yielding shapely polygons for structured - grid cells. 
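A minimal construction sketch for the class described above, assuming a small structured grid; the grid dimensions are invented. When `method` is omitted it is taken from the grid type, and `method='vertex'` forces the shapely-based routines even on a structured grid:

```python
import numpy as np
import flopy
from flopy.utils.gridintersect import GridIntersect

# 10 x 10 structured grid with unit spacing (illustrative)
grid = flopy.discretization.StructuredGrid(delr=np.ones(10),
                                           delc=np.ones(10))
ix = GridIntersect(grid)                        # method='structured' inferred
ix_shapely = GridIntersect(grid, method='vertex', rtree=True)
```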
- - Returns - ------- - generator : - generator of shapely Polygons - """ - for i in range(self.mfgrid.nrow): - for j in range(self.mfgrid.ncol): - xy = self.mfgrid.get_cell_vertices(i, j) - p = Polygon(xy) - p.name = (i, j) - yield p - - def _usg_grid_to_shape_generator(self): - """internal method, convert unstructred grid to list of shapely - polygons. - - Returns - ------- - list - list of shapely Polygons - """ - raise NotImplementedError() - - def _vtx_grid_to_shape_generator(self): - """internal method, generator yielding shapely polygons for vertex - grids. - - Returns - ------- - generator : - generator of shapely Polygons - """ - # for cell2d rec-arrays - if isinstance(self.mfgrid._cell2d, np.recarray): - for icell in self.mfgrid._cell2d.icell2d: - points = [] - icverts = ["icvert_{}".format(i) for i in - range(self.mfgrid._cell2d["ncvert"][icell])] - for iv in self.mfgrid._cell2d[icverts][icell]: - points.append((self.mfgrid._vertices.xv[iv], - self.mfgrid._vertices.yv[iv])) - # close the polygon, if necessary - if points[0] != points[-1]: - points.append(points[0]) - p = Polygon(points) - p.name = icell - yield p - # for cell2d lists - elif isinstance(self.mfgrid._cell2d, list): - for icell in range(len(self.mfgrid._cell2d)): - points = [] - for iv in self.mfgrid._cell2d[icell][-3:]: - points.append((self.mfgrid._vertices[iv][1], - self.mfgrid._vertices[iv][2])) - # close the polygon, if necessary - if points[0] != points[-1]: - points.append(points[0]) - p = Polygon(points) - p.name = icell - yield p - - def _rect_grid_to_shape_list(self): - """internal method, list of shapely polygons for structured grid cells. - - Returns - ------- - list : - list of shapely Polygons - """ - return list(self._rect_grid_to_shape_generator()) - - def _usg_grid_to_shape_list(self): - """internal method, convert unstructred grid to list of shapely - polygons. - - Returns - ------- - list - list of shapely Polygons - """ - raise NotImplementedError() - - def _vtx_grid_to_shape_list(self): - """internal method, list of shapely polygons for vertex grids. - - Returns - ------- - list : - list of shapely Polygons - """ - return list(self._vtx_grid_to_shape_generator()) - - def query_grid(self, shp): - """Perform spatial query on grid with shapely geometry. If no spatial - query is possible returns all grid cells. - - Parameters - ---------- - shp : shapely.geometry - shapely geometry - - Returns - ------- - list or generator expression - list or generator containing grid cells in query result - """ - if self.rtree: - result = self.strtree.query(shp) - else: - # no spatial query - result = self._get_gridshapes() - return result - - @staticmethod - def filter_query_result(qresult, shp): - """Filter query result to obtain grid cells that intersect with shape. - Used to (further) reduce query result to cells that definitely - intersect with shape. - - Parameters - ---------- - qresult : iterable - query result, iterable of polygons - shp : shapely.geometry - shapely geometry that is prepared and used to filter - query result - - Returns - ------- - qfiltered - filter or generator containing polygons that intersect with shape - """ - # prepare shape for efficient batch intersection check - prepshp = prep(shp) - # get only gridcells that intersect - qfiltered = filter(prepshp.intersects, qresult) - return qfiltered - - @staticmethod - def sort_gridshapes(shape_iter): - """Sort query result by node id. 
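The three helpers just listed (`query_grid`, `filter_query_result`, `sort_gridshapes`) form the query pipeline the intersect methods below rely on. A standalone sketch with an invented grid and polygon:

```python
import numpy as np
import flopy
from shapely.geometry import Polygon
from flopy.utils.gridintersect import GridIntersect

grid = flopy.discretization.StructuredGrid(delr=np.ones(10), delc=np.ones(10))
ix = GridIntersect(grid, method='vertex')
shp = Polygon([(2.5, 2.5), (6.5, 2.5), (6.5, 6.5)])
qresult = ix.query_grid(shp)                      # STR-tree bounding-box query
qfiltered = ix.filter_query_result(qresult, shp)  # keep true intersections
cells = ix.sort_gridshapes(qfiltered)             # lowest cell ids first
print([c.name for c in cells])                    # (row, col) tuples
```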
- - Parameters - ---------- - shape_iter : iterable - list or iterable of gridcells - - Returns - ------- - list - sorted list of gridcells - """ - if not isinstance(shape_iter, list): - shapelist = list(shape_iter) - else: - shapelist = shape_iter - - def sort_key(o): - return o.name - shapelist.sort(key=sort_key) - return shapelist - - def _intersect_point_shapely(self, shp, sort_by_cellid=True): - """intersect grid with Point or MultiPoint. - - Parameters - ---------- - shp : Point or MultiPoint - shapely Point or MultiPoint to intersect with grid. Note, - it is generally faster to loop over a MultiPoint and intersect - per point than to intersect a MultiPoint directly. - sort_by_cellid : bool, optional - flag whether to sort cells by id, used to ensure node - with lowest id is returned, by default True - - Returns - ------- - numpy.recarray - a record array containing information about the intersection - """ - # query grid - qresult = self.query_grid(shp) - # prepare shape for efficient batch intersection check - prepshp = prep(shp) - # get only gridcells that intersect - qfiltered = filter(prepshp.intersects, qresult) - - # sort cells to ensure lowest cell ids are returned - if sort_by_cellid: - qfiltered = self.sort_gridshapes(qfiltered) - - isectshp = [] - cellids = [] - vertices = [] - parsed_points = [] # for keeping track of points - - # loop over cells returned by filtered spatial query - for r in qfiltered: - name = r.name - # do intersection - intersect = shp.intersection(r) - # parse result per Point - collection = parse_shapely_ix_result( - [], intersect, shptyps=["Point"]) - # loop over intersection result and store information - cell_verts = [] - cell_shps = [] - for c in collection: - verts = c.__geo_interface__["coordinates"] - # avoid returning multiple cells for points on boundaries - if verts in parsed_points: - continue - parsed_points.append(verts) - cell_shps.append(c) # collect only new points - cell_verts.append(verts) - # if any new ix found - if len(cell_shps) > 0: - # combine new points in MultiPoint - isectshp.append(MultiPoint(cell_shps) if len(cell_shps) > 1 - else cell_shps[0]) - vertices.append(tuple(cell_verts)) - cellids.append(name) - - rec = np.recarray(len(isectshp), - names=["cellids", "vertices", "ixshapes"], - formats=["O", "O", "O"]) - rec.ixshapes = isectshp - rec.vertices = vertices - rec.cellids = cellids - - return rec - - def _intersect_linestring_shapely(self, shp, keepzerolengths=False, - sort_by_cellid=True): - """intersect with LineString or MultiLineString. 
- - Parameters - ---------- - shp : shapely.geometry.LineString or MultiLineString - LineString to intersect with the grid - keepzerolengths : bool, optional - keep linestrings with length zero, default is False - sort_by_cellid : bool, optional - flag whether to sort cells by id, used to ensure node - with lowest id is returned, by default True - - Returns - ------- - numpy.recarray - a record array containing information about the intersection - """ - # query grid - qresult = self.query_grid(shp) - # filter result further if possible (only strtree and filter methods) - qfiltered = self.filter_query_result(qresult, shp) - # sort cells to ensure lowest cell ids are returned - if sort_by_cellid: - qfiltered = self.sort_gridshapes(qfiltered) - - # initialize empty lists for storing results - isectshp = [] - cellids = [] - vertices = [] - lengths = [] - - # loop over cells returned by filtered spatial query - for r in qfiltered: - name = r.name - # do intersection - intersect = shp.intersection(r) - # parse result - collection = parse_shapely_ix_result( - [], intersect, shptyps=["LineString", "MultiLineString"]) - # loop over intersection result and store information - for c in collection: - verts = c.__geo_interface__["coordinates"] - # test if linestring was already processed (if on boundary) - if verts in vertices: - continue - # if keep zero don't check length - if not keepzerolengths: - if c.length == 0.: - continue - isectshp.append(c) - lengths.append(c.length) - vertices.append(verts) - cellids.append(name) - - rec = np.recarray(len(isectshp), - names=["cellids", "vertices", "lengths", "ixshapes"], - formats=["O", "O", "f8", "O"]) - rec.ixshapes = isectshp - rec.vertices = vertices - rec.lengths = lengths - rec.cellids = cellids - - return rec - - def _intersect_polygon_shapely(self, shp, sort_by_cellid=True): - """intersect with Polygon or MultiPolygon. - - Parameters - ---------- - shp : shapely.geometry.Polygon or MultiPolygon - shape to intersect with the grid - sort_by_cellid : bool, optional - flag whether to sort cells by id, used to ensure node - with lowest id is returned, by default True - - Returns - ------- - numpy.recarray - a record array containing information about the intersection - """ - # query grid - qresult = self.query_grid(shp) - # filter result further if possible (only strtree and filter methods) - qfiltered = self.filter_query_result(qresult, shp) - # sort cells to ensure lowest cell ids are returned - if sort_by_cellid: - qfiltered = self.sort_gridshapes(qfiltered) - - isectshp = [] - cellids = [] - vertices = [] - areas = [] - - # loop over cells returned by filtered spatial query - for r in qfiltered: - name = r.name - # do intersection - intersect = shp.intersection(r) - # parse result - collection = parse_shapely_ix_result( - [], intersect, shptyps=["Polygon", "MultiPolygon"]) - # loop over intersection result and store information - for c in collection: - # don't store intersections with 0 area - if c.area == 0.: - continue - verts = c.__geo_interface__["coordinates"] - isectshp.append(c) - areas.append(c.area) - vertices.append(verts) - cellids.append(name) - - rec = np.recarray(len(isectshp), - names=["cellids", "vertices", "areas", "ixshapes"], - formats=["O", "O", "f8", "O"]) - rec.ixshapes = isectshp - rec.vertices = vertices - rec.areas = areas - rec.cellids = cellids - - return rec - - def intersects(self, shp): - """Return cellIDs for shapes that intersect with shape. 
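The shapely-based routines above all return a recarray; a sketch showing the fields produced by the polygon variant (grid and polygon invented, field names taken from the code):

```python
import numpy as np
import flopy
from shapely.geometry import Polygon
from flopy.utils.gridintersect import GridIntersect

grid = flopy.discretization.StructuredGrid(delr=np.ones(10), delc=np.ones(10))
ix = GridIntersect(grid, method='vertex')
poly = Polygon([(2.5, 2.5), (6.5, 2.5), (6.5, 6.5), (2.5, 6.5)])
rec = ix.intersect_polygon(poly)
print(rec.dtype.names)          # ('cellids', 'vertices', 'areas', 'ixshapes')
print(rec.cellids[0], rec.areas.sum())   # areas sum to poly.area when the
                                         # polygon lies fully inside the grid
```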
- - Parameters - ---------- - shp : shapely.geometry - shape to intersect with the grid - - Returns - ------- - rec : numpy.recarray - a record array containing cell IDs of the gridcells - the shape intersects with - """ - # query grid - qresult = self.query_grid(shp) - # filter result further if possible (only strtree and filter methods) - qfiltered = self.filter_query_result(qresult, shp) - # get cellids - cids = [cell.name for cell in qfiltered] - # build rec-array - rec = np.recarray(len(cids), - names=["cellids"], - formats=["O"]) - rec.cellids = cids - return rec - - def _intersect_point_structured(self, shp): - """intersection method for intersecting points with structured grids. - - Parameters - ---------- - shp : shapely.geometry.Point or MultiPoint - point shape to intersect with grid - - Returns - ------- - numpy.recarray - a record array containing information about the intersection - """ - nodelist = [] - - Xe, Ye = self.mfgrid.xyedges - - try: - iter(shp) - except TypeError: - shp = [shp] - - ixshapes = [] - for p in shp: - # if grid is rotated or offset transform point to local coords - if (self.mfgrid.angrot != 0. or self.mfgrid.xoffset != 0. - or self.mfgrid.yoffset != 0.): - rx, ry = transform(p.x, p.y, self.mfgrid.xoffset, - self.mfgrid.yoffset, - self.mfgrid.angrot_radians, - inverse=True) - else: - rx = p.x - ry = p.y - - # two dimensional point - jpos = ModflowGridIndices.find_position_in_array(Xe, rx) - ipos = ModflowGridIndices.find_position_in_array(Ye, ry) - - if jpos is not None and ipos is not None: - nodelist.append((ipos, jpos)) - ixshapes.append(p) - - # three dimensional point - if p._ndim == 3: - # find k - kpos = ModflowGridIndices.find_position_in_array( - self.mfgrid.botm[:, ipos, jpos], p.z) - if kpos is not None: - nodelist.append((kpos, ipos, jpos)) - - # remove duplicates - tempnodes = [] - tempshapes = [] - for node, ixs in zip(nodelist, ixshapes): - if node not in tempnodes: - tempnodes.append(node) - tempshapes.append(ixs) - else: - # TODO: not sure if this is correct - tempshapes[-1] = MultiPoint([tempshapes[-1], ixs]) - - ixshapes = tempshapes - nodelist = tempnodes - - rec = np.recarray(len(nodelist), names=["cellids", "ixshapes"], - formats=["O", "O"]) - rec.cellids = nodelist - rec.ixshapes = ixshapes - return rec - - def _intersect_linestring_structured(self, shp, keepzerolengths=False): - """method for intersecting linestrings with structured grids. - - Parameters - ---------- - shp : shapely.geometry.Linestring or MultiLineString - linestring to intersect with grid - keepzerolengths : bool, optional - if True keep intersection results with length=0, in - other words, grid cells the linestring does not cross - but does touch, by default False - - Returns - ------- - numpy.recarray - a record array containing information about the intersection - """ - # get local extent of grid - if (self.mfgrid.angrot != 0. or self.mfgrid.xoffset != 0. - or self.mfgrid.yoffset != 0.): - xmin = np.min(self.mfgrid.xyedges[0]) - xmax = np.max(self.mfgrid.xyedges[0]) - ymin = np.min(self.mfgrid.xyedges[1]) - ymax = np.max(self.mfgrid.xyedges[1]) - else: - xmin, xmax, ymin, ymax = self.mfgrid.extent - pl = box(xmin, ymin, xmax, ymax) - - # rotate and translate linestring to local coords - if (self.mfgrid.xoffset != 0. 
or self.mfgrid.yoffset != 0.): - shp = translate(shp, xoff=-self.mfgrid.xoffset, - yoff=-self.mfgrid.yoffset) - if self.mfgrid.angrot != 0.: - shp = rotate(shp, -self.mfgrid.angrot, origin=(0., 0.)) - - # clip line to mfgrid bbox - lineclip = shp.intersection(pl) - - if lineclip.length == 0.: # linestring does not intersect modelgrid - return np.recarray(0, names=["cellids", "vertices", - "lengths", "ixshapes"], - formats=["O", "O", "f8", "O"]) - if lineclip.geom_type == 'MultiLineString': # there are multiple lines - nodelist, lengths, vertices = [], [], [] - ixshapes = [] - for ls in lineclip: - n, l, v, ix = self._get_nodes_intersecting_linestring(ls) - nodelist += n - lengths += l - # if necessary, transform coordinates back to real - # world coordinates - if (self.mfgrid.angrot != 0. or self.mfgrid.xoffset != 0. - or self.mfgrid.yoffset != 0.): - v_realworld = [] - for pt in v: - rx, ry = transform([pt[0]], [pt[1]], - self.mfgrid.xoffset, - self.mfgrid.yoffset, - self.mfgrid.angrot_radians, - inverse=False) - v_realworld.append([rx, ry]) - ix_realworld = rotate( - ix, self.mfgrid.angrot, origin=(0., 0.)) - ix_realworld = translate( - ix_realworld, self.mfgrid.xoffset, self.mfgrid.yoffset) - else: - v_realworld = v - ix_realworld = ix - vertices += v_realworld - ixshapes += ix_realworld - else: # linestring is fully within grid - nodelist, lengths, vertices, ixshapes = \ - self._get_nodes_intersecting_linestring( - lineclip) - # if necessary, transform coordinates back to real - # world coordinates - if (self.mfgrid.angrot != 0. or self.mfgrid.xoffset != 0. - or self.mfgrid.yoffset != 0.): - v_realworld = [] - for pt in vertices: - rx, ry = transform([pt[0]], [pt[1]], self.mfgrid.xoffset, - self.mfgrid.yoffset, - self.mfgrid.angrot_radians, - inverse=False) - v_realworld.append([rx, ry]) - vertices = v_realworld - - ix_shapes_realworld = [] - for ixs in ixshapes: - ixs = rotate(ixs, self.mfgrid.angrot, origin=(0., 0.)) - ixs = translate(ixs, self.mfgrid.xoffset, - self.mfgrid.yoffset) - ix_shapes_realworld.append(ixs) - ixshapes = ix_shapes_realworld - - # bundle linestrings in same cell - tempnodes = [] - templengths = [] - tempverts = [] - tempshapes = [] - unique_nodes = list(set(nodelist)) - if len(unique_nodes) < len(nodelist): - for inode in unique_nodes: - templengths.append( - sum([l for l, i in zip(lengths, nodelist) if i == inode])) - tempverts.append( - [v for v, i in zip(vertices, nodelist) if i == inode]) - tempshapes.append( - [ix for ix, i in zip(ixshapes, nodelist) if i == inode]) - - nodelist = unique_nodes - lengths = templengths - vertices = tempverts - ixshapes = tempshapes - - # eliminate any nodes that have a zero length - if not keepzerolengths: - tempnodes = [] - templengths = [] - tempverts = [] - tempshapes = [] - for i, _ in enumerate(nodelist): - if lengths[i] > 0: - tempnodes.append(nodelist[i]) - templengths.append(lengths[i]) - tempverts.append(vertices[i]) - tempshapes.append(ixshapes[i]) - nodelist = tempnodes - lengths = templengths - vertices = tempverts - ixshapes = tempshapes - - rec = np.recarray(len(nodelist), - names=["cellids", "vertices", "lengths", "ixshapes"], - formats=["O", "O", "f8", "O"]) - rec.vertices = vertices - rec.lengths = lengths - rec.cellids = nodelist - rec.ixshapes = ixshapes - - return rec - - def _get_nodes_intersecting_linestring(self, linestring): - """helper function, intersect the linestring with the a structured grid - and return a list of node indices and the length of the line in that - node. 
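A sketch of the structured linestring routine described above; the coordinates are invented, and for a line that stays inside the grid the per-cell lengths sum to the line length:

```python
import numpy as np
import flopy
from shapely.geometry import LineString
from flopy.utils.gridintersect import GridIntersect

grid = flopy.discretization.StructuredGrid(delr=np.ones(10), delc=np.ones(10))
ix_structured = GridIntersect(grid, method='structured')
ls = LineString([(0.5, 8.5), (9.5, 2.5)])       # coordinates invented
rec = ix_structured.intersect_linestring(ls)
# per-cell lengths sum to the length of the line inside the grid
print(rec.cellids[:3], rec.lengths.sum(), ls.length)
```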
- - Parameters - ---------- - linestring: shapely.geometry.LineString or MultiLineString - shape to intersect with the grid - - Returns - ------- - nodelist, lengths, vertices: lists - lists containing node ids, lengths of intersects and the - start and end points of the intersects - """ - nodelist = [] - lengths = [] - vertices = [] - ixshapes = [] - - # start at the beginning of the line - x, y = linestring.xy - - # linestring already in local coords but - # because intersect_point does transform again - # we transform back to real world here if necessary - if (self.mfgrid.angrot != 0. or self.mfgrid.xoffset != 0. - or self.mfgrid.yoffset != 0.): - x0, y0 = transform([x[0]], [y[0]], self.mfgrid.xoffset, - self.mfgrid.yoffset, self.mfgrid.angrot_radians, - inverse=False) - else: - x0 = [x[0]] - y0 = [y[0]] - - (i, j) = self.intersect_point(Point(x0[0], y0[0])).cellids[0] - Xe, Ye = self.mfgrid.xyedges - xmin = Xe[j] - xmax = Xe[j + 1] - ymax = Ye[i] - ymin = Ye[i + 1] - pl = box(xmin, ymin, xmax, ymax) - intersect = linestring.intersection(pl) - # if linestring starts in cell, exits, and re-enters - # a MultiLineString is returned. - ixshapes.append(intersect) - length = intersect.length - lengths.append(length) - if intersect.geom_type == "MultiLineString": - x, y = [], [] - for igeom in intersect.geoms: - x.append(igeom.xy[0]) - y.append(igeom.xy[1]) - x = np.concatenate(x) - y = np.concatenate(y) - else: - x = intersect.xy[0] - y = intersect.xy[1] - verts = [(ixy[0], ixy[1]) for ixy in zip(x, y)] - vertices.append(verts) - nodelist.append((i, j)) - - n = 0 - while True: - (i, j) = nodelist[n] - node, length, verts, ixshape = \ - self._check_adjacent_cells_intersecting_line( - linestring, (i, j), nodelist) - - for inode, ilength, ivert, ix in zip(node, length, verts, ixshape): - if inode is not None: - if ivert not in vertices: - nodelist.append(inode) - lengths.append(ilength) - vertices.append(ivert) - ixshapes.append(ix) - - if n == len(nodelist) - 1: - break - n += 1 - - return nodelist, lengths, vertices, ixshapes - - def _check_adjacent_cells_intersecting_line(self, linestring, i_j, - nodelist): - """helper method that follows a line through a structured grid. 
- - Parameters - ---------- - linestring : shapely.geometry.LineString - shape to intersect with the grid - i_j : tuple - tuple containing (nrow, ncol) - nodelist : list of tuples - list of node ids that have already been added - as intersections - - Returns - ------- - node, length, verts: lists - lists containing nodes, lengths and vertices of - intersections with adjacent cells relative to the - current cell (i, j) - """ - i, j = i_j - - Xe, Ye = self.mfgrid.xyedges - - node = [] - length = [] - verts = [] - ixshape = [] - - # check to left - if j > 0: - ii = i - jj = j - 1 - if (ii, jj) not in nodelist: - xmin = Xe[jj] - xmax = Xe[jj + 1] - ymax = Ye[ii] - ymin = Ye[ii + 1] - pl = box(xmin, ymin, xmax, ymax) - if linestring.intersects(pl): - intersect = linestring.intersection(pl) - ixshape.append(intersect) - length.append(intersect.length) - if intersect.geom_type == "MultiLineString": - x, y = [], [] - for igeom in intersect.geoms: - x.append(igeom.xy[0]) - y.append(igeom.xy[1]) - x = np.concatenate(x) - y = np.concatenate(y) - else: - x = intersect.xy[0] - y = intersect.xy[1] - verts.append([(ixy[0], ixy[1]) - for ixy in zip(*intersect.xy)]) - node.append((ii, jj)) - - # check to right - if j < self.mfgrid.ncol - 1: - ii = i - jj = j + 1 - if (ii, jj) not in nodelist: - xmin = Xe[jj] - xmax = Xe[jj + 1] - ymax = Ye[ii] - ymin = Ye[ii + 1] - pl = box(xmin, ymin, xmax, ymax) - if linestring.intersects(pl): - intersect = linestring.intersection(pl) - ixshape.append(intersect) - length.append(intersect.length) - if intersect.geom_type == "MultiLineString": - x, y = [], [] - for igeom in intersect.geoms: - x.append(igeom.xy[0]) - y.append(igeom.xy[1]) - x = np.concatenate(x) - y = np.concatenate(y) - else: - x = intersect.xy[0] - y = intersect.xy[1] - verts.append([(ixy[0], ixy[1]) - for ixy in zip(*intersect.xy)]) - node.append((ii, jj)) - - # check to back - if i > 0: - ii = i - 1 - jj = j - if (ii, jj) not in nodelist: - xmin = Xe[jj] - xmax = Xe[jj + 1] - ymax = Ye[ii] - ymin = Ye[ii + 1] - pl = box(xmin, ymin, xmax, ymax) - if linestring.intersects(pl): - intersect = linestring.intersection(pl) - ixshape.append(intersect) - length.append(intersect.length) - if intersect.geom_type == "MultiLineString": - x, y = [], [] - for igeom in intersect.geoms: - x.append(igeom.xy[0]) - y.append(igeom.xy[1]) - x = np.concatenate(x) - y = np.concatenate(y) - else: - x = intersect.xy[0] - y = intersect.xy[1] - verts.append([(ixy[0], ixy[1]) for ixy in - zip(*intersect.xy)]) - node.append((ii, jj)) - - # check to front - if i < self.mfgrid.nrow - 1: - ii = i + 1 - jj = j - if (ii, jj) not in nodelist: - xmin = Xe[jj] - xmax = Xe[jj + 1] - ymax = Ye[ii] - ymin = Ye[ii + 1] - pl = box(xmin, ymin, xmax, ymax) - if linestring.intersects(pl): - intersect = linestring.intersection(pl) - ixshape.append(intersect) - length.append(intersect.length) - if intersect.geom_type == "MultiLineString": - x, y = [], [] - for igeom in intersect.geoms: - x.append(igeom.xy[0]) - y.append(igeom.xy[1]) - x = np.concatenate(x) - y = np.concatenate(y) - else: - x = intersect.xy[0] - y = intersect.xy[1] - verts.append([(ixy[0], ixy[1]) for ixy in zip(x, y)]) - node.append((ii, jj)) - - return node, length, verts, ixshape - - def _intersect_rectangle_structured(self, rectangle): - """intersect a rectangle with a structured grid to retrieve node ids of - intersecting grid cells. - - Note: only works in local coordinates (i.e. 
non-rotated grid - with origin at (0, 0)) - - Parameters - ---------- - rectangle : list of tuples - list of lower-left coordinate and upper-right - coordinate: [(xmin, ymin), (xmax, ymax)] - - Returns - ------- - nodelist: list of tuples - list of tuples containing node ids with which - the rectangle intersects - """ - - nodelist = [] - - # return if rectangle does not contain any cells - if (self.mfgrid.angrot != 0. or self.mfgrid.xoffset != 0. - or self.mfgrid.yoffset != 0.): - minx = np.min(self.mfgrid.xyedges[0]) - maxx = np.max(self.mfgrid.xyedges[0]) - miny = np.min(self.mfgrid.xyedges[1]) - maxy = np.max(self.mfgrid.xyedges[1]) - local_extent = [minx, maxx, miny, maxy] - else: - local_extent = self.mfgrid.extent - - xmin, xmax, ymin, ymax = local_extent - bgrid = box(xmin, ymin, xmax, ymax) - (rxmin, rymin), (rxmax, rymax) = rectangle - b = box(rxmin, rymin, rxmax, rymax) - - if not b.intersects(bgrid): - # return with nodelist as an empty list - return [] - - Xe, Ye = self.mfgrid.xyedges - - jmin = ModflowGridIndices.find_position_in_array(Xe, xmin) - if jmin is None: - if xmin <= Xe[0]: - jmin = 0 - elif xmin >= Xe[-1]: - jmin = self.mfgrid.ncol - 1 - - jmax = ModflowGridIndices.find_position_in_array(Xe, xmax) - if jmax is None: - if xmax <= Xe[0]: - jmax = 0 - elif xmax >= Xe[-1]: - jmax = self.mfgrid.ncol - 1 - - imin = ModflowGridIndices.find_position_in_array(Ye, ymax) - if imin is None: - if ymax >= Ye[0]: - imin = 0 - elif ymax <= Ye[-1]: - imin = self.mfgrid.nrow - 1 - - imax = ModflowGridIndices.find_position_in_array(Ye, ymin) - if imax is None: - if ymin >= Ye[0]: - imax = 0 - elif ymin <= Ye[-1]: - imax = self.mfgrid.nrow - 1 - - for i in range(imin, imax + 1): - for j in range(jmin, jmax + 1): - nodelist.append((i, j)) - - return nodelist - - def _intersect_polygon_structured(self, shp): - """intersect polygon with a structured grid. Uses bounding box of the - Polygon to limit search space. - - Notes - ----- - If performance is slow, try setting the method to 'vertex' - in the GridIntersect object. For polygons this is often - faster. - - Parameters - ---------- - shp : shapely.geometry.Polygon - polygon to intersect with the grid - - Returns - ------- - numpy.recarray - a record array containing information about the intersection - """ - - # initialize the result lists - nodelist = [] - areas = [] - vertices = [] - ixshapes = [] - - # transform polygon to local grid coordinates - if (self.mfgrid.xoffset != 0. or self.mfgrid.yoffset != 0.): - shp = translate(shp, xoff=-self.mfgrid.xoffset, - yoff=-self.mfgrid.yoffset) - if self.mfgrid.angrot != 0.: - shp = rotate(shp, -self.mfgrid.angrot, origin=(0., 0.)) - - # use the bounds of the polygon to restrict the cell search - minx, miny, maxx, maxy = shp.bounds - rectangle = ((minx, miny), (maxx, maxy)) - nodes = self._intersect_rectangle_structured(rectangle) - - for (i, j) in nodes: - if (self.mfgrid.angrot != 0. or self.mfgrid.xoffset != 0. 
- or self.mfgrid.yoffset != 0.): - cell_coords = [(self.mfgrid.xyedges[0][j], - self.mfgrid.xyedges[1][i]), - (self.mfgrid.xyedges[0][j + 1], - self.mfgrid.xyedges[1][i]), - (self.mfgrid.xyedges[0][j + 1], - self.mfgrid.xyedges[1][i + 1]), - (self.mfgrid.xyedges[0][j], - self.mfgrid.xyedges[1][i + 1])] - else: - cell_coords = self.mfgrid.get_cell_vertices(i, j) - node_polygon = Polygon(cell_coords) - if shp.intersects(node_polygon): - intersect = shp.intersection(node_polygon) - if intersect.area > 0.: - nodelist.append((i, j)) - areas.append(intersect.area) - - # if necessary, transform coordinates back to real - # world coordinates - if (self.mfgrid.angrot != 0. or self.mfgrid.xoffset != 0. - or self.mfgrid.yoffset != 0.): - v_realworld = [] - if intersect.geom_type.startswith("Multi"): - for ipoly in intersect: - v_realworld += \ - self._transform_geo_interface_polygon( - ipoly) - else: - v_realworld += \ - self._transform_geo_interface_polygon( - intersect) - intersect_realworld = rotate(intersect, - self.mfgrid.angrot, - origin=(0., 0.)) - intersect_realworld = translate(intersect_realworld, - self.mfgrid.xoffset, - self.mfgrid.yoffset) - else: - v_realworld = intersect.__geo_interface__[ - "coordinates"] - intersect_realworld = intersect - ixshapes.append(intersect_realworld) - vertices.append(v_realworld) - - rec = np.recarray(len(nodelist), - names=["cellids", "vertices", "areas", "ixshapes"], - formats=["O", "O", "f8", "O"]) - rec.vertices = vertices - rec.areas = areas - rec.cellids = nodelist - rec.ixshapes = ixshapes - - return rec - - def _transform_geo_interface_polygon(self, polygon): - """Internal method, helper function to transform geometry - __geo_interface__. - - Used for translating intersection result coordinates back into - real-world coordinates. - - Parameters - ---------- - polygon : shapely.geometry.Polygon - polygon to transform coordinates for - - Returns - ------- - geom_list : list - list containing transformed coordinates in same structure as - the original __geo_interface__. - """ - - if polygon.geom_type.startswith("Multi"): - raise TypeError("Does not support Multi geometries!") - - geom_list = [] - for coords in polygon.__geo_interface__["coordinates"]: - geoms = [] - try: - # test depth of list/tuple - _ = coords[0][0][0] - if len(coords) == 2: - shell, holes = coords - else: - raise ValueError("Cannot parse __geo_interface__") - except TypeError: - shell = coords - holes = None - except Exception as e: - raise e - # transform shell coordinates - shell_pts = [] - for pt in shell: - rx, ry = transform([pt[0]], [pt[1]], - self.mfgrid.xoffset, - self.mfgrid.yoffset, - self.mfgrid.angrot_radians, - inverse=False) - shell_pts.append((rx, ry)) - geoms.append(shell_pts) - # transform holes coordinates if necessary - if holes: - holes_pts = [] - for pt in holes: - rx, ry = transform([pt[0]], [pt[1]], - self.mfgrid.xoffset, - self.mfgrid.yoffset, - self.mfgrid.angrot_radians, - inverse=False) - holes_pts.append((rx, ry)) - geoms.append(holes_pts) - # append (shells, holes) to transformed coordinates list - geom_list.append(tuple(geoms)) - return geom_list - - @staticmethod - def plot_polygon(rec, ax=None, **kwargs): - """method to plot the polygon intersection results from the resulting - numpy.recarray. - - Note: only works when recarray has 'intersects' column! 
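A plotting sketch for the polygon results (grid, polygon, and styling invented; method names from the code). Note that the plot-method docstrings here say the recarray needs an 'intersects' column, but the loops in those methods actually read `rec.ixshapes`, so the 'intersects' wording appears to be stale. `descartes` must be installed for `PolygonPatch`:

```python
import numpy as np
import flopy
import matplotlib.pyplot as plt
from shapely.geometry import Polygon
from flopy.utils.gridintersect import GridIntersect

grid = flopy.discretization.StructuredGrid(delr=np.ones(10), delc=np.ones(10))
ix = GridIntersect(grid, method='vertex')
rec = ix.intersect_polygon(Polygon([(2.5, 2.5), (6.5, 2.5), (6.5, 6.5)]))
fig, ax = plt.subplots()
GridIntersect.plot_polygon(rec, ax=ax, alpha=0.5)   # needs descartes installed
ax.autoscale()                                      # add_patch does not autoscale
plt.show()
```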
- - Parameters - ---------- - rec : numpy.recarray - record array containing intersection results - (the resulting shapes) - ax : matplotlib.pyplot.axes, optional - axes to plot onto, if not provided, creates a new figure - **kwargs: - passed to the plot function - - Returns - ------- - ax: matplotlib.pyplot.axes - returns the axes handle - """ - try: - from descartes import PolygonPatch - except ModuleNotFoundError: - msg = 'descartes package needed for plotting polygons' - if plt is None: - msg = 'matplotlib and descartes packages needed for ' + \ - 'plotting polygons' - raise ModuleNotFoundError(msg) - - if plt is None: - msg = 'matplotlib package needed for plotting polygons' - raise ModuleNotFoundError(msg) - - if ax is None: - _, ax = plt.subplots() - - for i, ishp in enumerate(rec.ixshapes): - if "facecolor" in kwargs: - fc = kwargs.pop("facecolor") - else: - fc = "C{}".format(i % 10) - ppi = PolygonPatch(ishp, facecolor=fc, **kwargs) - ax.add_patch(ppi) - - return ax - - @staticmethod - def plot_linestring(rec, ax=None, **kwargs): - """method to plot the linestring intersection results from the - resulting numpy.recarray. - - Note: only works when recarray has 'intersects' column! - - Parameters - ---------- - rec : numpy.recarray - record array containing intersection results - (the resulting shapes) - ax : matplotlib.pyplot.axes, optional - axes to plot onto, if not provided, creates a new figure - **kwargs: - passed to the plot function - - Returns - ------- - ax: matplotlib.pyplot.axes - returns the axes handle - """ - if plt is None: - msg = 'matplotlib package needed for plotting polygons' - raise ModuleNotFoundError(msg) - - if ax is None: - _, ax = plt.subplots() - - for i, ishp in enumerate(rec.ixshapes): - if "c" in kwargs: - c = kwargs.pop("c") - elif "color" in kwargs: - c = kwargs.pop("color") - else: - c = "C{}".format(i % 10) - if ishp.type == "MultiLineString": - for part in ishp: - ax.plot(part.xy[0], part.xy[1], ls="-", - c=c, **kwargs) - else: - ax.plot(ishp.xy[0], ishp.xy[1], ls="-", - c=c, **kwargs) - - return ax - - @staticmethod - def plot_point(rec, ax=None, **kwargs): - """method to plot the point intersection results from the resulting - numpy.recarray. - - Note: only works when recarray has 'intersects' column! - - Parameters - ---------- - rec : numpy.recarray - record array containing intersection results - ax : matplotlib.pyplot.axes, optional - axes to plot onto, if not provided, creates a new figure - **kwargs: - passed to the scatter function - - Returns - ------- - ax: matplotlib.pyplot.axes - returns the axes handle - """ - if plt is None: - msg = 'matplotlib package needed for plotting polygons' - raise ModuleNotFoundError(msg) - - if ax is None: - _, ax = plt.subplots() - - x, y = [], [] - geo_coll = GeometryCollection(list(rec.ixshapes)) - collection = parse_shapely_ix_result([], geo_coll, ["Point"]) - for c in collection: - x.append(c.x) - y.append(c.y) - ax.scatter(x, y, **kwargs) - - return ax - - -class ModflowGridIndices: - """Collection of methods that can be used to find cell indices for a - structured, but irregularly spaced MODFLOW grid.""" - - @staticmethod - def find_position_in_array(arr, x): - """If arr has x positions for the left edge of a cell, then return the - cell index containing x. - - Parameters - ---------- - arr : A one dimensional array (such as Xe) that contains - coordinates for the left cell edge. - - x : float - The x position to find in arr. 
- """ - jpos = None - - if x == arr[-1]: - return len(arr) - 2 - - if x < min(arr[0], arr[-1]): - return None - - if x > max(arr[0], arr[-1]): - return None - - # go through each position - for j in range(len(arr) - 1): - xl = arr[j] - xr = arr[j + 1] - frac = (x - xl) / (xr - xl) - if 0. <= frac <= 1.0: - # if min(xl, xr) <= x < max(xl, xr): - jpos = j - return jpos - - return jpos - - @staticmethod - def kij_from_nodenumber(nodenumber, nlay, nrow, ncol): - """Convert the modflow node number to a zero-based layer, row and - column format. Return (k0, i0, j0). - - Parameters - ---------- - nodenumber: int - The cell nodenumber, ranging from 1 to number of - nodes. - nlay: int - The number of layers. - nrow: int - The number of rows. - ncol: int - The number of columns. - """ - if nodenumber > nlay * nrow * ncol: - raise Exception('Error in function kij_from_nodenumber...') - n = nodenumber - 1 - k = int(n / nrow / ncol) - i = int((n - k * nrow * ncol) / ncol) - j = n - k * nrow * ncol - i * ncol - return (k, i, j) - - @staticmethod - def nodenumber_from_kij(k, i, j, nrow, ncol): - """Calculate the nodenumber using the zero-based layer, row, and column - values. The first node has a value of 1. - - Parameters - ---------- - k : int - The model layer number as a zero-based value. - i : int - The model row number as a zero-based value. - j : int - The model column number as a zero-based value. - nrow : int - The number of model rows. - ncol : int - The number of model columns. - """ - return k * nrow * ncol + i * ncol + j + 1 - - @staticmethod - def nn0_from_kij(k, i, j, nrow, ncol): - """Calculate the zero-based nodenumber using the zero-based layer, row, - and column values. The first node has a value of 0. - - Parameters - ---------- - k : int - The model layer number as a zero-based value. - i : int - The model row number as a zero-based value. - j : int - The model column number as a zero-based value. - nrow : int - The number of model rows. - ncol : int - The number of model columns. - """ - return k * nrow * ncol + i * ncol + j - - @staticmethod - def kij_from_nn0(n, nlay, nrow, ncol): - """Convert the node number to a zero-based layer, row and column - format. Return (k0, i0, j0). - - Parameters - ---------- - nodenumber : int - The cell nodenumber, ranging from 0 to number of - nodes - 1. - nlay : int - The number of layers. - nrow : int - The number of rows. - ncol : int - The number of columns. - """ - if n > nlay * nrow * ncol: - raise Exception('Error in function kij_from_nodenumber...') - k = int(n / nrow / ncol) - i = int((n - k * nrow * ncol) / ncol) - j = n - k * nrow * ncol - i * ncol - return (k, i, j) +import numpy as np +try: + import matplotlib.pyplot as plt +except ModuleNotFoundError: + plt = None + +from .geometry import transform + +try: + from shapely.geometry import (MultiPoint, Point, Polygon, box, + GeometryCollection) + from shapely.strtree import STRtree + from shapely.affinity import translate, rotate + from shapely.prepared import prep + shply = True +except ModuleNotFoundError: + shply = False + + +def parse_shapely_ix_result(collection, ix_result, shptyps=None): + """Recursive function for parsing shapely intersection results. Returns a + list of shapely shapes matching shptyp. + + Parameters + ---------- + collection : list + state variable for storing result, generally + an empty list + ix_result : shapely.geometry type + any shapely intersection result + shptyp : str, list of str, or None, optional + if None (default), return all types of shapes. 
+        if str, return shapes of that type, if list of str,
+        return all types in list
+
+    Returns
+    -------
+    collection : list
+        list containing shapely geometries of type shptyp
+    """
+    # convert shptyps to list if needed
+    if isinstance(shptyps, str):
+        shptyps = [shptyps]
+    elif shptyps is None:
+        shptyps = [None]
+
+    # if empty
+    if ix_result.is_empty:
+        return collection
+    # base case: geom_type is partial or exact match to shptyp
+    elif ix_result.geom_type in shptyps:
+        collection.append(ix_result)
+        return collection
+    # recursion for collections
+    elif hasattr(ix_result, "geoms"):
+        for ishp in ix_result:
+            parse_shapely_ix_result(collection, ishp, shptyps=shptyps)
+    # if collecting all types, append the shape itself
+    elif shptyps[0] is None:
+        collection.append(ix_result)
+    return collection
+
+
+class GridIntersect:
+    """Class for intersecting shapely shapes (Point, LineString, Polygon, or
+    their Multi variants) with MODFLOW grids. Contains optimized search
+    routines for structured grids.
+
+    Notes
+    -----
+    - The STR-tree query is based on the bounding box of the shape or
+      collection; if the bounding box of the shape covers nearly the entire
+      grid, the query won't be able to limit the search space much, resulting
+      in slower performance. Therefore, it can sometimes be faster to
+      intersect each individual shape in a collection than it is to intersect
+      with the whole collection at once.
+    - Building the STR-tree can take a while for large grids. Once built, the
+      intersect routines (for individual shapes) should be pretty fast. It
+      is possible to perform intersects without building the STR-tree by
+      setting `rtree=False`.
+    - The optimized routines for structured grids will often outperform
+      the shapely routines because of the reduced overhead of building and
+      parsing the STR-tree. However, for polygons the STR-tree implementation
+      is often faster than the optimized structured routines, especially
+      for larger grids.
+    """
+
+    def __init__(self, mfgrid, method=None, rtree=True):
+        """Intersect shapes (Point, LineString, Polygon) with a MODFLOW grid.
+
+        Parameters
+        ----------
+        mfgrid : flopy modflowgrid
+            MODFLOW grid as implemented in flopy
+        method : str, optional
+            default is None, which determines the intersection method based
+            on the grid type. Options are either 'vertex', which uses shapely
+            intersection operations, or 'structured', which uses optimized
+            methods that only work for structured grids
+        rtree : bool, optional
+            whether to build an STR-tree, default is True. If False no
+            STR-tree is built (which saves some time), but intersects will
+            loop through all model gridcells (which is generally slower).
+            Only read when `method='vertex'`.
+        """
+        if not shply:
+            msg = ("Shapely is needed for grid intersect operations! "
+                   "Please install shapely if you need to use grid intersect "
+                   "functionality.")
+            raise ModuleNotFoundError(msg)
+
+        self.mfgrid = mfgrid
+        if method is None:
+            # determine method from grid_type
+            self.method = self.mfgrid.grid_type
+        else:
+            # set method
+            self.method = method
+        self.rtree = rtree
+
+        if self.method == "vertex":
+            # set method to get gridshapes depending on grid type
+            self._set_method_get_gridshapes()
+
+            # build STR-tree if specified
+            if self.rtree:
+                self.strtree = STRtree(self._get_gridshapes())
+
+            # set intersection methods
+            self.intersect_point = self._intersect_point_shapely
+            self.intersect_linestring = self._intersect_linestring_shapely
+            self.intersect_polygon = self._intersect_polygon_shapely
+
+        elif self.method == "structured" and mfgrid.grid_type == "structured":
+            self.intersect_point = self._intersect_point_structured
+            self.intersect_linestring = self._intersect_linestring_structured
+            self.intersect_polygon = self._intersect_polygon_structured
+
+        else:
+            raise NotImplementedError(
+                "Method '{0}' not recognized!".format(self.method))
+
+    def _set_method_get_gridshapes(self):
+        """internal method, set self._get_gridshapes to the appropriate
+        method for obtaining gridcells."""
+        # Set method for obtaining grid shapes
+        if self.mfgrid.grid_type == "structured":
+            self._get_gridshapes = self._rect_grid_to_shape_generator
+        elif self.mfgrid.grid_type == "vertex":
+            self._get_gridshapes = self._vtx_grid_to_shape_generator
+        elif self.mfgrid.grid_type == "unstructured":
+            raise NotImplementedError()
+
+    def _rect_grid_to_shape_generator(self):
+        """internal method, generator yielding shapely polygons for
+        structured grid cells.
+
+        Returns
+        -------
+        generator :
+            generator of shapely Polygons
+        """
+        for i in range(self.mfgrid.nrow):
+            for j in range(self.mfgrid.ncol):
+                xy = self.mfgrid.get_cell_vertices(i, j)
+                p = Polygon(xy)
+                p.name = (i, j)
+                yield p
+
+    def _usg_grid_to_shape_generator(self):
+        """internal method, convert unstructured grid to list of shapely
+        polygons.
+
+        Returns
+        -------
+        list
+            list of shapely Polygons
+        """
+        raise NotImplementedError()
+
+    def _vtx_grid_to_shape_generator(self):
+        """internal method, generator yielding shapely polygons for vertex
+        grids.
+
+        Returns
+        -------
+        generator :
+            generator of shapely Polygons
+        """
+        # for cell2d rec-arrays
+        if isinstance(self.mfgrid._cell2d, np.recarray):
+            for icell in self.mfgrid._cell2d.icell2d:
+                points = []
+                icverts = ["icvert_{}".format(i) for i in
+                           range(self.mfgrid._cell2d["ncvert"][icell])]
+                for iv in self.mfgrid._cell2d[icverts][icell]:
+                    points.append((self.mfgrid._vertices.xv[iv],
+                                   self.mfgrid._vertices.yv[iv]))
+                # close the polygon, if necessary
+                if points[0] != points[-1]:
+                    points.append(points[0])
+                p = Polygon(points)
+                p.name = icell
+                yield p
+        # for cell2d lists
+        elif isinstance(self.mfgrid._cell2d, list):
+            for icell in range(len(self.mfgrid._cell2d)):
+                points = []
+                for iv in self.mfgrid._cell2d[icell][-3:]:
+                    points.append((self.mfgrid._vertices[iv][1],
+                                   self.mfgrid._vertices[iv][2]))
+                # close the polygon, if necessary
+                if points[0] != points[-1]:
+                    points.append(points[0])
+                p = Polygon(points)
+                p.name = icell
+                yield p
+
+    def _rect_grid_to_shape_list(self):
+        """internal method, list of shapely polygons for structured grid cells.
+ + Returns + ------- + list : + list of shapely Polygons + """ + return list(self._rect_grid_to_shape_generator()) + + def _usg_grid_to_shape_list(self): + """internal method, convert unstructred grid to list of shapely + polygons. + + Returns + ------- + list + list of shapely Polygons + """ + raise NotImplementedError() + + def _vtx_grid_to_shape_list(self): + """internal method, list of shapely polygons for vertex grids. + + Returns + ------- + list : + list of shapely Polygons + """ + return list(self._vtx_grid_to_shape_generator()) + + def query_grid(self, shp): + """Perform spatial query on grid with shapely geometry. If no spatial + query is possible returns all grid cells. + + Parameters + ---------- + shp : shapely.geometry + shapely geometry + + Returns + ------- + list or generator expression + list or generator containing grid cells in query result + """ + if self.rtree: + result = self.strtree.query(shp) + else: + # no spatial query + result = self._get_gridshapes() + return result + + @staticmethod + def filter_query_result(qresult, shp): + """Filter query result to obtain grid cells that intersect with shape. + Used to (further) reduce query result to cells that definitely + intersect with shape. + + Parameters + ---------- + qresult : iterable + query result, iterable of polygons + shp : shapely.geometry + shapely geometry that is prepared and used to filter + query result + + Returns + ------- + qfiltered + filter or generator containing polygons that intersect with shape + """ + # prepare shape for efficient batch intersection check + prepshp = prep(shp) + # get only gridcells that intersect + qfiltered = filter(prepshp.intersects, qresult) + return qfiltered + + @staticmethod + def sort_gridshapes(shape_iter): + """Sort query result by node id. + + Parameters + ---------- + shape_iter : iterable + list or iterable of gridcells + + Returns + ------- + list + sorted list of gridcells + """ + if not isinstance(shape_iter, list): + shapelist = list(shape_iter) + else: + shapelist = shape_iter + + def sort_key(o): + return o.name + shapelist.sort(key=sort_key) + return shapelist + + def _intersect_point_shapely(self, shp, sort_by_cellid=True): + """intersect grid with Point or MultiPoint. + + Parameters + ---------- + shp : Point or MultiPoint + shapely Point or MultiPoint to intersect with grid. Note, + it is generally faster to loop over a MultiPoint and intersect + per point than to intersect a MultiPoint directly. 
+ sort_by_cellid : bool, optional + flag whether to sort cells by id, used to ensure node + with lowest id is returned, by default True + + Returns + ------- + numpy.recarray + a record array containing information about the intersection + """ + # query grid + qresult = self.query_grid(shp) + # prepare shape for efficient batch intersection check + prepshp = prep(shp) + # get only gridcells that intersect + qfiltered = filter(prepshp.intersects, qresult) + + # sort cells to ensure lowest cell ids are returned + if sort_by_cellid: + qfiltered = self.sort_gridshapes(qfiltered) + + isectshp = [] + cellids = [] + vertices = [] + parsed_points = [] # for keeping track of points + + # loop over cells returned by filtered spatial query + for r in qfiltered: + name = r.name + # do intersection + intersect = shp.intersection(r) + # parse result per Point + collection = parse_shapely_ix_result( + [], intersect, shptyps=["Point"]) + # loop over intersection result and store information + cell_verts = [] + cell_shps = [] + for c in collection: + verts = c.__geo_interface__["coordinates"] + # avoid returning multiple cells for points on boundaries + if verts in parsed_points: + continue + parsed_points.append(verts) + cell_shps.append(c) # collect only new points + cell_verts.append(verts) + # if any new ix found + if len(cell_shps) > 0: + # combine new points in MultiPoint + isectshp.append(MultiPoint(cell_shps) if len(cell_shps) > 1 + else cell_shps[0]) + vertices.append(tuple(cell_verts)) + cellids.append(name) + + rec = np.recarray(len(isectshp), + names=["cellids", "vertices", "ixshapes"], + formats=["O", "O", "O"]) + rec.ixshapes = isectshp + rec.vertices = vertices + rec.cellids = cellids + + return rec + + def _intersect_linestring_shapely(self, shp, keepzerolengths=False, + sort_by_cellid=True): + """intersect with LineString or MultiLineString. 
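# A minimal usage sketch for the shapely-based intersect methods above,
# assuming shapely 1.x (as this module does); the grid dimensions and point
# location are illustrative only:
import numpy as np
from shapely.geometry import Point
from flopy.discretization import StructuredGrid
from flopy.utils.gridintersect import GridIntersect

grid = StructuredGrid(delc=np.ones(10), delr=np.ones(10))
ix = GridIntersect(grid, method="vertex")
rec = ix.intersect_point(Point(4.5, 6.5))
print(rec.cellids)                             # (row, col) tuples
print(ix.intersects(Point(4.5, 6.5)).cellids)  # cell ids only, no geometry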
+ + Parameters + ---------- + shp : shapely.geometry.LineString or MultiLineString + LineString to intersect with the grid + keepzerolengths : bool, optional + keep linestrings with length zero, default is False + sort_by_cellid : bool, optional + flag whether to sort cells by id, used to ensure node + with lowest id is returned, by default True + + Returns + ------- + numpy.recarray + a record array containing information about the intersection + """ + # query grid + qresult = self.query_grid(shp) + # filter result further if possible (only strtree and filter methods) + qfiltered = self.filter_query_result(qresult, shp) + # sort cells to ensure lowest cell ids are returned + if sort_by_cellid: + qfiltered = self.sort_gridshapes(qfiltered) + + # initialize empty lists for storing results + isectshp = [] + cellids = [] + vertices = [] + lengths = [] + + # loop over cells returned by filtered spatial query + for r in qfiltered: + name = r.name + # do intersection + intersect = shp.intersection(r) + # parse result + collection = parse_shapely_ix_result( + [], intersect, shptyps=["LineString", "MultiLineString"]) + # loop over intersection result and store information + for c in collection: + verts = c.__geo_interface__["coordinates"] + # test if linestring was already processed (if on boundary) + if verts in vertices: + continue + # if keep zero don't check length + if not keepzerolengths: + if c.length == 0.: + continue + isectshp.append(c) + lengths.append(c.length) + vertices.append(verts) + cellids.append(name) + + rec = np.recarray(len(isectshp), + names=["cellids", "vertices", "lengths", "ixshapes"], + formats=["O", "O", "f8", "O"]) + rec.ixshapes = isectshp + rec.vertices = vertices + rec.lengths = lengths + rec.cellids = cellids + + return rec + + def _intersect_polygon_shapely(self, shp, sort_by_cellid=True): + """intersect with Polygon or MultiPolygon. + + Parameters + ---------- + shp : shapely.geometry.Polygon or MultiPolygon + shape to intersect with the grid + sort_by_cellid : bool, optional + flag whether to sort cells by id, used to ensure node + with lowest id is returned, by default True + + Returns + ------- + numpy.recarray + a record array containing information about the intersection + """ + # query grid + qresult = self.query_grid(shp) + # filter result further if possible (only strtree and filter methods) + qfiltered = self.filter_query_result(qresult, shp) + # sort cells to ensure lowest cell ids are returned + if sort_by_cellid: + qfiltered = self.sort_gridshapes(qfiltered) + + isectshp = [] + cellids = [] + vertices = [] + areas = [] + + # loop over cells returned by filtered spatial query + for r in qfiltered: + name = r.name + # do intersection + intersect = shp.intersection(r) + # parse result + collection = parse_shapely_ix_result( + [], intersect, shptyps=["Polygon", "MultiPolygon"]) + # loop over intersection result and store information + for c in collection: + # don't store intersections with 0 area + if c.area == 0.: + continue + verts = c.__geo_interface__["coordinates"] + isectshp.append(c) + areas.append(c.area) + vertices.append(verts) + cellids.append(name) + + rec = np.recarray(len(isectshp), + names=["cellids", "vertices", "areas", "ixshapes"], + formats=["O", "O", "f8", "O"]) + rec.ixshapes = isectshp + rec.vertices = vertices + rec.areas = areas + rec.cellids = cellids + + return rec + + def intersects(self, shp): + """Return cellIDs for shapes that intersect with shape. 
+ + Parameters + ---------- + shp : shapely.geometry + shape to intersect with the grid + + Returns + ------- + rec : numpy.recarray + a record array containing cell IDs of the gridcells + the shape intersects with + """ + # query grid + qresult = self.query_grid(shp) + # filter result further if possible (only strtree and filter methods) + qfiltered = self.filter_query_result(qresult, shp) + # get cellids + cids = [cell.name for cell in qfiltered] + # build rec-array + rec = np.recarray(len(cids), + names=["cellids"], + formats=["O"]) + rec.cellids = cids + return rec + + def _intersect_point_structured(self, shp): + """intersection method for intersecting points with structured grids. + + Parameters + ---------- + shp : shapely.geometry.Point or MultiPoint + point shape to intersect with grid + + Returns + ------- + numpy.recarray + a record array containing information about the intersection + """ + nodelist = [] + + Xe, Ye = self.mfgrid.xyedges + + try: + iter(shp) + except TypeError: + shp = [shp] + + ixshapes = [] + for p in shp: + # if grid is rotated or offset transform point to local coords + if (self.mfgrid.angrot != 0. or self.mfgrid.xoffset != 0. + or self.mfgrid.yoffset != 0.): + rx, ry = transform(p.x, p.y, self.mfgrid.xoffset, + self.mfgrid.yoffset, + self.mfgrid.angrot_radians, + inverse=True) + else: + rx = p.x + ry = p.y + + # two dimensional point + jpos = ModflowGridIndices.find_position_in_array(Xe, rx) + ipos = ModflowGridIndices.find_position_in_array(Ye, ry) + + if jpos is not None and ipos is not None: + nodelist.append((ipos, jpos)) + ixshapes.append(p) + + # three dimensional point + if p._ndim == 3: + # find k + kpos = ModflowGridIndices.find_position_in_array( + self.mfgrid.botm[:, ipos, jpos], p.z) + if kpos is not None: + nodelist.append((kpos, ipos, jpos)) + + # remove duplicates + tempnodes = [] + tempshapes = [] + for node, ixs in zip(nodelist, ixshapes): + if node not in tempnodes: + tempnodes.append(node) + tempshapes.append(ixs) + else: + # TODO: not sure if this is correct + tempshapes[-1] = MultiPoint([tempshapes[-1], ixs]) + + ixshapes = tempshapes + nodelist = tempnodes + + rec = np.recarray(len(nodelist), names=["cellids", "ixshapes"], + formats=["O", "O"]) + rec.cellids = nodelist + rec.ixshapes = ixshapes + return rec + + def _intersect_linestring_structured(self, shp, keepzerolengths=False): + """method for intersecting linestrings with structured grids. + + Parameters + ---------- + shp : shapely.geometry.Linestring or MultiLineString + linestring to intersect with grid + keepzerolengths : bool, optional + if True keep intersection results with length=0, in + other words, grid cells the linestring does not cross + but does touch, by default False + + Returns + ------- + numpy.recarray + a record array containing information about the intersection + """ + # get local extent of grid + if (self.mfgrid.angrot != 0. or self.mfgrid.xoffset != 0. + or self.mfgrid.yoffset != 0.): + xmin = np.min(self.mfgrid.xyedges[0]) + xmax = np.max(self.mfgrid.xyedges[0]) + ymin = np.min(self.mfgrid.xyedges[1]) + ymax = np.max(self.mfgrid.xyedges[1]) + else: + xmin, xmax, ymin, ymax = self.mfgrid.extent + pl = box(xmin, ymin, xmax, ymax) + + # rotate and translate linestring to local coords + if (self.mfgrid.xoffset != 0. 
or self.mfgrid.yoffset != 0.): + shp = translate(shp, xoff=-self.mfgrid.xoffset, + yoff=-self.mfgrid.yoffset) + if self.mfgrid.angrot != 0.: + shp = rotate(shp, -self.mfgrid.angrot, origin=(0., 0.)) + + # clip line to mfgrid bbox + lineclip = shp.intersection(pl) + + if lineclip.length == 0.: # linestring does not intersect modelgrid + return np.recarray(0, names=["cellids", "vertices", + "lengths", "ixshapes"], + formats=["O", "O", "f8", "O"]) + if lineclip.geom_type == 'MultiLineString': # there are multiple lines + nodelist, lengths, vertices = [], [], [] + ixshapes = [] + for ls in lineclip: + n, l, v, ix = self._get_nodes_intersecting_linestring(ls) + nodelist += n + lengths += l + # if necessary, transform coordinates back to real + # world coordinates + if (self.mfgrid.angrot != 0. or self.mfgrid.xoffset != 0. + or self.mfgrid.yoffset != 0.): + v_realworld = [] + for pt in v: + rx, ry = transform([pt[0]], [pt[1]], + self.mfgrid.xoffset, + self.mfgrid.yoffset, + self.mfgrid.angrot_radians, + inverse=False) + v_realworld.append([rx, ry]) + ix_realworld = rotate( + ix, self.mfgrid.angrot, origin=(0., 0.)) + ix_realworld = translate( + ix_realworld, self.mfgrid.xoffset, self.mfgrid.yoffset) + else: + v_realworld = v + ix_realworld = ix + vertices += v_realworld + ixshapes += ix_realworld + else: # linestring is fully within grid + nodelist, lengths, vertices, ixshapes = \ + self._get_nodes_intersecting_linestring( + lineclip) + # if necessary, transform coordinates back to real + # world coordinates + if (self.mfgrid.angrot != 0. or self.mfgrid.xoffset != 0. + or self.mfgrid.yoffset != 0.): + v_realworld = [] + for pt in vertices: + rx, ry = transform([pt[0]], [pt[1]], self.mfgrid.xoffset, + self.mfgrid.yoffset, + self.mfgrid.angrot_radians, + inverse=False) + v_realworld.append([rx, ry]) + vertices = v_realworld + + ix_shapes_realworld = [] + for ixs in ixshapes: + ixs = rotate(ixs, self.mfgrid.angrot, origin=(0., 0.)) + ixs = translate(ixs, self.mfgrid.xoffset, + self.mfgrid.yoffset) + ix_shapes_realworld.append(ixs) + ixshapes = ix_shapes_realworld + + # bundle linestrings in same cell + tempnodes = [] + templengths = [] + tempverts = [] + tempshapes = [] + unique_nodes = list(set(nodelist)) + if len(unique_nodes) < len(nodelist): + for inode in unique_nodes: + templengths.append( + sum([l for l, i in zip(lengths, nodelist) if i == inode])) + tempverts.append( + [v for v, i in zip(vertices, nodelist) if i == inode]) + tempshapes.append( + [ix for ix, i in zip(ixshapes, nodelist) if i == inode]) + + nodelist = unique_nodes + lengths = templengths + vertices = tempverts + ixshapes = tempshapes + + # eliminate any nodes that have a zero length + if not keepzerolengths: + tempnodes = [] + templengths = [] + tempverts = [] + tempshapes = [] + for i, _ in enumerate(nodelist): + if lengths[i] > 0: + tempnodes.append(nodelist[i]) + templengths.append(lengths[i]) + tempverts.append(vertices[i]) + tempshapes.append(ixshapes[i]) + nodelist = tempnodes + lengths = templengths + vertices = tempverts + ixshapes = tempshapes + + rec = np.recarray(len(nodelist), + names=["cellids", "vertices", "lengths", "ixshapes"], + formats=["O", "O", "f8", "O"]) + rec.vertices = vertices + rec.lengths = lengths + rec.cellids = nodelist + rec.ixshapes = ixshapes + + return rec + + def _get_nodes_intersecting_linestring(self, linestring): + """helper function, intersect the linestring with the a structured grid + and return a list of node indices and the length of the line in that + node. 
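# A sketch of the structured linestring intersect described above, using a
# hypothetical diagonal line across a small uniform grid:
import numpy as np
from shapely.geometry import LineString
from flopy.discretization import StructuredGrid
from flopy.utils.gridintersect import GridIntersect

grid = StructuredGrid(delc=np.ones(10), delr=np.ones(10))
ix = GridIntersect(grid, method="structured")
rec = ix.intersect_linestring(LineString([(0.5, 0.5), (7.5, 9.5)]))
print(rec.cellids)   # (row, col) of each cell the line crosses
print(rec.lengths)   # length of the line segment inside each cell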
+ + Parameters + ---------- + linestring: shapely.geometry.LineString or MultiLineString + shape to intersect with the grid + + Returns + ------- + nodelist, lengths, vertices: lists + lists containing node ids, lengths of intersects and the + start and end points of the intersects + """ + nodelist = [] + lengths = [] + vertices = [] + ixshapes = [] + + # start at the beginning of the line + x, y = linestring.xy + + # linestring already in local coords but + # because intersect_point does transform again + # we transform back to real world here if necessary + if (self.mfgrid.angrot != 0. or self.mfgrid.xoffset != 0. + or self.mfgrid.yoffset != 0.): + x0, y0 = transform([x[0]], [y[0]], self.mfgrid.xoffset, + self.mfgrid.yoffset, self.mfgrid.angrot_radians, + inverse=False) + else: + x0 = [x[0]] + y0 = [y[0]] + + (i, j) = self.intersect_point(Point(x0[0], y0[0])).cellids[0] + Xe, Ye = self.mfgrid.xyedges + xmin = Xe[j] + xmax = Xe[j + 1] + ymax = Ye[i] + ymin = Ye[i + 1] + pl = box(xmin, ymin, xmax, ymax) + intersect = linestring.intersection(pl) + # if linestring starts in cell, exits, and re-enters + # a MultiLineString is returned. + ixshapes.append(intersect) + length = intersect.length + lengths.append(length) + if intersect.geom_type == "MultiLineString": + x, y = [], [] + for igeom in intersect.geoms: + x.append(igeom.xy[0]) + y.append(igeom.xy[1]) + x = np.concatenate(x) + y = np.concatenate(y) + else: + x = intersect.xy[0] + y = intersect.xy[1] + verts = [(ixy[0], ixy[1]) for ixy in zip(x, y)] + vertices.append(verts) + nodelist.append((i, j)) + + n = 0 + while True: + (i, j) = nodelist[n] + node, length, verts, ixshape = \ + self._check_adjacent_cells_intersecting_line( + linestring, (i, j), nodelist) + + for inode, ilength, ivert, ix in zip(node, length, verts, ixshape): + if inode is not None: + if ivert not in vertices: + nodelist.append(inode) + lengths.append(ilength) + vertices.append(ivert) + ixshapes.append(ix) + + if n == len(nodelist) - 1: + break + n += 1 + + return nodelist, lengths, vertices, ixshapes + + def _check_adjacent_cells_intersecting_line(self, linestring, i_j, + nodelist): + """helper method that follows a line through a structured grid. 
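# The four near-identical neighbor checks in the method below could be driven
# by a single loop over row/column offsets; a condensed sketch of that idea
# (iter_neighbors is illustrative, not part of flopy):
def iter_neighbors(i, j, nrow, ncol):
    """Yield (row, col) of the left, right, back and front neighbors."""
    for di, dj in ((0, -1), (0, 1), (-1, 0), (1, 0)):
        ii, jj = i + di, j + dj
        if 0 <= ii < nrow and 0 <= jj < ncol:
            yield ii, jj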
+
+        Parameters
+        ----------
+        linestring : shapely.geometry.LineString
+            shape to intersect with the grid
+        i_j : tuple
+            tuple containing (nrow, ncol)
+        nodelist : list of tuples
+            list of node ids that have already been added
+            as intersections
+
+        Returns
+        -------
+        node, length, verts, ixshape : lists
+            lists containing nodes, lengths, vertices and intersection
+            shapes for the adjacent cells relative to the
+            current cell (i, j)
+        """
+        i, j = i_j
+
+        Xe, Ye = self.mfgrid.xyedges
+
+        node = []
+        length = []
+        verts = []
+        ixshape = []
+
+        # check to left
+        if j > 0:
+            ii = i
+            jj = j - 1
+            if (ii, jj) not in nodelist:
+                xmin = Xe[jj]
+                xmax = Xe[jj + 1]
+                ymax = Ye[ii]
+                ymin = Ye[ii + 1]
+                pl = box(xmin, ymin, xmax, ymax)
+                if linestring.intersects(pl):
+                    intersect = linestring.intersection(pl)
+                    ixshape.append(intersect)
+                    length.append(intersect.length)
+                    if intersect.geom_type == "MultiLineString":
+                        x, y = [], []
+                        for igeom in intersect.geoms:
+                            x.append(igeom.xy[0])
+                            y.append(igeom.xy[1])
+                        x = np.concatenate(x)
+                        y = np.concatenate(y)
+                    else:
+                        x = intersect.xy[0]
+                        y = intersect.xy[1]
+                    # use the concatenated x, y so multi-part
+                    # intersections are handled as well
+                    verts.append([(ixy[0], ixy[1])
+                                  for ixy in zip(x, y)])
+                    node.append((ii, jj))
+
+        # check to right
+        if j < self.mfgrid.ncol - 1:
+            ii = i
+            jj = j + 1
+            if (ii, jj) not in nodelist:
+                xmin = Xe[jj]
+                xmax = Xe[jj + 1]
+                ymax = Ye[ii]
+                ymin = Ye[ii + 1]
+                pl = box(xmin, ymin, xmax, ymax)
+                if linestring.intersects(pl):
+                    intersect = linestring.intersection(pl)
+                    ixshape.append(intersect)
+                    length.append(intersect.length)
+                    if intersect.geom_type == "MultiLineString":
+                        x, y = [], []
+                        for igeom in intersect.geoms:
+                            x.append(igeom.xy[0])
+                            y.append(igeom.xy[1])
+                        x = np.concatenate(x)
+                        y = np.concatenate(y)
+                    else:
+                        x = intersect.xy[0]
+                        y = intersect.xy[1]
+                    verts.append([(ixy[0], ixy[1])
+                                  for ixy in zip(x, y)])
+                    node.append((ii, jj))
+
+        # check to back
+        if i > 0:
+            ii = i - 1
+            jj = j
+            if (ii, jj) not in nodelist:
+                xmin = Xe[jj]
+                xmax = Xe[jj + 1]
+                ymax = Ye[ii]
+                ymin = Ye[ii + 1]
+                pl = box(xmin, ymin, xmax, ymax)
+                if linestring.intersects(pl):
+                    intersect = linestring.intersection(pl)
+                    ixshape.append(intersect)
+                    length.append(intersect.length)
+                    if intersect.geom_type == "MultiLineString":
+                        x, y = [], []
+                        for igeom in intersect.geoms:
+                            x.append(igeom.xy[0])
+                            y.append(igeom.xy[1])
+                        x = np.concatenate(x)
+                        y = np.concatenate(y)
+                    else:
+                        x = intersect.xy[0]
+                        y = intersect.xy[1]
+                    verts.append([(ixy[0], ixy[1]) for ixy in
+                                  zip(x, y)])
+                    node.append((ii, jj))
+
+        # check to front
+        if i < self.mfgrid.nrow - 1:
+            ii = i + 1
+            jj = j
+            if (ii, jj) not in nodelist:
+                xmin = Xe[jj]
+                xmax = Xe[jj + 1]
+                ymax = Ye[ii]
+                ymin = Ye[ii + 1]
+                pl = box(xmin, ymin, xmax, ymax)
+                if linestring.intersects(pl):
+                    intersect = linestring.intersection(pl)
+                    ixshape.append(intersect)
+                    length.append(intersect.length)
+                    if intersect.geom_type == "MultiLineString":
+                        x, y = [], []
+                        for igeom in intersect.geoms:
+                            x.append(igeom.xy[0])
+                            y.append(igeom.xy[1])
+                        x = np.concatenate(x)
+                        y = np.concatenate(y)
+                    else:
+                        x = intersect.xy[0]
+                        y = intersect.xy[1]
+                    verts.append([(ixy[0], ixy[1]) for ixy in zip(x, y)])
+                    node.append((ii, jj))
+
+        return node, length, verts, ixshape
+
+    def _intersect_rectangle_structured(self, rectangle):
+        """intersect a rectangle with a structured grid to retrieve node ids
+        of intersecting grid cells.
+
+        Note: only works in local coordinates (i.e.
non-rotated grid + with origin at (0, 0)) + + Parameters + ---------- + rectangle : list of tuples + list of lower-left coordinate and upper-right + coordinate: [(xmin, ymin), (xmax, ymax)] + + Returns + ------- + nodelist: list of tuples + list of tuples containing node ids with which + the rectangle intersects + """ + + nodelist = [] + + # return if rectangle does not contain any cells + if (self.mfgrid.angrot != 0. or self.mfgrid.xoffset != 0. + or self.mfgrid.yoffset != 0.): + minx = np.min(self.mfgrid.xyedges[0]) + maxx = np.max(self.mfgrid.xyedges[0]) + miny = np.min(self.mfgrid.xyedges[1]) + maxy = np.max(self.mfgrid.xyedges[1]) + local_extent = [minx, maxx, miny, maxy] + else: + local_extent = self.mfgrid.extent + + xmin, xmax, ymin, ymax = local_extent + bgrid = box(xmin, ymin, xmax, ymax) + (rxmin, rymin), (rxmax, rymax) = rectangle + b = box(rxmin, rymin, rxmax, rymax) + + if not b.intersects(bgrid): + # return with nodelist as an empty list + return [] + + Xe, Ye = self.mfgrid.xyedges + + jmin = ModflowGridIndices.find_position_in_array(Xe, xmin) + if jmin is None: + if xmin <= Xe[0]: + jmin = 0 + elif xmin >= Xe[-1]: + jmin = self.mfgrid.ncol - 1 + + jmax = ModflowGridIndices.find_position_in_array(Xe, xmax) + if jmax is None: + if xmax <= Xe[0]: + jmax = 0 + elif xmax >= Xe[-1]: + jmax = self.mfgrid.ncol - 1 + + imin = ModflowGridIndices.find_position_in_array(Ye, ymax) + if imin is None: + if ymax >= Ye[0]: + imin = 0 + elif ymax <= Ye[-1]: + imin = self.mfgrid.nrow - 1 + + imax = ModflowGridIndices.find_position_in_array(Ye, ymin) + if imax is None: + if ymin >= Ye[0]: + imax = 0 + elif ymin <= Ye[-1]: + imax = self.mfgrid.nrow - 1 + + for i in range(imin, imax + 1): + for j in range(jmin, jmax + 1): + nodelist.append((i, j)) + + return nodelist + + def _intersect_polygon_structured(self, shp): + """intersect polygon with a structured grid. Uses bounding box of the + Polygon to limit search space. + + Notes + ----- + If performance is slow, try setting the method to 'vertex' + in the GridIntersect object. For polygons this is often + faster. + + Parameters + ---------- + shp : shapely.geometry.Polygon + polygon to intersect with the grid + + Returns + ------- + numpy.recarray + a record array containing information about the intersection + """ + + # initialize the result lists + nodelist = [] + areas = [] + vertices = [] + ixshapes = [] + + # transform polygon to local grid coordinates + if (self.mfgrid.xoffset != 0. or self.mfgrid.yoffset != 0.): + shp = translate(shp, xoff=-self.mfgrid.xoffset, + yoff=-self.mfgrid.yoffset) + if self.mfgrid.angrot != 0.: + shp = rotate(shp, -self.mfgrid.angrot, origin=(0., 0.)) + + # use the bounds of the polygon to restrict the cell search + minx, miny, maxx, maxy = shp.bounds + rectangle = ((minx, miny), (maxx, maxy)) + nodes = self._intersect_rectangle_structured(rectangle) + + for (i, j) in nodes: + if (self.mfgrid.angrot != 0. or self.mfgrid.xoffset != 0. 
+ or self.mfgrid.yoffset != 0.): + cell_coords = [(self.mfgrid.xyedges[0][j], + self.mfgrid.xyedges[1][i]), + (self.mfgrid.xyedges[0][j + 1], + self.mfgrid.xyedges[1][i]), + (self.mfgrid.xyedges[0][j + 1], + self.mfgrid.xyedges[1][i + 1]), + (self.mfgrid.xyedges[0][j], + self.mfgrid.xyedges[1][i + 1])] + else: + cell_coords = self.mfgrid.get_cell_vertices(i, j) + node_polygon = Polygon(cell_coords) + if shp.intersects(node_polygon): + intersect = shp.intersection(node_polygon) + if intersect.area > 0.: + nodelist.append((i, j)) + areas.append(intersect.area) + + # if necessary, transform coordinates back to real + # world coordinates + if (self.mfgrid.angrot != 0. or self.mfgrid.xoffset != 0. + or self.mfgrid.yoffset != 0.): + v_realworld = [] + if intersect.geom_type.startswith("Multi"): + for ipoly in intersect: + v_realworld += \ + self._transform_geo_interface_polygon( + ipoly) + else: + v_realworld += \ + self._transform_geo_interface_polygon( + intersect) + intersect_realworld = rotate(intersect, + self.mfgrid.angrot, + origin=(0., 0.)) + intersect_realworld = translate(intersect_realworld, + self.mfgrid.xoffset, + self.mfgrid.yoffset) + else: + v_realworld = intersect.__geo_interface__[ + "coordinates"] + intersect_realworld = intersect + ixshapes.append(intersect_realworld) + vertices.append(v_realworld) + + rec = np.recarray(len(nodelist), + names=["cellids", "vertices", "areas", "ixshapes"], + formats=["O", "O", "f8", "O"]) + rec.vertices = vertices + rec.areas = areas + rec.cellids = nodelist + rec.ixshapes = ixshapes + + return rec + + def _transform_geo_interface_polygon(self, polygon): + """Internal method, helper function to transform geometry + __geo_interface__. + + Used for translating intersection result coordinates back into + real-world coordinates. + + Parameters + ---------- + polygon : shapely.geometry.Polygon + polygon to transform coordinates for + + Returns + ------- + geom_list : list + list containing transformed coordinates in same structure as + the original __geo_interface__. + """ + + if polygon.geom_type.startswith("Multi"): + raise TypeError("Does not support Multi geometries!") + + geom_list = [] + for coords in polygon.__geo_interface__["coordinates"]: + geoms = [] + try: + # test depth of list/tuple + _ = coords[0][0][0] + if len(coords) == 2: + shell, holes = coords + else: + raise ValueError("Cannot parse __geo_interface__") + except TypeError: + shell = coords + holes = None + except Exception as e: + raise e + # transform shell coordinates + shell_pts = [] + for pt in shell: + rx, ry = transform([pt[0]], [pt[1]], + self.mfgrid.xoffset, + self.mfgrid.yoffset, + self.mfgrid.angrot_radians, + inverse=False) + shell_pts.append((rx, ry)) + geoms.append(shell_pts) + # transform holes coordinates if necessary + if holes: + holes_pts = [] + for pt in holes: + rx, ry = transform([pt[0]], [pt[1]], + self.mfgrid.xoffset, + self.mfgrid.yoffset, + self.mfgrid.angrot_radians, + inverse=False) + holes_pts.append((rx, ry)) + geoms.append(holes_pts) + # append (shells, holes) to transformed coordinates list + geom_list.append(tuple(geoms)) + return geom_list + + @staticmethod + def plot_polygon(rec, ax=None, **kwargs): + """method to plot the polygon intersection results from the resulting + numpy.recarray. + + Note: only works when recarray has 'intersects' column! 
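# A plotting sketch for the static helpers below; matplotlib and descartes
# are assumed to be installed, and the grid and polygon are illustrative:
import numpy as np
import matplotlib.pyplot as plt
from shapely.geometry import Polygon
from flopy.discretization import StructuredGrid
from flopy.utils.gridintersect import GridIntersect

grid = StructuredGrid(delc=np.ones(10), delr=np.ones(10))
ix = GridIntersect(grid, method="vertex")
rec = ix.intersect_polygon(Polygon([(2.5, 2.5), (7.5, 2.5), (5.0, 7.5)]))
ax = GridIntersect.plot_polygon(rec, alpha=0.5)  # one patch per cell
plt.show()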
+ + Parameters + ---------- + rec : numpy.recarray + record array containing intersection results + (the resulting shapes) + ax : matplotlib.pyplot.axes, optional + axes to plot onto, if not provided, creates a new figure + **kwargs: + passed to the plot function + + Returns + ------- + ax: matplotlib.pyplot.axes + returns the axes handle + """ + try: + from descartes import PolygonPatch + except ModuleNotFoundError: + msg = 'descartes package needed for plotting polygons' + if plt is None: + msg = 'matplotlib and descartes packages needed for ' + \ + 'plotting polygons' + raise ModuleNotFoundError(msg) + + if plt is None: + msg = 'matplotlib package needed for plotting polygons' + raise ModuleNotFoundError(msg) + + if ax is None: + _, ax = plt.subplots() + + for i, ishp in enumerate(rec.ixshapes): + if "facecolor" in kwargs: + fc = kwargs.pop("facecolor") + else: + fc = "C{}".format(i % 10) + ppi = PolygonPatch(ishp, facecolor=fc, **kwargs) + ax.add_patch(ppi) + + return ax + + @staticmethod + def plot_linestring(rec, ax=None, **kwargs): + """method to plot the linestring intersection results from the + resulting numpy.recarray. + + Note: only works when recarray has 'intersects' column! + + Parameters + ---------- + rec : numpy.recarray + record array containing intersection results + (the resulting shapes) + ax : matplotlib.pyplot.axes, optional + axes to plot onto, if not provided, creates a new figure + **kwargs: + passed to the plot function + + Returns + ------- + ax: matplotlib.pyplot.axes + returns the axes handle + """ + if plt is None: + msg = 'matplotlib package needed for plotting polygons' + raise ModuleNotFoundError(msg) + + if ax is None: + _, ax = plt.subplots() + + for i, ishp in enumerate(rec.ixshapes): + if "c" in kwargs: + c = kwargs.pop("c") + elif "color" in kwargs: + c = kwargs.pop("color") + else: + c = "C{}".format(i % 10) + if ishp.type == "MultiLineString": + for part in ishp: + ax.plot(part.xy[0], part.xy[1], ls="-", + c=c, **kwargs) + else: + ax.plot(ishp.xy[0], ishp.xy[1], ls="-", + c=c, **kwargs) + + return ax + + @staticmethod + def plot_point(rec, ax=None, **kwargs): + """method to plot the point intersection results from the resulting + numpy.recarray. + + Note: only works when recarray has 'intersects' column! + + Parameters + ---------- + rec : numpy.recarray + record array containing intersection results + ax : matplotlib.pyplot.axes, optional + axes to plot onto, if not provided, creates a new figure + **kwargs: + passed to the scatter function + + Returns + ------- + ax: matplotlib.pyplot.axes + returns the axes handle + """ + if plt is None: + msg = 'matplotlib package needed for plotting polygons' + raise ModuleNotFoundError(msg) + + if ax is None: + _, ax = plt.subplots() + + x, y = [], [] + geo_coll = GeometryCollection(list(rec.ixshapes)) + collection = parse_shapely_ix_result([], geo_coll, ["Point"]) + for c in collection: + x.append(c.x) + y.append(c.y) + ax.scatter(x, y, **kwargs) + + return ax + + +class ModflowGridIndices: + """Collection of methods that can be used to find cell indices for a + structured, but irregularly spaced MODFLOW grid.""" + + @staticmethod + def find_position_in_array(arr, x): + """If arr has x positions for the left edge of a cell, then return the + cell index containing x. + + Parameters + ---------- + arr : A one dimensional array (such as Xe) that contains + coordinates for the left cell edge. + + x : float + The x position to find in arr. 
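# A quick numeric check of the index helpers in this class (values are
# illustrative):
import numpy as np
from flopy.utils.gridintersect import ModflowGridIndices

Xe = np.array([0., 10., 20., 30.])  # coordinates of the cell edges
print(ModflowGridIndices.find_position_in_array(Xe, 15.))   # -> 1
# round trip between zero-based (k, i, j) and the 1-based node number
n = ModflowGridIndices.nodenumber_from_kij(1, 2, 3, nrow=4, ncol=5)
print(n)                                                     # -> 34
print(ModflowGridIndices.kij_from_nodenumber(n, 2, 4, 5))    # -> (1, 2, 3)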
+ """ + jpos = None + + if x == arr[-1]: + return len(arr) - 2 + + if x < min(arr[0], arr[-1]): + return None + + if x > max(arr[0], arr[-1]): + return None + + # go through each position + for j in range(len(arr) - 1): + xl = arr[j] + xr = arr[j + 1] + frac = (x - xl) / (xr - xl) + if 0. <= frac <= 1.0: + # if min(xl, xr) <= x < max(xl, xr): + jpos = j + return jpos + + return jpos + + @staticmethod + def kij_from_nodenumber(nodenumber, nlay, nrow, ncol): + """Convert the modflow node number to a zero-based layer, row and + column format. Return (k0, i0, j0). + + Parameters + ---------- + nodenumber: int + The cell nodenumber, ranging from 1 to number of + nodes. + nlay: int + The number of layers. + nrow: int + The number of rows. + ncol: int + The number of columns. + """ + if nodenumber > nlay * nrow * ncol: + raise Exception('Error in function kij_from_nodenumber...') + n = nodenumber - 1 + k = int(n / nrow / ncol) + i = int((n - k * nrow * ncol) / ncol) + j = n - k * nrow * ncol - i * ncol + return (k, i, j) + + @staticmethod + def nodenumber_from_kij(k, i, j, nrow, ncol): + """Calculate the nodenumber using the zero-based layer, row, and column + values. The first node has a value of 1. + + Parameters + ---------- + k : int + The model layer number as a zero-based value. + i : int + The model row number as a zero-based value. + j : int + The model column number as a zero-based value. + nrow : int + The number of model rows. + ncol : int + The number of model columns. + """ + return k * nrow * ncol + i * ncol + j + 1 + + @staticmethod + def nn0_from_kij(k, i, j, nrow, ncol): + """Calculate the zero-based nodenumber using the zero-based layer, row, + and column values. The first node has a value of 0. + + Parameters + ---------- + k : int + The model layer number as a zero-based value. + i : int + The model row number as a zero-based value. + j : int + The model column number as a zero-based value. + nrow : int + The number of model rows. + ncol : int + The number of model columns. + """ + return k * nrow * ncol + i * ncol + j + + @staticmethod + def kij_from_nn0(n, nlay, nrow, ncol): + """Convert the node number to a zero-based layer, row and column + format. Return (k0, i0, j0). + + Parameters + ---------- + nodenumber : int + The cell nodenumber, ranging from 0 to number of + nodes - 1. + nlay : int + The number of layers. + nrow : int + The number of rows. + ncol : int + The number of columns. + """ + if n > nlay * nrow * ncol: + raise Exception('Error in function kij_from_nodenumber...') + k = int(n / nrow / ncol) + i = int((n - k * nrow * ncol) / ncol) + j = n - k * nrow * ncol - i * ncol + return (k, i, j) diff --git a/flopy/utils/mfreadnam.py b/flopy/utils/mfreadnam.py index 230d6d0f84..6825a1a8a6 100644 --- a/flopy/utils/mfreadnam.py +++ b/flopy/utils/mfreadnam.py @@ -1,280 +1,280 @@ -""" -mfreadnam module. Contains the NamData class. Note that the user can access -the NamData class as `flopy.modflow.NamData`. - -Additional information about the MODFLOW name file can be found at the `Online -MODFLOW Guide -`_. - -""" -import os -import sys - -if sys.version_info < (3, 6): - from collections import OrderedDict - - dict = OrderedDict - - -class NamData(object): - """ - MODFLOW Namefile Class. - - Parameters - ---------- - pkgtype : string - String identifying the type of MODFLOW package. See the - mfnam_packages dictionary keys in the model object for a list - of supported packages. This dictionary is also passed in as packages. 
- name : string - Filename of the package file identified in the name file - handle : file handle - File handle referring to the file identified by `name` - packages : dictionary - Dictionary of package objects as defined in the - `mfnam_packages` attribute of :class:`flopy.modflow.mf.Modflow`. - - Attributes - ---------- - filehandle : file handle - File handle to the package file. Read from `handle`. - filename : string - Filename of the package file identified in the name file. - Read from `name`. - filetype : string - String identifying the type of MODFLOW package. Read from - `pkgtype`. - package : string - Package type. Only assigned if `pkgtype` is found in the keys - of `packages` - - Methods - ------- - - See Also - -------- - - Notes - ----- - - Examples - -------- - - """ - - def __init__(self, pkgtype, name, handle, packages): - self.filehandle = handle - self.filename = name - self.filetype = pkgtype - self.package = None - if self.filetype.lower() in packages: - self.package = packages[self.filetype.lower()] - - def __repr__(self): - return "filename:{0}, filetype:{1}".format(self.filename, - self.filetype) - - -def getfiletypeunit(nf, filetype): - """ - Method to return unit number of a package from a NamData instance - - Parameters - ---------- - nf : NamData instance - filetype : string, name of package seeking information for - - Returns - ------- - cunit : int, unit number corresponding to the package type - - """ - for cunit, cvals in nf.items(): - if cvals.filetype.lower() == filetype.lower(): - return cunit - print('Name file does not contain file of type "{0}"'.format(filetype)) - return None - - -def parsenamefile(namfilename, packages, verbose=True): - """ - Returns dict from the nam file with NamData keyed by unit number - - Parameters - ---------- - namefilename : str - Name of the MODFLOW namefile to parse. - packages : dict - Dictionary of package objects as defined in the `mfnam_packages` - attribute of :class:`flopy.modflow.mf.Modflow`. - verbose : bool - Print messages to screen. Default is True. - - Returns - ------- - dict or OrderedDict - For each file listed in the name file, a - :class:`flopy.utils.mfreadnam.NamData` instance - is stored in the returned dict keyed by unit number. Prior to Python - version 3.6 the return object is an OrderedDict to retain the order - of items in the nam file. - - Raises - ------ - IOError: - If namfilename does not exist in the directory. - ValueError: - For lines that cannot be parsed. 
- """ - # initiate the ext_unit_dict ordered dictionary - ext_unit_dict = dict() - - if verbose: - print('Parsing the namefile --> {0:s}'.format(namfilename)) - - if not os.path.isfile(namfilename): - # help diagnose the namfile and directory - e = 'Could not find {} '.format(namfilename) + \ - 'in directory {}'.format(os.path.dirname(namfilename)) - raise IOError(e) - with open(namfilename, 'r') as fp: - lines = fp.readlines() - - for ln, line in enumerate(lines, 1): - line = line.strip() - if len(line) == 0 or line.startswith('#'): - # skip blank lines or comments - continue - items = line.split() - # ensure we have at least three items - if len(items) < 3: - e = 'line number {} has fewer than 3 items: {}'.format(ln, line) - raise ValueError(e) - ftype, key, fpath = items[0:3] - ftype = ftype.upper() - - # remove quotes in file path - if '"' in fpath: - fpath = fpath.replace('"', '') - if "'" in fpath: - fpath = fpath.replace("'", "") - - # need make filenames with paths system agnostic - if '/' in fpath: - raw = fpath.split('/') - elif '\\' in fpath: - raw = fpath.split('\\') - else: - raw = [fpath] - fpath = os.path.join(*raw) - - fname = os.path.join(os.path.dirname(namfilename), fpath) - if not os.path.isfile(fname) or not os.path.exists(fname): - # change to lower and make comparison (required for linux) - dn = os.path.dirname(fname) - fls = os.listdir(dn) - lownams = [f.lower() for f in fls] - bname = os.path.basename(fname) - if bname.lower() in lownams: - idx = lownams.index(bname.lower()) - fname = os.path.join(dn, fls[idx]) - # open the file - kwargs = {} - if ftype == 'DATA(BINARY)': - openmode = 'rb' - else: - openmode = 'r' - kwargs['errors'] = 'replace' - try: - filehandle = open(fname, openmode, **kwargs) - except IOError: - if verbose: - print('could not set filehandle to {0:s}'.format(fpath)) - filehandle = None - # be sure the second value is an integer - try: - key = int(key) - except ValueError: - raise ValueError('line number {}: the unit number (second item) ' - 'is not an integer: {}'.format(ln, line)) - # Trap for the case where unit numbers are specified as zero - # In this case, the package must have a variable called - # unit number attached to it. 
If not, then the key is set - # to fname - if key == 0: - ftype_lower = ftype.lower() - if ftype_lower in packages: - key = packages[ftype_lower].reservedunit() - else: - key = ftype - ext_unit_dict[key] = NamData(ftype, fname, filehandle, packages) - return ext_unit_dict - - -def attribs_from_namfile_header(namefile): - # check for reference info in the nam file header - defaults = {"xll": None, "yll": None, - "xul": None, "yul": None, "rotation": 0., - "proj4_str": None} - if namefile is None: - return defaults - header = [] - with open(namefile, 'r') as f: - for line in f: - if not line.startswith('#'): - break - header.extend(line.strip().replace('#', '').split(';')) - - for item in header: - if "xll" in item.lower(): - try: - xll = float(item.split(':')[1]) - defaults["xll"] = xll - except: - print(' could not parse xll ' + - 'in {}'.format(namefile)) - elif "yll" in item.lower(): - try: - yll = float(item.split(':')[1]) - defaults["yll"] = yll - except: - print(' could not parse yll ' + - 'in {}'.format(namefile)) - elif "xul" in item.lower(): - try: - xul = float(item.split(':')[1]) - defaults["xul"] = xul - except: - print(' could not parse xul ' + - 'in {}'.format(namefile)) - elif "yul" in item.lower(): - try: - yul = float(item.split(':')[1]) - defaults["yul"] = yul - except: - print(' could not parse yul ' + - 'in {}'.format(namefile)) - elif "rotation" in item.lower(): - try: - angrot = float(item.split(':')[1]) - defaults["rotation"] = angrot - except: - print(' could not parse rotation ' + - 'in {}'.format(namefile)) - elif "proj4_str" in item.lower(): - try: - proj4 = ':'.join(item.split(':')[1:]).strip() - if proj4.lower() == 'none': - proj4 = None - defaults['proj4_str'] = proj4 - except: - print(' could not parse proj4_str ' + - 'in {}'.format(namefile)) - elif "start" in item.lower(): - try: - start_datetime = item.split(':')[1].strip() - defaults["start_datetime"] = start_datetime - except: - print(' could not parse start ' + - 'in {}'.format(namefile)) - return defaults +""" +mfreadnam module. Contains the NamData class. Note that the user can access +the NamData class as `flopy.modflow.NamData`. + +Additional information about the MODFLOW name file can be found at the `Online +MODFLOW Guide +`_. + +""" +import os +import sys + +if sys.version_info < (3, 6): + from collections import OrderedDict + + dict = OrderedDict + + +class NamData(object): + """ + MODFLOW Namefile Class. + + Parameters + ---------- + pkgtype : string + String identifying the type of MODFLOW package. See the + mfnam_packages dictionary keys in the model object for a list + of supported packages. This dictionary is also passed in as packages. + name : string + Filename of the package file identified in the name file + handle : file handle + File handle referring to the file identified by `name` + packages : dictionary + Dictionary of package objects as defined in the + `mfnam_packages` attribute of :class:`flopy.modflow.mf.Modflow`. + + Attributes + ---------- + filehandle : file handle + File handle to the package file. Read from `handle`. + filename : string + Filename of the package file identified in the name file. + Read from `name`. + filetype : string + String identifying the type of MODFLOW package. Read from + `pkgtype`. + package : string + Package type. 
Only assigned if `pkgtype` is found in the keys + of `packages` + + Methods + ------- + + See Also + -------- + + Notes + ----- + + Examples + -------- + + """ + + def __init__(self, pkgtype, name, handle, packages): + self.filehandle = handle + self.filename = name + self.filetype = pkgtype + self.package = None + if self.filetype.lower() in packages: + self.package = packages[self.filetype.lower()] + + def __repr__(self): + return "filename:{0}, filetype:{1}".format(self.filename, + self.filetype) + + +def getfiletypeunit(nf, filetype): + """ + Method to return unit number of a package from a NamData instance + + Parameters + ---------- + nf : NamData instance + filetype : string, name of package seeking information for + + Returns + ------- + cunit : int, unit number corresponding to the package type + + """ + for cunit, cvals in nf.items(): + if cvals.filetype.lower() == filetype.lower(): + return cunit + print('Name file does not contain file of type "{0}"'.format(filetype)) + return None + + +def parsenamefile(namfilename, packages, verbose=True): + """ + Returns dict from the nam file with NamData keyed by unit number + + Parameters + ---------- + namefilename : str + Name of the MODFLOW namefile to parse. + packages : dict + Dictionary of package objects as defined in the `mfnam_packages` + attribute of :class:`flopy.modflow.mf.Modflow`. + verbose : bool + Print messages to screen. Default is True. + + Returns + ------- + dict or OrderedDict + For each file listed in the name file, a + :class:`flopy.utils.mfreadnam.NamData` instance + is stored in the returned dict keyed by unit number. Prior to Python + version 3.6 the return object is an OrderedDict to retain the order + of items in the nam file. + + Raises + ------ + IOError: + If namfilename does not exist in the directory. + ValueError: + For lines that cannot be parsed. 
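# A minimal call sketch for parsenamefile; the name file below is
# hypothetical:
import flopy
from flopy.utils.mfreadnam import parsenamefile

ml = flopy.modflow.Modflow()
ext_unit_dict = parsenamefile('my_model.nam', ml.mfnam_packages)
for unit, nam in ext_unit_dict.items():
    print(unit, nam.filetype, nam.filename)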
+ """ + # initiate the ext_unit_dict ordered dictionary + ext_unit_dict = dict() + + if verbose: + print('Parsing the namefile --> {0:s}'.format(namfilename)) + + if not os.path.isfile(namfilename): + # help diagnose the namfile and directory + e = 'Could not find {} '.format(namfilename) + \ + 'in directory {}'.format(os.path.dirname(namfilename)) + raise IOError(e) + with open(namfilename, 'r') as fp: + lines = fp.readlines() + + for ln, line in enumerate(lines, 1): + line = line.strip() + if len(line) == 0 or line.startswith('#'): + # skip blank lines or comments + continue + items = line.split() + # ensure we have at least three items + if len(items) < 3: + e = 'line number {} has fewer than 3 items: {}'.format(ln, line) + raise ValueError(e) + ftype, key, fpath = items[0:3] + ftype = ftype.upper() + + # remove quotes in file path + if '"' in fpath: + fpath = fpath.replace('"', '') + if "'" in fpath: + fpath = fpath.replace("'", "") + + # need make filenames with paths system agnostic + if '/' in fpath: + raw = fpath.split('/') + elif '\\' in fpath: + raw = fpath.split('\\') + else: + raw = [fpath] + fpath = os.path.join(*raw) + + fname = os.path.join(os.path.dirname(namfilename), fpath) + if not os.path.isfile(fname) or not os.path.exists(fname): + # change to lower and make comparison (required for linux) + dn = os.path.dirname(fname) + fls = os.listdir(dn) + lownams = [f.lower() for f in fls] + bname = os.path.basename(fname) + if bname.lower() in lownams: + idx = lownams.index(bname.lower()) + fname = os.path.join(dn, fls[idx]) + # open the file + kwargs = {} + if ftype == 'DATA(BINARY)': + openmode = 'rb' + else: + openmode = 'r' + kwargs['errors'] = 'replace' + try: + filehandle = open(fname, openmode, **kwargs) + except IOError: + if verbose: + print('could not set filehandle to {0:s}'.format(fpath)) + filehandle = None + # be sure the second value is an integer + try: + key = int(key) + except ValueError: + raise ValueError('line number {}: the unit number (second item) ' + 'is not an integer: {}'.format(ln, line)) + # Trap for the case where unit numbers are specified as zero + # In this case, the package must have a variable called + # unit number attached to it. 
If not, then the key is set + # to fname + if key == 0: + ftype_lower = ftype.lower() + if ftype_lower in packages: + key = packages[ftype_lower].reservedunit() + else: + key = ftype + ext_unit_dict[key] = NamData(ftype, fname, filehandle, packages) + return ext_unit_dict + + +def attribs_from_namfile_header(namefile): + # check for reference info in the nam file header + defaults = {"xll": None, "yll": None, + "xul": None, "yul": None, "rotation": 0., + "proj4_str": None} + if namefile is None: + return defaults + header = [] + with open(namefile, 'r') as f: + for line in f: + if not line.startswith('#'): + break + header.extend(line.strip().replace('#', '').split(';')) + + for item in header: + if "xll" in item.lower(): + try: + xll = float(item.split(':')[1]) + defaults["xll"] = xll + except: + print(' could not parse xll ' + + 'in {}'.format(namefile)) + elif "yll" in item.lower(): + try: + yll = float(item.split(':')[1]) + defaults["yll"] = yll + except: + print(' could not parse yll ' + + 'in {}'.format(namefile)) + elif "xul" in item.lower(): + try: + xul = float(item.split(':')[1]) + defaults["xul"] = xul + except: + print(' could not parse xul ' + + 'in {}'.format(namefile)) + elif "yul" in item.lower(): + try: + yul = float(item.split(':')[1]) + defaults["yul"] = yul + except: + print(' could not parse yul ' + + 'in {}'.format(namefile)) + elif "rotation" in item.lower(): + try: + angrot = float(item.split(':')[1]) + defaults["rotation"] = angrot + except: + print(' could not parse rotation ' + + 'in {}'.format(namefile)) + elif "proj4_str" in item.lower(): + try: + proj4 = ':'.join(item.split(':')[1:]).strip() + if proj4.lower() == 'none': + proj4 = None + defaults['proj4_str'] = proj4 + except: + print(' could not parse proj4_str ' + + 'in {}'.format(namefile)) + elif "start" in item.lower(): + try: + start_datetime = item.split(':')[1].strip() + defaults["start_datetime"] = start_datetime + except: + print(' could not parse start ' + + 'in {}'.format(namefile)) + return defaults diff --git a/flopy/utils/observationfile.py b/flopy/utils/observationfile.py index 708cd07431..47f3efdeea 100644 --- a/flopy/utils/observationfile.py +++ b/flopy/utils/observationfile.py @@ -1,538 +1,538 @@ -import numpy as np - -from ..utils.utils_def import FlopyBinaryData - - -class ObsFiles(FlopyBinaryData): - def __init__(self): - super(ObsFiles, self).__init__() - return - - def get_times(self): - """ - Get a list of unique times in the file - - Returns - ---------- - out : list of floats - List contains unique simulation times (totim) in binary file. - - """ - return self.data['totim'].reshape(self.get_ntimes()).tolist() - - def get_ntimes(self): - """ - Get the number of times in the file - - Returns - ---------- - out : int - The number of simulation times (totim) in binary file. - - """ - return self.data['totim'].shape[0] - - def get_nobs(self): - """ - Get the number of observations in the file - - Returns - ---------- - out : tuple of int - A tupe with the number of records and number of flow items - in the file. The number of flow items is non-zero only if - swrtype='flow'. - - """ - return self.nobs - - def get_obsnames(self): - """ - Get a list of observation names in the file - - Returns - ---------- - out : list of strings - List of observation names in the binary file. totim is not - included in the list of observation names. - - """ - return list(self.data.dtype.names[1:]) - - def get_data(self, idx=None, obsname=None, totim=None): - """ - Get data from the observation file. 
- - Parameters - ---------- - idx : int - The zero-based record number. The first record is record 0. - If idx is None and totim are None, data for all simulation times - are returned. (default is None) - obsname : string - The name of the observation to return. If obsname is None, all - observation data are returned. (default is None) - totim : float - The simulation time to return. If idx is None and totim are None, - data for all simulation times are returned. (default is None) - - Returns - ---------- - data : numpy record array - Array has size (ntimes, nitems). totim is always returned. nitems - is 2 if idx or obsname is not None or nobs+1. - - See Also - -------- - - Notes - ----- - If both idx and obsname are None, will return all of the observation - data. - - Examples - -------- - >>> hyd = HydmodObs("my_model.hyd") - >>> ts = hyd.get_data() - - """ - i0 = 0 - i1 = self.data.shape[0] - if totim is not None: - idx = np.where(self.data['totim'] == totim)[0][0] - i0 = idx - i1 = idx + 1 - elif idx is not None: - if idx < i1: - i0 = idx - i1 = i0 + 1 - r = None - if obsname is None: - obsname = self.get_obsnames() - else: - if obsname is not None: - if obsname not in self.data.dtype.names: - obsname = None - else: - if not isinstance(obsname, list): - obsname = [obsname] - if obsname is not None: - obsname.insert(0, 'totim') - r = get_selection(self.data, obsname)[i0:i1] - return r - - def get_dataframe(self, start_datetime='1-1-1970', - idx=None, obsname=None, totim=None, timeunit='D'): - """ - Get pandas dataframe with the incremental and cumulative water budget - items in the hydmod file. - - Parameters - ---------- - start_datetime : str - If start_datetime is passed as None, the rows are indexed on totim. - Otherwise, a DatetimeIndex is set. (default is 1-1-1970). - idx : int - The zero-based record number. The first record is record 0. - If idx is None and totim are None, a dataframe with all simulation - times is returned. (default is None) - obsname : string - The name of the observation to return. If obsname is None, all - observation data are returned. (default is None) - totim : float - The simulation time to return. If idx is None and totim are None, - a dataframe with all simulation times is returned. - (default is None) - timeunit : string - time unit of the simulation time. Valid values are 'S'econds, - 'M'inutes, 'H'ours, 'D'ays, 'Y'ears. (default is 'D'). - - Returns - ------- - out : pandas dataframe - Pandas dataframe of selected data. - - See Also - -------- - - Notes - ----- - If both idx and obsname are None, will return all of the observation - data as a dataframe. 
- - Examples - -------- - >>> hyd = HydmodObs("my_model.hyd") - >>> df = hyd.get_dataframes() - - """ - - try: - import pandas as pd - from ..utils.utils_def import totim_to_datetime - except Exception as e: - msg = "ObsFiles.get_dataframe() error import pandas: " + str(e) - raise ImportError(msg) - - i0 = 0 - i1 = self.data.shape[0] - if totim is not None: - idx = np.where(self.data['totim'] == totim)[0][0] - i0 = idx - i1 = idx + 1 - elif idx is not None: - if idx < i1: - i0 = idx - i1 = i0 + 1 - - if obsname is None: - obsname = self.get_obsnames() - else: - if obsname is not None: - if obsname not in self.data.dtype.names: - obsname = None - else: - if not isinstance(obsname, list): - obsname = [obsname] - if obsname is None: - return None - - obsname.insert(0, 'totim') - - dti = self.get_times()[i0:i1] - if start_datetime is not None: - dti = totim_to_datetime(dti, - start=pd.to_datetime(start_datetime), - timeunit=timeunit) - - df = pd.DataFrame(self.data[i0:i1], index=dti, columns=obsname) - return df - - def _read_data(self): - - if self.data is not None: - return - - while True: - try: - r = self.read_record(count=1) - if self.data is None: - self.data = r.copy() - elif r.size == 0: - break - else: - # should be hstack based on (https://mail.scipy.org/pipermail/numpy-discussion/2010-June/051107.html) - self.data = np.hstack((self.data, r)) - except: - break - return - - def _build_dtype(self): - """ - Build the recordarray and iposarray, which maps the header information - to the position in the formatted file. - """ - raise Exception( - 'Abstract method _build_dtype called in BinaryFiles. This method needs to be overridden.') - - def _build_index(self): - """ - Build the recordarray and iposarray, which maps the header information - to the position in the formatted file. - """ - raise Exception( - 'Abstract method _build_index called in BinaryFiles. This method needs to be overridden.') - - -class Mf6Obs(ObsFiles): - """ - Mf6Obs Class - used to read ascii and binary MODFLOW6 observation output - - Parameters - ---------- - filename : str - Name of the hydmod output file - verbose : boolean - If true, print additional information to to the screen during the - extraction. (default is False) - hydlbl_len : int - Length of hydmod labels. (default is 20) - - Returns - ------- - None - - """ - - def __init__(self, filename, verbose=False, isBinary=True): - """ - Class constructor. 
- - """ - super(Mf6Obs, self).__init__() - # initialize class information - self.verbose = verbose - if isBinary: - # --open binary head file - self.file = open(filename, 'rb') - - # read control line - cline = self.read_text(nchar=100) - precision = 'single' - if 'double' in cline[5:11].lower(): - precision = 'double' - self.set_float(precision) - lenobsname = int(cline[11:]) - - # get number of observations - self.nobs = self.read_integer() - - # # continue reading the file - # self.v = np.empty(self.nobs, dtype=np.float) - # self.v.fill(1.0E+32) - - # read obsnames - obsnames = [] - for idx in range(0, self.nobs): - cid = self.read_text(lenobsname) - obsnames.append(cid) - self.obsnames = np.array(obsnames) - - # build dtype - self._build_dtype() - - # build index - self._build_index() - - self.data = None - self._read_data() - else: - # --open binary head file - self.file = open(filename, 'r') - - # read header line - line = self.file.readline() - t = line.rstrip().split(',') - self.set_float('double') - - # get number of observations - self.nobs = len(t) - 1 - - # set obsnames - obsnames = [] - for idx in range(1, self.nobs + 1): - obsnames.append(t[idx]) - self.obsnames = np.array(obsnames) - - # build dtype - self._build_dtype() - - # build index - self._build_index() - - # read ascii data - self.data = np.loadtxt(self.file, dtype=self.dtype, delimiter=',', - ndmin=1) - return - - def _build_dtype(self): - - # create dtype - dtype = [('totim', self.floattype)] - for site in self.obsnames: - if not isinstance(site, str): - site_name = site.decode().strip() - else: - site_name = site.strip() - dtype.append((site_name, self.floattype)) - self.dtype = np.dtype(dtype) - return - - def _build_index(self): - return - - -class HydmodObs(ObsFiles): - """ - HydmodObs Class - used to read binary MODFLOW HYDMOD package output - - Parameters - ---------- - filename : str - Name of the hydmod output file - verbose : boolean - If true, print additional information to to the screen during the - extraction. (default is False) - hydlbl_len : int - Length of hydmod labels. (default is 20) - - Returns - ------- - None - - """ - - def __init__(self, filename, verbose=False, hydlbl_len=20): - """ - Class constructor. 
- - """ - super(HydmodObs, self).__init__() - # initialize class information - self.verbose = verbose - # --open binary head file - self.file = open(filename, 'rb') - # NHYDTOT,ITMUNI - self.nobs = self.read_integer() - precision = 'single' - if self.nobs < 0: - self.nobs = abs(self.nobs) - precision = 'double' - self.set_float(precision) - - # continue reading the file - self.itmuni = self.read_integer() - self.v = np.empty(self.nobs, dtype=np.float) - self.v.fill(1.0E+32) - ctime = self.read_text(nchar=4) - self.hydlbl_len = int(hydlbl_len) - # read HYDLBL - hydlbl = [] - for idx in range(0, self.nobs): - cid = self.read_text(self.hydlbl_len) - hydlbl.append(cid) - self.hydlbl = np.array(hydlbl) - - # build dtype - self._build_dtype() - - # build index - self._build_index() - - self.data = None - self._read_data() - - def _build_dtype(self): - - # create dtype - dtype = [('totim', self.floattype)] - for site in self.hydlbl: - if not isinstance(site, str): - site_name = site.decode().strip() - else: - site_name = site.strip() - dtype.append((site_name, self.floattype)) - self.dtype = np.dtype(dtype) - return - - def _build_index(self): - return - - -class SwrObs(ObsFiles): - """ - Read binary SWR observations output from MODFLOW SWR Process - observation files - - Parameters - ---------- - filename : string - Name of the cell budget file - precision : string - 'single' or 'double'. Default is 'double'. - verbose : bool - Write information to the screen. Default is False. - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - - Examples - -------- - - >>> import flopy - >>> so = flopy.utils.SwrObs('mymodel.swr.obs') - - """ - - def __init__(self, filename, precision='double', verbose=False): - """ - Class constructor. 
-
-        """
-        super(SwrObs, self).__init__()
-        self.set_float(precision=precision)
-        # initialize class information
-        self.verbose = verbose
-        # open binary head file
-        self.file = open(filename, 'rb')
-
-        # NOBS
-        self.nobs = self.read_integer()
-        # read obsnames
-        obsnames = []
-        for idx in range(0, self.nobs):
-            cid = self.read_text()
-            if isinstance(cid, bytes):
-                cid = cid.decode()
-            obsnames.append(cid.strip())
-        self.obs = obsnames
-
-        # read header information
-        self._build_dtype()
-
-        # build index
-        self._build_index()
-
-        # read data
-        self.data = None
-        self._read_data()
-
-    def _build_dtype(self):
-        vdata = [('totim', self.floattype)]
-        for name in self.obs:
-            vdata.append((str(name), self.floattype))
-        self.dtype = np.dtype(vdata)
-        return
-
-    def _build_index(self):
-        return
-
-
-def get_selection(data, names):
-    """
-
-    Parameters
-    ----------
-    data : numpy recarray
-        recarray of data to make a selection from
-    names : string or list of strings
-        column names to return
-
-    Returns
-    -------
-    out : numpy recarray
-        recarray with selection
-
-    """
-    if not isinstance(names, list):
-        names = [names]
-    ierr = 0
-    for name in names:
-        if name not in data.dtype.names:
-            ierr += 1
-            print('Error: {} is not a valid column name'.format(name))
-    if ierr > 0:
-        raise Exception('Error: {} names did not match'.format(ierr))
-
-    # Valid list of names so make a selection
-    dtype2 = np.dtype({name: data.dtype.fields[name] for name in names})
-    return np.ndarray(data.shape, dtype2, data, 0, data.strides)
+import numpy as np
+
+from ..utils.utils_def import FlopyBinaryData
+
+
+class ObsFiles(FlopyBinaryData):
+    def __init__(self):
+        super(ObsFiles, self).__init__()
+        return
+
+    def get_times(self):
+        """
+        Get a list of unique times in the file
+
+        Returns
+        -------
+        out : list of floats
+            List contains unique simulation times (totim) in binary file.
+
+        """
+        return self.data['totim'].reshape(self.get_ntimes()).tolist()
+
+    def get_ntimes(self):
+        """
+        Get the number of times in the file
+
+        Returns
+        -------
+        out : int
+            The number of simulation times (totim) in binary file.
+
+        """
+        return self.data['totim'].shape[0]
+
+    def get_nobs(self):
+        """
+        Get the number of observations in the file
+
+        Returns
+        -------
+        out : int
+            The number of observations (columns other than totim) in the
+            file.
+
+        """
+        return self.nobs
+
+    def get_obsnames(self):
+        """
+        Get a list of observation names in the file
+
+        Returns
+        -------
+        out : list of strings
+            List of observation names in the binary file. totim is not
+            included in the list of observation names.
+
+        """
+        return list(self.data.dtype.names[1:])
+
+    def get_data(self, idx=None, obsname=None, totim=None):
+        """
+        Get data from the observation file.
+
+        Parameters
+        ----------
+        idx : int
+            The zero-based record number. The first record is record 0.
+            If idx and totim are both None, data for all simulation times
+            are returned. (default is None)
+        obsname : string
+            The name of the observation to return. If obsname is None, all
+            observation data are returned. (default is None)
+        totim : float
+            The simulation time to return. If idx and totim are both None,
+            data for all simulation times are returned. (default is None)
+
+        Returns
+        -------
+        data : numpy record array
+            Array has size (ntimes, nitems). totim is always returned. nitems
+            is 2 if idx or obsname is not None; otherwise nitems is nobs + 1.
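The idx/obsname/totim selectors described above compose as shown in the following hedged usage sketch; the file name is hypothetical and any ObsFiles subclass behaves the same way:

```python
import flopy

hyd = flopy.utils.HydmodObs("my_model.hyd")   # hypothetical file name
all_rec = hyd.get_data()                      # all times, all observations
first = hyd.get_data(idx=0)                   # record 0 only
site = hyd.get_data(obsname=hyd.get_obsnames()[0])  # totim + one column
at_t = hyd.get_data(totim=1.0)                # record at simulation time 1.0
```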
+
+        See Also
+        --------
+
+        Notes
+        -----
+        If both idx and obsname are None, will return all of the observation
+        data.
+
+        Examples
+        --------
+        >>> hyd = HydmodObs("my_model.hyd")
+        >>> ts = hyd.get_data()
+
+        """
+        i0 = 0
+        i1 = self.data.shape[0]
+        if totim is not None:
+            idx = np.where(self.data['totim'] == totim)[0][0]
+            i0 = idx
+            i1 = idx + 1
+        elif idx is not None:
+            if idx < i1:
+                i0 = idx
+            i1 = i0 + 1
+        r = None
+        if obsname is None:
+            obsname = self.get_obsnames()
+        else:
+            if obsname is not None:
+                if obsname not in self.data.dtype.names:
+                    obsname = None
+                else:
+                    if not isinstance(obsname, list):
+                        obsname = [obsname]
+        if obsname is not None:
+            obsname.insert(0, 'totim')
+            r = get_selection(self.data, obsname)[i0:i1]
+        return r
+
+    def get_dataframe(self, start_datetime='1-1-1970',
+                      idx=None, obsname=None, totim=None, timeunit='D'):
+        """
+        Get a pandas dataframe of observation data from the output file.
+
+        Parameters
+        ----------
+        start_datetime : str
+            If start_datetime is passed as None, the rows are indexed on totim.
+            Otherwise, a DatetimeIndex is set. (default is 1-1-1970).
+        idx : int
+            The zero-based record number. The first record is record 0.
+            If idx and totim are both None, a dataframe with all simulation
+            times is returned. (default is None)
+        obsname : string
+            The name of the observation to return. If obsname is None, all
+            observation data are returned. (default is None)
+        totim : float
+            The simulation time to return. If idx and totim are both None,
+            a dataframe with all simulation times is returned.
+            (default is None)
+        timeunit : string
+            time unit of the simulation time. Valid values are 'S'econds,
+            'M'inutes, 'H'ours, 'D'ays, 'Y'ears. (default is 'D').
+
+        Returns
+        -------
+        out : pandas dataframe
+            Pandas dataframe of selected data.
+
+        See Also
+        --------
+
+        Notes
+        -----
+        If both idx and obsname are None, will return all of the observation
+        data as a dataframe.
+
+        Examples
+        --------
+        >>> hyd = HydmodObs("my_model.hyd")
+        >>> df = hyd.get_dataframe()
+
+        """
+
+        try:
+            import pandas as pd
+            from ..utils.utils_def import totim_to_datetime
+        except Exception as e:
+            msg = "ObsFiles.get_dataframe() error importing pandas: " + str(e)
+            raise ImportError(msg)
+
+        i0 = 0
+        i1 = self.data.shape[0]
+        if totim is not None:
+            idx = np.where(self.data['totim'] == totim)[0][0]
+            i0 = idx
+            i1 = idx + 1
+        elif idx is not None:
+            if idx < i1:
+                i0 = idx
+            i1 = i0 + 1
+
+        if obsname is None:
+            obsname = self.get_obsnames()
+        else:
+            if obsname is not None:
+                if obsname not in self.data.dtype.names:
+                    obsname = None
+                else:
+                    if not isinstance(obsname, list):
+                        obsname = [obsname]
+        if obsname is None:
+            return None
+
+        obsname.insert(0, 'totim')
+
+        dti = self.get_times()[i0:i1]
+        if start_datetime is not None:
+            dti = totim_to_datetime(dti,
+                                    start=pd.to_datetime(start_datetime),
+                                    timeunit=timeunit)
+
+        df = pd.DataFrame(self.data[i0:i1], index=dti, columns=obsname)
+        return df
+
+    def _read_data(self):
+
+        if self.data is not None:
+            return
+
+        while True:
+            try:
+                r = self.read_record(count=1)
+                if self.data is None:
+                    self.data = r.copy()
+                elif r.size == 0:
+                    break
+                else:
+                    # hstack is used because r is a 1-d record array; see
+                    # https://mail.scipy.org/pipermail/numpy-discussion/2010-June/051107.html
+                    self.data = np.hstack((self.data, r))
+            except Exception:
+                break
+        return
+
+    def _build_dtype(self):
+        """
+        Build the recordarray and iposarray, which maps the header information
+        to the position in the formatted file.
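A hedged sketch of get_dataframe() usage; pandas is required, and the file name is hypothetical:

```python
import flopy

hyd = flopy.utils.HydmodObs("my_model.hyd")   # hypothetical file name
df = hyd.get_dataframe(start_datetime="1-1-1970", timeunit="D")
df_totim = hyd.get_dataframe(start_datetime=None)   # index on totim instead
print(df.head())
```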
+        """
+        raise Exception(
+            'Abstract method _build_dtype called in ObsFiles. '
+            'This method needs to be overridden.')
+
+    def _build_index(self):
+        """
+        Build the recordarray and iposarray, which maps the header information
+        to the position in the formatted file.
+        """
+        raise Exception(
+            'Abstract method _build_index called in ObsFiles. '
+            'This method needs to be overridden.')
+
+
+class Mf6Obs(ObsFiles):
+    """
+    Mf6Obs Class - used to read ascii and binary MODFLOW 6 observation
+    output
+
+    Parameters
+    ----------
+    filename : str
+        Name of the MODFLOW 6 observation output file
+    verbose : boolean
+        If true, print additional information to the screen during the
+        extraction. (default is False)
+    isBinary : boolean
+        If true, the file is read as binary observation output; if false,
+        it is read as comma-separated text. (default is True)
+
+    Returns
+    -------
+    None
+
+    """
+
+    def __init__(self, filename, verbose=False, isBinary=True):
+        """
+        Class constructor.
+
+        """
+        super(Mf6Obs, self).__init__()
+        # initialize class information
+        self.verbose = verbose
+        if isBinary:
+            # open binary observation output file
+            self.file = open(filename, 'rb')
+
+            # read control line
+            cline = self.read_text(nchar=100)
+            precision = 'single'
+            if 'double' in cline[5:11].lower():
+                precision = 'double'
+            self.set_float(precision)
+            lenobsname = int(cline[11:])
+
+            # get number of observations
+            self.nobs = self.read_integer()
+
+            # read obsnames
+            obsnames = []
+            for idx in range(0, self.nobs):
+                cid = self.read_text(lenobsname)
+                obsnames.append(cid)
+            self.obsnames = np.array(obsnames)
+
+            # build dtype
+            self._build_dtype()
+
+            # build index
+            self._build_index()
+
+            self.data = None
+            self._read_data()
+        else:
+            # open text (comma-separated) observation output file
+            self.file = open(filename, 'r')
+
+            # read header line
+            line = self.file.readline()
+            t = line.rstrip().split(',')
+            self.set_float('double')
+
+            # get number of observations
+            self.nobs = len(t) - 1
+
+            # set obsnames
+            obsnames = []
+            for idx in range(1, self.nobs + 1):
+                obsnames.append(t[idx])
+            self.obsnames = np.array(obsnames)
+
+            # build dtype
+            self._build_dtype()
+
+            # build index
+            self._build_index()
+
+            # read ascii data
+            self.data = np.loadtxt(self.file, dtype=self.dtype, delimiter=',',
+                                   ndmin=1)
+        return
+
+    def _build_dtype(self):
+
+        # create dtype
+        dtype = [('totim', self.floattype)]
+        for site in self.obsnames:
+            if not isinstance(site, str):
+                site_name = site.decode().strip()
+            else:
+                site_name = site.strip()
+            dtype.append((site_name, self.floattype))
+        self.dtype = np.dtype(dtype)
+        return
+
+    def _build_index(self):
+        return
+
+
+class HydmodObs(ObsFiles):
+    """
+    HydmodObs Class - used to read binary MODFLOW HYDMOD package output
+
+    Parameters
+    ----------
+    filename : str
+        Name of the hydmod output file
+    verbose : boolean
+        If true, print additional information to the screen during the
+        extraction. (default is False)
+    hydlbl_len : int
+        Length of hydmod labels. (default is 20)
+
+    Returns
+    -------
+    None
+
+    """
+
+    def __init__(self, filename, verbose=False, hydlbl_len=20):
+        """
+        Class constructor.
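A usage sketch for the two Mf6Obs code paths above; the file names are hypothetical, and the class is assumed to be importable from flopy.utils, as the other examples in this module suggest:

```python
import flopy

# binary observation output (the default path)
obs = flopy.utils.Mf6Obs("gwf_model.obs.bin", isBinary=True)
# comma-separated text observation output
obs_csv = flopy.utils.Mf6Obs("gwf_model.obs.csv", isBinary=False)
print(obs.get_obsnames())
```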
+
+        """
+        super(HydmodObs, self).__init__()
+        # initialize class information
+        self.verbose = verbose
+        # open binary head file
+        self.file = open(filename, 'rb')
+        # NHYDTOT, ITMUNI
+        self.nobs = self.read_integer()
+        precision = 'single'
+        if self.nobs < 0:
+            self.nobs = abs(self.nobs)
+            precision = 'double'
+        self.set_float(precision)
+
+        # continue reading the file
+        self.itmuni = self.read_integer()
+        self.v = np.empty(self.nobs, dtype=np.float64)
+        self.v.fill(1.0E+32)
+        ctime = self.read_text(nchar=4)
+        self.hydlbl_len = int(hydlbl_len)
+        # read HYDLBL
+        hydlbl = []
+        for idx in range(0, self.nobs):
+            cid = self.read_text(self.hydlbl_len)
+            hydlbl.append(cid)
+        self.hydlbl = np.array(hydlbl)
+
+        # build dtype
+        self._build_dtype()
+
+        # build index
+        self._build_index()
+
+        self.data = None
+        self._read_data()
+
+    def _build_dtype(self):
+
+        # create dtype
+        dtype = [('totim', self.floattype)]
+        for site in self.hydlbl:
+            if not isinstance(site, str):
+                site_name = site.decode().strip()
+            else:
+                site_name = site.strip()
+            dtype.append((site_name, self.floattype))
+        self.dtype = np.dtype(dtype)
+        return
+
+    def _build_index(self):
+        return
+
+
+class SwrObs(ObsFiles):
+    """
+    Read binary SWR observations output from MODFLOW SWR Process
+    observation files
+
+    Parameters
+    ----------
+    filename : string
+        Name of the SWR Process observation output file
+    precision : string
+        'single' or 'double'. Default is 'double'.
+    verbose : bool
+        Write information to the screen. Default is False.
+
+    Attributes
+    ----------
+
+    Methods
+    -------
+
+    See Also
+    --------
+
+    Notes
+    -----
+
+    Examples
+    --------
+
+    >>> import flopy
+    >>> so = flopy.utils.SwrObs('mymodel.swr.obs')
+
+    """
+
+    def __init__(self, filename, precision='double', verbose=False):
+        """
+        Class constructor.
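The HYDMOD header encodes output precision in the sign of NHYDTOT, as the constructor above shows; a standalone illustration of that convention (the helper name is hypothetical):

```python
def precision_from_nhydtot(nhydtot):
    """Negative NHYDTOT flags double-precision HYDMOD output."""
    if nhydtot < 0:
        return abs(nhydtot), "double"
    return nhydtot, "single"

assert precision_from_nhydtot(-12) == (12, "double")
assert precision_from_nhydtot(12) == (12, "single")
```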
+ + """ + super(SwrObs, self).__init__() + self.set_float(precision=precision) + # initialize class information + self.verbose = verbose + # open binary head file + self.file = open(filename, 'rb') + + # NOBS + self.nobs = self.read_integer() + # read obsnames + obsnames = [] + for idx in range(0, self.nobs): + cid = self.read_text() + if isinstance(cid, bytes): + cid = cid.decode() + obsnames.append(cid.strip()) + self.obs = obsnames + + # read header information + self._build_dtype() + + # build index + self._build_index() + + # read data + self.data = None + self._read_data() + + def _build_dtype(self): + vdata = [('totim', self.floattype)] + for name in self.obs: + vdata.append((str(name), self.floattype)) + self.dtype = np.dtype(vdata) + return + + def _build_index(self): + return + + +def get_selection(data, names): + """ + + Parameters + ---------- + data : numpy recarray + recarray of data to make a selection from + names : string or list of strings + column names to return + + Returns + ------- + out : numpy recarray + recarray with selection + + """ + if not isinstance(names, list): + names = [names] + ierr = 0 + for name in names: + if name not in data.dtype.names: + ierr += 1 + print('Error: {} is not a valid column name'.format(name)) + if ierr > 0: + raise Exception('Error: {} names did not match'.format(ierr)) + + # Valid list of names so make a selection + dtype2 = np.dtype({name: data.dtype.fields[name] for name in names}) + return np.ndarray(data.shape, dtype2, data, 0, data.strides) diff --git a/flopy/utils/optionblock.py b/flopy/utils/optionblock.py index 5d0eaca44f..504d1f7c30 100644 --- a/flopy/utils/optionblock.py +++ b/flopy/utils/optionblock.py @@ -1,490 +1,490 @@ -from collections import OrderedDict -import numpy as np -from ..utils import flopy_io - - -class OptionBlock(object): - """ - Parent class to for option blocks within - Modflow-nwt models. This class contains base - information and routines that can be shared throughout - all option block classes. 
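get_selection() returns a strided view onto the original record array rather than a copy; a self-contained demonstration of the same construction with synthetic data:

```python
import numpy as np

data = np.rec.fromrecords(
    [(1.0, 10.0, 20.0), (2.0, 11.0, 21.0)],
    names=["totim", "obs1", "obs2"],
)
# same construction as get_selection(data, ["totim", "obs2"])
names = ["totim", "obs2"]
dtype2 = np.dtype({name: data.dtype.fields[name] for name in names})
view = np.ndarray(data.shape, dtype2, data, 0, data.strides)
print(view["obs2"])   # -> [20. 21.], no data copied
```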
- - Parameters - ---------- - options_line : str - single line based options string - package : flopy.pakbase.Package instance - valid packages include ModflowWel, ModflowSfr2, ModflowUzf1 - block : bool - flag to write as single line or block type - - """ - nested = "nested" - dtype = "dtype" - n_nested = "nvars" - vars = "vars" - optional = "optional" - - simple_flag = OrderedDict([(dtype, np.bool_), - (nested, False), - (optional, False)]) - simple_str = OrderedDict([(dtype, str), - (nested, False), - (optional, False)]) - simple_float = OrderedDict([(dtype, float), - (nested, False), - (optional, False)]) - simple_int = OrderedDict([(dtype, int), - (nested, False), - (optional, False)]) - - simple_tabfile = OrderedDict([(dtype, np.bool_), - (nested, True), - (n_nested, 2), - (vars, OrderedDict([('numtab', simple_int), - ('maxval', - simple_int)]))]) - - def __init__(self, options_line, package, block=True): - self._context = package._options - self._attr_types = {} - self.options_line = options_line - self.package = package - self.auxillary = [] - self.noprint = False - self.block = block - - self.__build_attr_types() - self._set_attributes() - - @property - def single_line_options(self): - """ - Method to get the single line representation of the - Options Block - - Returns - ------- - t : (str) single line representation of Options - """ - t = repr(self).split("\n") - t = t[1:-2] - return " ".join(t) - - def update_from_package(self, pak): - """ - Updater method to check the package and update - OptionBlock attribute values based on package - values. - - Parameters - ---------- - pak : flopy.package - valid packages include ModflowWel, ModflowSfr2, - and ModflowUzf1 instances - """ - for key, ctx in self._context.items(): - if key in pak.__dict__: - val = pak.__dict__[key] - self.__setattr__(key, val) - if ctx[OptionBlock.nested]: - for k2, ctx2 in ctx[OptionBlock.vars].items(): - if k2 in pak.__dict__: - v2 = pak.__dict__[k2] - self.__setattr__(k2, v2) - - def __repr__(self): - """ - Syntactic sugar that creates a dynamic representation - of the OptionsBlock. 
Makes it very easy to write to file - """ - s = "OPTIONS\n" - for key, ctx in self._context.items(): - try: - val = [] - if ctx[OptionBlock.dtype] == np.bool_: - if not object.__getattribute__(self, key): - continue - else: - val.append(str(key)) - else: - val.append(str(object.__getattribute__(self, key))) - - if ctx[OptionBlock.nested]: - for k, d in ctx[OptionBlock.vars].items(): - if d[OptionBlock.dtype] == np.bool_: - if not object.__getattribute__(self, k): - pass - else: - val.append(str(k)) - else: - v = str(object.__getattribute__(self, k)) - if v == "None" and d[OptionBlock.optional]: - pass - else: - val.append(str((object.__getattribute__(self, - k)))) - - if "None" in val: - pass - else: - s += " ".join(val) - s += "\n" - except: - pass - - s += "END\n" - return s.upper() - - def __setattr__(self, key, value): - """ - Syntactic sugar to allow for dynamic recarray/attribute - interactions and data type enforcement on dynamic attributes - - Parameters - ---------- - key : str - string refering to an attribute - value : object - a python object (int, str, float, bool) that - is consistant with the attribute data type - - """ - err_msg = "Data type must be compatible with {}" - if key in ("_context", "_attr_types", "options_line"): - self.__dict__[key] = value - - elif value is None: - super(OptionBlock, self).__setattr__(key, value) - - elif isinstance(value, np.recarray): - for name in value.dtype.names: - if self._attr_types[name] == np.bool_: - if not isinstance(value, (bool, np.bool_, np.bool)): - raise TypeError(err_msg.format( - self._attr_types[name])) - else: - try: - value = self._attr_types[name](value) - except ValueError: - raise TypeError(err_msg.format( - self._attr_types[name])) - - self.__dict__[name] = value[name][0] - - elif key in self._attr_types: - if self._attr_types[key] == np.bool_: - if not isinstance(value, (bool, np.bool_, np.bool)): - raise TypeError(err_msg.format( - self._attr_types[key])) - else: - try: - value = self._attr_types[key](value) - except ValueError: - raise TypeError(err_msg.format( - self._attr_types[key])) - - self.__dict__[key] = value - - else: - super(OptionBlock, self).__setattr__(key, value) - - def __getattribute__(self, item): - """ - Syntactic sugar that creates recarrays of nested/related items. - Ex. Tabfiles, numtab, and maxval will be returned in a - recarray if the user calls <.tabfiles> - - """ - if item in ("__dict__", "_context", "package"): - value = object.__getattribute__(self, item) - - elif item in object.__getattribute__(self, "_context"): - ctx = object.__getattribute__(self, "_context")[item] - if ctx[OptionBlock.nested]: - vals = [object.__getattribute__(self, item)] - dtypes = [(item, ctx[OptionBlock.dtype])] - for key, d in ctx[OptionBlock.vars].items(): - vals.append(object.__getattribute__(self, key)) - dtypes.append((key, d[OptionBlock.dtype])) - - if not vals[0]: - value = False - elif None in vals: - value = vals[0] - else: - value = np.recarray((1,), dtype=dtypes) - value[0] = tuple(vals) - - else: - value = object.__getattribute__(self, item) - else: - value = object.__getattribute__(self, item) - - return value - - def __build_attr_types(self): - """ - Method to build a type dictionary for type - enforcements in __setattr__. 
This uses the package's - contex tree to build and enforce attribute - types for the class - - """ - for key, value in self._context.items(): - self._attr_types[key] = value[OptionBlock.dtype] - if OptionBlock.vars in value: - for k, d in value[OptionBlock.vars].items(): - self._attr_types[k] = d[OptionBlock.dtype] - - def _set_attributes(self): - """ - Dynamic attribute creation method. This uses the - package's context tree to build attributes for the class - - """ - # set up all attributes for the class! - for key, ctx in self._context.items(): - if ctx[OptionBlock.dtype] in (np.bool_, bool, np.bool): - self.__setattr__(key, False) - else: - self.__setattr__(key, None) - - if ctx[OptionBlock.nested]: - for k, d in ctx[OptionBlock.vars].items(): - if d[OptionBlock.dtype] in (np.bool_, bool, np.bool): - self.__setattr__(k, False) - else: - self.__setattr__(k, None) - - t = self.options_line.split() - nested = False - ix = 0 - while ix < len(t): - if not nested: - if t[ix] in self._context: - key = t[ix] - ctx = self._context[key] - dtype = ctx[OptionBlock.dtype] - nested = ctx[OptionBlock.nested] - - OptionUtil.isvalid(dtype, t[ix]) - - if dtype == np.bool_: - self.__setattr__(key, True) - else: - self.__setattr__(key, dtype(t[ix])) - - ix += 1 - - else: - err_msg = "Option: {} not a valid option".format(t[ix]) - raise KeyError(err_msg) - - else: - ctx = self._context[t[ix - 1]] - for key, d in ctx[OptionBlock.vars].items(): - dtype = d[OptionBlock.dtype] - - if d[OptionBlock.optional]: - if ix >= len(t): - continue - else: - try: - OptionUtil.isvalid(dtype, t[ix]) - except TypeError: - continue - - OptionUtil.isvalid(dtype, t[ix]) - - if dtype == np.bool_: - self.__setattr__(key, True) - else: - self.__setattr__(key, dtype(t[ix])) - - ix += 1 - - nested = False - - def write_options(self, f): - """ - Method to write the options block or options line to - an open file object. - - Parameters - f : file, str - open file object, or path to file - """ - if isinstance(f, str): - with open(f, "w") as optfile: - if self.block: - optfile.write(repr(self)) - else: - optfile.write(self.single_line_options) - optfile.write("\n") - else: - if self.block: - f.write(repr(self)) - else: - f.write(self.single_line_options) - f.write("\n") - - @staticmethod - def load_options(options, package): - """ - Loader for the options class. 
Reads in an options - block and uses context from option util dictionaries - to check the validity of the data - - Parameters - ---------- - options: str or file - string path to a file or file object - package : flopy.package type - valid packages include flopy.modflow.ModflowWel, - flopy.modflow.ModflowUzf1, flopy.modflow.ModflowSfr2 - - Returns - ------- - OptionBlock object - - """ - context = package._options - - openfile = not hasattr(options, 'read') - if openfile: - try: - options = open(options, "r") - except IOError: - err_msg = "Unrecognized type for options" \ - " variable: {}".format(type(options)) - raise TypeError(err_msg) - - option_line = "" - while True: - line = flopy_io.line_strip(options.readline()) - line = line.lower() - if not line: - continue - - if line.split()[0] == "options": - pass - - elif line.split()[0] != "end": - t = line.split() - if t[0] in context: - key = t[0] - option_line += key + " " - ctx = context[key] - - if ctx[OptionBlock.nested]: - ix = 1 - - for k, d in ctx[OptionBlock.vars].items(): - if ix >= len(t) and d[OptionBlock.optional]: - continue - - if d[OptionBlock.dtype] == float: - valid = OptionUtil.isfloat(t[ix]) - elif d[OptionBlock.dtype] == int: - valid = OptionUtil.isint(t[ix]) - else: - valid = True - - if not valid: - err_msg = "Invalid type set to variable " \ - "{} in option block".format(k) - raise TypeError(err_msg) - - option_line += t[ix] + " " - ix += 1 - - else: - if openfile: - options.close() - return OptionBlock(options_line=option_line, - package=package) - - -class OptionUtil(object): - - @staticmethod - def isfloat(s): - """ - Simple method to check that a string is a valid - floating point variable - - Parameters - ---------- - s : str - - Returns - ------- - bool - - """ - try: - float(s) - return True - except ValueError: - return False - - @staticmethod - def isint(s): - """ - Simple data check method to check that a string - is a valid integer - - Parameters - ---------- - s : str - - Returns - ------- - bool - - """ - try: - float(s) - return True - except ValueError: - return False - - @staticmethod - def isvalid(dtype, val): - """ - Check to see if a dtype is valid before setting - as an attribute - - Parameters - ---------- - dtype : type - int, float, str, bool, etc... - val : string - - Returns - ------- - bool - - """ - valid = False - if dtype == np.bool_: - valid = True - elif dtype == str: - valid = True - else: - # check if valid - if dtype == int: - valid = OptionUtil.isint(val) - elif dtype == float: - valid = OptionUtil.isfloat(val) - else: - pass - - if not valid: - err_msg = "Invalid type set to variable " \ - "{} in option block".format(val) - raise TypeError(err_msg) - - return valid +from collections import OrderedDict +import numpy as np +from ..utils import flopy_io + + +class OptionBlock(object): + """ + Parent class to for option blocks within + Modflow-nwt models. This class contains base + information and routines that can be shared throughout + all option block classes. 
+ + Parameters + ---------- + options_line : str + single line based options string + package : flopy.pakbase.Package instance + valid packages include ModflowWel, ModflowSfr2, ModflowUzf1 + block : bool + flag to write as single line or block type + + """ + nested = "nested" + dtype = "dtype" + n_nested = "nvars" + vars = "vars" + optional = "optional" + + simple_flag = OrderedDict([(dtype, np.bool_), + (nested, False), + (optional, False)]) + simple_str = OrderedDict([(dtype, str), + (nested, False), + (optional, False)]) + simple_float = OrderedDict([(dtype, float), + (nested, False), + (optional, False)]) + simple_int = OrderedDict([(dtype, int), + (nested, False), + (optional, False)]) + + simple_tabfile = OrderedDict([(dtype, np.bool_), + (nested, True), + (n_nested, 2), + (vars, OrderedDict([('numtab', simple_int), + ('maxval', + simple_int)]))]) + + def __init__(self, options_line, package, block=True): + self._context = package._options + self._attr_types = {} + self.options_line = options_line + self.package = package + self.auxillary = [] + self.noprint = False + self.block = block + + self.__build_attr_types() + self._set_attributes() + + @property + def single_line_options(self): + """ + Method to get the single line representation of the + Options Block + + Returns + ------- + t : (str) single line representation of Options + """ + t = repr(self).split("\n") + t = t[1:-2] + return " ".join(t) + + def update_from_package(self, pak): + """ + Updater method to check the package and update + OptionBlock attribute values based on package + values. + + Parameters + ---------- + pak : flopy.package + valid packages include ModflowWel, ModflowSfr2, + and ModflowUzf1 instances + """ + for key, ctx in self._context.items(): + if key in pak.__dict__: + val = pak.__dict__[key] + self.__setattr__(key, val) + if ctx[OptionBlock.nested]: + for k2, ctx2 in ctx[OptionBlock.vars].items(): + if k2 in pak.__dict__: + v2 = pak.__dict__[k2] + self.__setattr__(k2, v2) + + def __repr__(self): + """ + Syntactic sugar that creates a dynamic representation + of the OptionsBlock. 
Makes it very easy to write to file + """ + s = "OPTIONS\n" + for key, ctx in self._context.items(): + try: + val = [] + if ctx[OptionBlock.dtype] == np.bool_: + if not object.__getattribute__(self, key): + continue + else: + val.append(str(key)) + else: + val.append(str(object.__getattribute__(self, key))) + + if ctx[OptionBlock.nested]: + for k, d in ctx[OptionBlock.vars].items(): + if d[OptionBlock.dtype] == np.bool_: + if not object.__getattribute__(self, k): + pass + else: + val.append(str(k)) + else: + v = str(object.__getattribute__(self, k)) + if v == "None" and d[OptionBlock.optional]: + pass + else: + val.append(str((object.__getattribute__(self, + k)))) + + if "None" in val: + pass + else: + s += " ".join(val) + s += "\n" + except: + pass + + s += "END\n" + return s.upper() + + def __setattr__(self, key, value): + """ + Syntactic sugar to allow for dynamic recarray/attribute + interactions and data type enforcement on dynamic attributes + + Parameters + ---------- + key : str + string refering to an attribute + value : object + a python object (int, str, float, bool) that + is consistant with the attribute data type + + """ + err_msg = "Data type must be compatible with {}" + if key in ("_context", "_attr_types", "options_line"): + self.__dict__[key] = value + + elif value is None: + super(OptionBlock, self).__setattr__(key, value) + + elif isinstance(value, np.recarray): + for name in value.dtype.names: + if self._attr_types[name] == np.bool_: + if not isinstance(value, (bool, np.bool_, np.bool)): + raise TypeError(err_msg.format( + self._attr_types[name])) + else: + try: + value = self._attr_types[name](value) + except ValueError: + raise TypeError(err_msg.format( + self._attr_types[name])) + + self.__dict__[name] = value[name][0] + + elif key in self._attr_types: + if self._attr_types[key] == np.bool_: + if not isinstance(value, (bool, np.bool_, np.bool)): + raise TypeError(err_msg.format( + self._attr_types[key])) + else: + try: + value = self._attr_types[key](value) + except ValueError: + raise TypeError(err_msg.format( + self._attr_types[key])) + + self.__dict__[key] = value + + else: + super(OptionBlock, self).__setattr__(key, value) + + def __getattribute__(self, item): + """ + Syntactic sugar that creates recarrays of nested/related items. + Ex. Tabfiles, numtab, and maxval will be returned in a + recarray if the user calls <.tabfiles> + + """ + if item in ("__dict__", "_context", "package"): + value = object.__getattribute__(self, item) + + elif item in object.__getattribute__(self, "_context"): + ctx = object.__getattribute__(self, "_context")[item] + if ctx[OptionBlock.nested]: + vals = [object.__getattribute__(self, item)] + dtypes = [(item, ctx[OptionBlock.dtype])] + for key, d in ctx[OptionBlock.vars].items(): + vals.append(object.__getattribute__(self, key)) + dtypes.append((key, d[OptionBlock.dtype])) + + if not vals[0]: + value = False + elif None in vals: + value = vals[0] + else: + value = np.recarray((1,), dtype=dtypes) + value[0] = tuple(vals) + + else: + value = object.__getattribute__(self, item) + else: + value = object.__getattribute__(self, item) + + return value + + def __build_attr_types(self): + """ + Method to build a type dictionary for type + enforcements in __setattr__. 
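A sketch of the nested-attribute behavior implemented above, assuming (as simple_tabfile suggests) that ModflowWel's _options context includes a nested TABFILES entry; the options line itself is hypothetical:

```python
import flopy
from flopy.utils.optionblock import OptionBlock

opts = OptionBlock("tabfiles 2 100", flopy.modflow.ModflowWel)
# nested entries come back bundled in a one-row recarray
print(opts.tabfiles)   # -> rec.array([(True, 2, 100)], ...)
print(opts.numtab)     # -> 2
```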
This uses the package's + contex tree to build and enforce attribute + types for the class + + """ + for key, value in self._context.items(): + self._attr_types[key] = value[OptionBlock.dtype] + if OptionBlock.vars in value: + for k, d in value[OptionBlock.vars].items(): + self._attr_types[k] = d[OptionBlock.dtype] + + def _set_attributes(self): + """ + Dynamic attribute creation method. This uses the + package's context tree to build attributes for the class + + """ + # set up all attributes for the class! + for key, ctx in self._context.items(): + if ctx[OptionBlock.dtype] in (np.bool_, bool, np.bool): + self.__setattr__(key, False) + else: + self.__setattr__(key, None) + + if ctx[OptionBlock.nested]: + for k, d in ctx[OptionBlock.vars].items(): + if d[OptionBlock.dtype] in (np.bool_, bool, np.bool): + self.__setattr__(k, False) + else: + self.__setattr__(k, None) + + t = self.options_line.split() + nested = False + ix = 0 + while ix < len(t): + if not nested: + if t[ix] in self._context: + key = t[ix] + ctx = self._context[key] + dtype = ctx[OptionBlock.dtype] + nested = ctx[OptionBlock.nested] + + OptionUtil.isvalid(dtype, t[ix]) + + if dtype == np.bool_: + self.__setattr__(key, True) + else: + self.__setattr__(key, dtype(t[ix])) + + ix += 1 + + else: + err_msg = "Option: {} not a valid option".format(t[ix]) + raise KeyError(err_msg) + + else: + ctx = self._context[t[ix - 1]] + for key, d in ctx[OptionBlock.vars].items(): + dtype = d[OptionBlock.dtype] + + if d[OptionBlock.optional]: + if ix >= len(t): + continue + else: + try: + OptionUtil.isvalid(dtype, t[ix]) + except TypeError: + continue + + OptionUtil.isvalid(dtype, t[ix]) + + if dtype == np.bool_: + self.__setattr__(key, True) + else: + self.__setattr__(key, dtype(t[ix])) + + ix += 1 + + nested = False + + def write_options(self, f): + """ + Method to write the options block or options line to + an open file object. + + Parameters + f : file, str + open file object, or path to file + """ + if isinstance(f, str): + with open(f, "w") as optfile: + if self.block: + optfile.write(repr(self)) + else: + optfile.write(self.single_line_options) + optfile.write("\n") + else: + if self.block: + f.write(repr(self)) + else: + f.write(self.single_line_options) + f.write("\n") + + @staticmethod + def load_options(options, package): + """ + Loader for the options class. 
+        Reads in an options
+        block and uses context from option util dictionaries
+        to check the validity of the data
+
+        Parameters
+        ----------
+        options : str or file
+            string path to a file or file object
+        package : flopy.package type
+            valid packages include flopy.modflow.ModflowWel,
+            flopy.modflow.ModflowUzf1, flopy.modflow.ModflowSfr2
+
+        Returns
+        -------
+        OptionBlock object
+
+        """
+        context = package._options
+
+        openfile = not hasattr(options, 'read')
+        if openfile:
+            try:
+                options = open(options, "r")
+            except IOError:
+                err_msg = "Unrecognized type for options" \
+                          " variable: {}".format(type(options))
+                raise TypeError(err_msg)
+
+        option_line = ""
+        while True:
+            line = flopy_io.line_strip(options.readline())
+            line = line.lower()
+            if not line:
+                continue
+
+            if line.split()[0] == "options":
+                pass
+
+            elif line.split()[0] != "end":
+                t = line.split()
+                if t[0] in context:
+                    key = t[0]
+                    option_line += key + " "
+                    ctx = context[key]
+
+                    if ctx[OptionBlock.nested]:
+                        ix = 1
+
+                        for k, d in ctx[OptionBlock.vars].items():
+                            if ix >= len(t) and d[OptionBlock.optional]:
+                                continue
+
+                            if d[OptionBlock.dtype] == float:
+                                valid = OptionUtil.isfloat(t[ix])
+                            elif d[OptionBlock.dtype] == int:
+                                valid = OptionUtil.isint(t[ix])
+                            else:
+                                valid = True
+
+                            if not valid:
+                                err_msg = "Invalid type set to variable " \
+                                          "{} in option block".format(k)
+                                raise TypeError(err_msg)
+
+                            option_line += t[ix] + " "
+                            ix += 1
+
+            else:
+                if openfile:
+                    options.close()
+                return OptionBlock(options_line=option_line,
+                                   package=package)
+
+
+class OptionUtil(object):
+
+    @staticmethod
+    def isfloat(s):
+        """
+        Simple method to check that a string is a valid
+        floating point variable
+
+        Parameters
+        ----------
+        s : str
+
+        Returns
+        -------
+        bool
+
+        """
+        try:
+            float(s)
+            return True
+        except ValueError:
+            return False
+
+    @staticmethod
+    def isint(s):
+        """
+        Simple data check method to check that a string
+        is a valid integer
+
+        Parameters
+        ----------
+        s : str
+
+        Returns
+        -------
+        bool
+
+        """
+        try:
+            int(s)
+            return True
+        except ValueError:
+            return False
+
+    @staticmethod
+    def isvalid(dtype, val):
+        """
+        Check to see if a dtype is valid before setting
+        as an attribute
+
+        Parameters
+        ----------
+        dtype : type
+            int, float, str, bool, etc...
+        val : string
+
+        Returns
+        -------
+        bool
+
+        """
+        valid = False
+        if dtype == np.bool_:
+            valid = True
+        elif dtype == str:
+            valid = True
+        else:
+            # check if valid
+            if dtype == int:
+                valid = OptionUtil.isint(val)
+            elif dtype == float:
+                valid = OptionUtil.isfloat(val)
+            else:
+                pass
+
+        if not valid:
+            err_msg = "Invalid type for value " \
+                      "{} in option block".format(val)
+            raise TypeError(err_msg)
+
+        return valid
diff --git a/flopy/utils/rasters.py b/flopy/utils/rasters.py
index 686b153eac..ee36449543 100644
--- a/flopy/utils/rasters.py
+++ b/flopy/utils/rasters.py
@@ -1,852 +1,852 @@
-import numpy as np
-
-try:
-    import rasterio
-except ImportError:
-    rasterio = None
-
-try:
-    import affine
-except ImportError:
-    affine = None
-
-try:
-    import scipy
-except ImportError:
-    scipy = None
-
-try:
-    import shapely
-except ImportError:
-    shapely = None
-
-class Raster(object):
-    """
-    The Raster object is used for cropping, sampling raster values,
-    and re-sampling raster values to grids, and provides methods to
-    plot rasters and histograms of raster digital numbers for visualization
-    and analysis purposes.
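A hedged round-trip sketch for load_options()/write_options(); the file names are hypothetical and the package class follows the examples named in the docstring above:

```python
import flopy
from flopy.utils.optionblock import OptionBlock

with open("wel_options.txt", "w") as f:
    f.write("OPTIONS\nTABFILES 2 100\nEND\n")

opts = OptionBlock.load_options("wel_options.txt", flopy.modflow.ModflowWel)
opts.write_options("wel_options_copy.txt")   # writes an OPTIONS...END block
```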
- - Parameters - ---------- - array : np.ndarray - a three dimensional array of raster values with dimensions - defined by (raster band, nrow, ncol) - bands : tuple - a tuple of raster bands - crs : int, string, rasterio.crs.CRS object - either a epsg code, a proj4 string, or a CRS object - transform : affine.Affine object - affine object, which is used to define geometry - nodataval : float - raster no data value - rio_ds : DatasetReader object - rasterIO dataset Reader object - - Notes - ----- - - - Examples - -------- - >>> from flopy.utils import Raster - >>> - >>> rio = Raster.load("myraster.tif") - - """ - FLOAT32 = (float, np.float, np.float32, np.float_) - FLOAT64 = (np.float64,) - INT8 = (np.int8,) - INT16 = (np.int16,) - INT32 = (int, np.int, np.int32, np.int_) - INT64 = (np.int64,) - - def __init__(self, array, bands, crs, transform, - nodataval, driver="GTiff", rio_ds=None): - if rasterio is None: - msg = 'Raster(): error ' + \ - 'importing rasterio - try "pip install rasterio"' - raise ImportError(msg) - else: - from rasterio.crs import CRS - - if affine is None: - msg = 'Raster(): error ' + \ - 'importing affine - try "pip install affine"' - raise ImportError(msg) - - self._array = array - self._bands = bands - - meta = {"driver": driver, - "nodata": nodataval} - - # create metadata dictionary - if array.dtype in Raster.FLOAT32: - dtype = "float32" - elif array.dtype in Raster.FLOAT64: - dtype = "float64" - elif array.dtype in Raster.INT8: - dtype = "int8" - elif array.dtype in Raster.INT16: - dtype = "int16" - elif array.dtype in Raster.INT32: - dtype = "int32" - elif array.dtype in Raster.INT64: - dtype = "int64" - else: - raise TypeError("dtype cannot be determined from Raster") - - meta['dtype'] = dtype - - if isinstance(crs, CRS): - pass - elif isinstance(crs, int): - crs = CRS.from_epsg(crs) - elif isinstance(crs, str): - crs = CRS.from_string(crs) - else: - TypeError("crs type not understood, provide an epsg or proj4") - - meta['crs'] = crs - - count, height, width = array.shape - meta['count'] = count - meta['height'] = height - meta['width'] = width - - if not isinstance(transform, affine.Affine): - raise TypeError("Transform must be defined by an Affine object") - - meta['transform'] = transform - - self._meta = meta - self._dataset = None - self.__arr_dict = {self._bands[b]: arr for - b, arr in enumerate(self._array)} - - self.__xcenters = None - self.__ycenters = None - - if isinstance(rio_ds, rasterio.io.DatasetReader): - self._dataset = rio_ds - - @property - def bounds(self): - """ - Returns a tuple of xmin, xmax, ymin, ymax boundaries - """ - height = self._meta['height'] - width = self._meta['width'] - transform = self._meta['transform'] - xmin = transform[2] - ymax = transform[5] - xmax, ymin = transform * (width, height) - - return xmin, xmax, ymin, ymax - - @property - def bands(self): - """ - Returns a tuple of raster bands - """ - if self._dataset is None: - return tuple(self._bands) - else: - return self._dataset.indexes - - @property - def nodatavals(self): - """ - Returns a Tuple of values used to define no data - """ - if self._dataset is None: - if isinstance(self._meta["nodata"], list): - nodata = tuple(self._meta['nodata']) - elif isinstance(self._meta["nodata"], tuple): - nodata = self._meta["nodata"] - else: - nodata = (self._meta["nodata"],) - return nodata - else: - return self._dataset.nodatavals - - @property - def xcenters(self): - """ - Returns a np.ndarray of raster x cell centers - """ - if self.__xcenters is None: - self.__xycenters() - 
return self.__xcenters - - @property - def ycenters(self): - """ - Returns a np.ndarray of raster y cell centers - """ - if self.__ycenters is None: - self.__xycenters() - return self.__ycenters - - def __xycenters(self): - """ - Method to create np.arrays of the xy-cell centers - in the raster object - """ - arr = None - for _, arr in self.__arr_dict.items(): - break - - if arr is None: - raise AssertionError("No array data was found") - - ylen, xlen = arr.shape - - # assume that transform is an unrotated plane - # if transform indicates a rotated plane additional - # processing will need to be added in this portion of the code - xd = abs(self._meta["transform"][0]) - yd = abs(self._meta["transform"][4]) - x0, x1, y0, y1 = self.bounds - - # adjust bounds to centroids - x0 += xd / 2. - x1 -= xd / 2. - y0 += yd / 2. - y1 -= yd / 2. - - x = np.linspace(x0, x1, xlen) - y = np.linspace(y1, y0, ylen) - self.__xcenters, self.__ycenters = np.meshgrid(x, y) - - def sample_point(self, x, y, band): - """ - Method to get nearest raster value at a user provided - point - - Parameters - ---------- - x : float - x coordinate - y : float - y coordinate - band : int - raster band to re-sample - - Returns - ------- - value : float - """ - # 1: get grid. - rxc = self.xcenters - ryc = self.ycenters - - # 2: apply distance equation - xt = (rxc - x) ** 2 - yt = (ryc - y) ** 2 - dist = np.sqrt(xt + yt) - - # 3: find indices of minimum distance - md = np.where(dist == np.nanmin(dist)) - - # 4: sample the array and average if necessary - vals = [] - arr = self.get_array(band) - for ix, i in enumerate(md[0]): - j = md[1][ix] - vals.append(arr[i, j]) - - value = np.nanmean(vals) - - return value - - def sample_polygon(self, polygon, band, invert=False): - """ - Method to get an unordered list of raster values that are located - within a arbitrary polygon - - Parameters - ---------- - polygon : (shapely.geometry.Polygon or GeoJSON-like dict) - The values should be a GeoJSON-like dict or object - implements the Python geo interface protocal. - - Alternatively if the user supplies the vectors - of a polygon in the format [(x0, y0), ..., (xn, yn)] - a single shapely polygon will be created for - cropping the data - - band : int - raster band to re-sample - - invert : bool - Default value is False. If invert is True then the - area inside the shapes will be masked out - - Returns - ------- - np.ndarray of unordered raster values - - """ - if band not in self.bands: - err = "Band number is not recognized, use self.bands for a list " \ - "of raster bands" - raise AssertionError(err) - - if self._dataset is not None: - arr_dict = self._sample_rio_dataset(polygon, invert)[0] - - for b, arr in arr_dict.items(): - for val in self.nodatavals: - t = arr[arr != val] - arr_dict[b] = t - - else: - mask = self._intersection(polygon, invert) - - arr_dict = {} - for b, arr in self.__arr_dict.items(): - t = arr[mask] - arr_dict[b] = t - - return arr_dict[band] - - def resample_to_grid(self, xc, yc, band, method="nearest"): - """ - Method to resample the raster data to a - user supplied grid of x, y coordinates. 
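A usage sketch for the two samplers above; the raster name, band number, and coordinates are hypothetical, and rasterio must be installed for Raster.load() (shapely for the polygon sampler):

```python
from flopy.utils import Raster

rio = Raster.load("myraster.tif")                    # hypothetical GeoTIFF
val = rio.sample_point(625000.0, 2860000.0, band=1)  # nearest cell value
square = [(624000.0, 2859000.0), (626000.0, 2859000.0),
          (626000.0, 2861000.0), (624000.0, 2861000.0)]
vals = rio.sample_polygon(square, band=1)            # unordered values inside
```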
- - x, y coordinate arrays should correspond - to grid vertices - - Parameters - ---------- - xc : np.ndarray or list - an array of x-cell centers - yc : np.ndarray or list - an array of y-cell centers - band : int - raster band to re-sample - method : str - scipy interpolation method options - - "linear" for bi-linear interpolation - "nearest" for nearest neighbor - "cubic" for bi-cubic interpolation - - Returns - ------- - np.array - """ - if scipy is None: - print('Raster().resample_to_grid(): error ' + \ - 'importing scipy - try "pip install scipy"') - else: - from scipy.interpolate import griddata - - data_shape = xc.shape - xc = xc.flatten() - yc = yc.flatten() - # step 1: create grid from raster bounds - rxc = self.xcenters - ryc = self.ycenters - - # step 2: flatten grid - rxc = rxc.flatten() - ryc = ryc.flatten() - - # step 3: get array - if method == "cubic": - arr = self.get_array(band, masked=False) - else: - arr = self.get_array(band, masked=True) - arr = arr.flatten() - - # step 3: use griddata interpolation to snap to grid - data = griddata((rxc, ryc), arr, (xc, yc), method=method) - - # step 4: return grid to user in shape provided - data.shape = data_shape - - # step 5: re-apply nodata values - data[np.isnan(data)] = self.nodatavals[0] - - return data - - def crop(self, polygon, invert=False): - """ - Method to crop a new raster object - from the current raster object - - Parameters - ---------- - polygon : (shapely.geometry.Polygon or GeoJSON-like dict) - The values should be a GeoJSON-like dict or object - implements the Python geo interface protocal. - - Alternatively if the user supplies the vectors - of a polygon in the format [(x0, y0), ..., (xn, yn)] - a single shapely polygon will be created for - cropping the data - - invert : bool - Default value is False. 
If invert is True then the - area inside the shapes will be masked out - - """ - if self._dataset is not None: - arr_dict, rstr_crp_meta = self._sample_rio_dataset(polygon, invert) - self.__arr_dict = arr_dict - self._meta = rstr_crp_meta - self._dataset = None - self.__xcenters = None - self.__ycenters = None - - else: - # crop from user supplied points using numpy - if rasterio is None: - msg = 'Raster().crop(): error ' + \ - 'importing rasterio try "pip install rasterio"' - raise ImportError(msg) - else: - from rasterio.mask import mask - - if affine is None: - msg = 'Raster(),crop(): error ' + \ - 'importing affine - try "pip install affine"' - raise ImportError(msg) - else: - from affine import Affine - - mask = self._intersection(polygon, invert) - - xc = self.xcenters - yc = self.ycenters - # step 4: find bounding box - xba = np.copy(xc) - yba = np.copy(yc) - xba[~mask] = np.nan - yba[~mask] = np.nan - - xmin = np.nanmin(xba) - xmax = np.nanmax(xba) - ymin = np.nanmin(yba) - ymax = np.nanmax(yba) - - bbox = [(xmin, ymin), - (xmin, ymax), - (xmax, ymax), - (xmax, ymin)] - - # step 5: use bounding box to crop array - xind = [] - yind = [] - for pt in bbox: - xt = (pt[0] - xc) ** 2 - yt = (pt[1] - yc) ** 2 - hypot = np.sqrt(xt + yt) - ind = np.where(hypot == np.min(hypot)) - yind.append(ind[0][0]) - xind.append(ind[1][0]) - - xmii = np.min(xind) - xmai = np.max(xind) - ymii = np.min(yind) - ymai = np.max(yind) - - crp_mask = mask[ymii:ymai + 1, xmii:xmai + 1] - nodata = self._meta["nodata"] - if not isinstance(nodata, float) and not isinstance(nodata, int): - try: - nodata = nodata[0] - except (IndexError, TypeError): - nodata = -1.0e+38 - self._meta["nodata"] = nodata - - arr_dict = {} - for band, arr in self.__arr_dict.items(): - t = arr[ymii:ymai + 1, xmii:xmai + 1] - t[~crp_mask] = nodata - arr_dict[band] = t - - self.__arr_dict = arr_dict - - # adjust xmin, ymax back to appropriate grid locations - xd = abs(self._meta["transform"][0]) - yd = abs(self._meta["transform"][4]) - xmin -= xd / 2. - ymax += yd / 2. - - # step 6: update metadata including a new Affine - self._meta["height"] = crp_mask.shape[0] - self._meta["width"] = crp_mask.shape[1] - transform = self._meta['transform'] - self._meta["transform"] = Affine(transform[0], transform[1], xmin, - transform[3], transform[4], ymax) - self.__xcenters = None - self.__ycenters = None - - def _sample_rio_dataset(self, polygon, invert): - """ - Internal method to sample a rasterIO dataset using - rasterIO built ins - - Parameters - ---------- - polygon : (shapely.geometry.Polygon or GeoJSON-like dict) - The values should be a GeoJSON-like dict or object - implements the Python geo interface protocal. - - Alternatively if the user supplies the vectors - of a polygon in the format [(x0, y0), ..., (xn, yn)] - a single shapely polygon will be created for - cropping the data - - invert : bool - Default value is False. 
If invert is True then the - area inside the shapes will be masked out - - Returns - ------- - tuple : (arr_dict, raster_crp_meta) - - """ - if rasterio is None: - msg = 'Raster()._sample_rio_dataset(): error ' + \ - 'importing rasterio try "pip install rasterio"' - raise ImportError(msg) - else: - from rasterio.mask import mask - - if shapely is None: - msg = 'Raster()._sample_rio_dataset(): error ' + \ - 'importing shapely - try "pip install shapely"' - raise ImportError(msg) - else: - from shapely import geometry - - - if isinstance(polygon, list) or isinstance(polygon, np.ndarray): - shapes = [geometry.Polygon([[x, y] for x, y in polygon])] - - else: - shapes = [polygon] - - rstr_crp, rstr_crp_affine = mask(self._dataset, - shapes, - crop=True, - invert=invert) - - rstr_crp_meta = self._dataset.meta.copy() - rstr_crp_meta.update({"driver": "GTiff", - "height": rstr_crp.shape[1], - "width": rstr_crp.shape[2], - "transform": rstr_crp_affine}) - - arr_dict = {self.bands[b]: arr for b, arr in enumerate(rstr_crp)} - - return arr_dict, rstr_crp_meta - - def _intersection(self, polygon, invert): - """ - Internal method to create an intersection mask, used for cropping - arrays and sampling arrays. - - Parameters - ---------- - polygon : (shapely.geometry.Polygon or GeoJSON-like dict) - The values should be a GeoJSON-like dict or object - implements the Python geo interface protocal. - - Alternatively if the user supplies the vectors - of a polygon in the format [(x0, y0), ..., (xn, yn)] - a single shapely polygon will be created for - cropping the data - - invert : bool - Default value is False. If invert is True then the - area inside the shapes will be masked out - - Returns - ------- - mask : np.ndarray (dtype = bool) - - """ - if shapely is None: - msg = 'Raster()._intersection(): error ' + \ - 'importing shapely try "pip install shapely"' - raise ImportError(msg) - else: - from shapely import geometry - - # step 1: check the data type in shapes - if isinstance(polygon, geometry.Polygon): - polygon = list(polygon.exterior.coords) - - elif isinstance(polygon, dict): - # geojson, get coordinates= - if polygon['geometry']['type'].lower() == "polygon": - polygon = [[x, y] for x, y in - polygon["geometry"]["coordinates"]] - - else: - raise TypeError("Shape type must be a polygon") - - elif isinstance(polygon, np.ndarray): - # numpy array, change to a list - polygon = list(polygon) - - else: - # this is a list of coordinates - pass - - # step 2: create a grid of centoids - xc = self.xcenters - yc = self.ycenters - - # step 3: do intersection - mask = self._point_in_polygon(xc, yc, polygon) - if invert: - mask = np.invert(mask) - - return mask - - @staticmethod - def _point_in_polygon(xc, yc, polygon): - """ - Use the ray casting algorithm to determine if a point - is within a polygon. Enables very fast - intersection calculations! - - Parameters - ---------- - xc : np.ndarray - array of xpoints - yc : np.ndarray - array of ypoints - polygon : iterable (list) - polygon vertices [(x0, y0),....(xn, yn)] - note: polygon can be open or closed - - Returns - ------- - mask: np.array - True value means point is in polygon! 
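The even-odd (ray casting) rule above can be exercised directly with synthetic points; np.errstate hides the benign divide-by-zero warnings that horizontal and zero-length edges produce in this implementation:

```python
import numpy as np
from flopy.utils import Raster

square = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)]
xc = np.array([[0.5, 1.5]])
yc = np.array([[0.5, 0.5]])
with np.errstate(divide="ignore", invalid="ignore"):
    mask = Raster._point_in_polygon(xc, yc, square)
print(mask)   # -> [[ True False]]: (0.5, 0.5) is inside, (1.5, 0.5) is not
```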
- - """ - x0, y0 = polygon[0] - xt, yt = polygon[-1] - - # close polygon if it isn't already - if (x0, y0) != (xt, yt): - polygon.append((x0, y0)) - - ray_count = np.zeros(xc.shape, dtype=int) - num = len(polygon) - j = num - 1 - for i in range(num): - - tmp = polygon[i][0] + (polygon[j][0] - polygon[i][0]) * \ - (yc - polygon[i][1]) / (polygon[j][1] - polygon[i][1]) - - comp = np.where(((polygon[i][1] > yc) ^ (polygon[j][1] > yc)) - & (xc < tmp)) - - j = i - if len(comp[0]) > 0: - ray_count[comp[0], comp[1]] += 1 - - mask = np.ones(xc.shape, dtype=bool) - mask[ray_count % 2 == 0] = False - - return mask - - def get_array(self, band, masked=True): - """ - Method to get a numpy array corresponding to the - provided raster band. Nodata vals are set to - np.NaN - - Parameters - ---------- - band : int - band number from the raster - masked : bool - determines if nodatavals will be returned as np.nan to - the user - - Returns - ------- - np.ndarray - - """ - if band not in self.bands: - raise ValueError("Band {} not a valid value") - - if self._dataset is None: - array = np.copy(self.__arr_dict[band]) - else: - array = self._dataset.read(band) - - if masked: - for v in self.nodatavals: - array[array == v] = np.nan - - return array - - def write(self, name): - """ - Method to write raster data to a .tif - file - - Parameters - ---------- - name : str - output raster .tif file name - - """ - if rasterio is None: - msg = 'Raster().write(): error ' + \ - 'importing rasterio - try "pip install rasterio"' - raise ImportError(msg) - - if not name.endswith(".tif"): - name += ".tif" - - with rasterio.open(name, "w", **self._meta) as foo: - for band, arr in self.__arr_dict.items(): - foo.write(arr, band) - - @staticmethod - def load(raster): - """ - Static method to load a raster file - into the raster object - - Parameters - ---------- - raster : str - - Returns - ------- - Raster object - - """ - if rasterio is None: - msg = 'Raster().load(): error ' + \ - 'importing rasterio - try "pip install rasterio"' - raise ImportError(msg) - - dataset = rasterio.open(raster) - array = dataset.read() - bands = dataset.indexes - meta = dataset.meta - - return Raster(array, bands, meta["crs"], meta['transform'], - meta['nodata'], meta['driver']) - - def plot(self, ax=None, contour=False, **kwargs): - """ - Method to plot raster layers or contours. - - Parameters - ---------- - ax : matplotlib.pyplot.axes - optional matplotlib axes for plotting - contour : bool - flag to indicate creation of contour plot - - **kwargs : - matplotlib keyword arguments - see matplotlib documentation for valid - arguments for plot and contour. 
- - Returns - ------- - ax : matplotlib.pyplot.axes - - """ - if rasterio is None: - msg = 'Raster().plot(): error ' + \ - 'importing rasterio - try "pip install rasterio"' - raise ImportError(msg) - else: - from rasterio.plot import show - - if self._dataset is not None: - ax = show(self._dataset, ax=ax, contour=contour, **kwargs) - - else: - d0 = len(self.__arr_dict) - d1, d2 = None, None - for _, arr in self.__arr_dict.items(): - d1, d2 = arr.shape - - if d1 is None: - raise AssertionError("No plottable arrays found") - - data = np.zeros((d0, d1, d2), dtype=float) - i = 0 - for _, arr in sorted(self.__arr_dict.items()): - data[i, :, :] = arr - i += 1 - - data = np.ma.masked_where(data == self.nodatavals, data) - ax = show(data, ax=ax, contour=contour, - transform=self._meta["transform"], - **kwargs) - - return ax - - def histogram(self, ax=None, **kwargs): - """ - Method to plot a histogram of digital numbers - - Parameters - ---------- - ax : matplotlib.pyplot.axes - optional matplotlib axes for plotting - - **kwargs : - matplotlib keyword arguments - see matplotlib documentation for valid - arguments for histogram - - Returns - ------- - ax : matplotlib.pyplot.axes - - """ - if rasterio is None: - msg = 'Raster().histogram(): error ' + \ - 'importing rasterio - try "pip install rasterio"' - raise ImportError(msg) - else: - from rasterio.plot import show_hist - - if "alpha" not in kwargs: - kwargs["alpha"] = 0.3 - - if self._dataset is not None: - ax = show_hist(self._dataset, ax=ax, **kwargs) - - else: - d0 = len(self.__arr_dict) - d1, d2 = None, None - for _, arr in self.__arr_dict.items(): - d1, d2 = arr.shape - - if d1 is None: - raise AssertionError("No plottable arrays found") - - data = np.zeros((d0, d1, d2), dtype=float) - i = 0 - for _, arr in sorted(self.__arr_dict.items()): - data[i, :, :] = arr - i += 1 - - data = np.ma.masked_where(data == self.nodatavals, data) - ax = show_hist(data, ax=ax, **kwargs) - - return ax +import numpy as np + +try: + import rasterio +except ImportError: + rasterio = None + +try: + import affine +except ImportError: + affine = None + +try: + import scipy +except ImportError: + scipy = None + +try: + import shapely +except ImportError: + shapely = None + +class Raster(object): + """ + The Raster object is used for cropping, sampling raster values, + and re-sampling raster values to grids, and provides methods to + plot rasters and histograms of raster digital numbers for visualization + and analysis purposes. 
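A note on the import block above: each optional dependency is swapped for None when it is missing, and the hard ImportError is deferred to the first method that actually needs the package. A minimal sketch of the same pattern (the function name is illustrative):

    try:
        import rasterio
    except ImportError:
        rasterio = None


    def needs_rasterio():
        # raise at call time rather than import time, so the rest of
        # the package stays importable without the optional dependency
        if rasterio is None:
            raise ImportError('error importing rasterio - '
                              'try "pip install rasterio"')
        return rasterio.__version__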
+
+    Parameters
+    ----------
+    array : np.ndarray
+        a three dimensional array of raster values with dimensions
+        defined by (raster band, nrow, ncol)
+    bands : tuple
+        a tuple of raster bands
+    crs : int, string, rasterio.crs.CRS object
+        either an EPSG code, a PROJ4 string, or a CRS object
+    transform : affine.Affine object
+        affine object, which is used to define geometry
+    nodataval : float
+        raster no data value
+    rio_ds : DatasetReader object
+        rasterio DatasetReader object
+
+    Examples
+    --------
+    >>> from flopy.utils import Raster
+    >>>
+    >>> rio = Raster.load("myraster.tif")
+
+    """
+    FLOAT32 = (float, np.float, np.float32, np.float_)
+    FLOAT64 = (np.float64,)
+    INT8 = (np.int8,)
+    INT16 = (np.int16,)
+    INT32 = (int, np.int, np.int32, np.int_)
+    INT64 = (np.int64,)
+
+    def __init__(self, array, bands, crs, transform,
+                 nodataval, driver="GTiff", rio_ds=None):
+        if rasterio is None:
+            msg = 'Raster(): error ' + \
+                  'importing rasterio - try "pip install rasterio"'
+            raise ImportError(msg)
+        else:
+            from rasterio.crs import CRS
+
+        if affine is None:
+            msg = 'Raster(): error ' + \
+                  'importing affine - try "pip install affine"'
+            raise ImportError(msg)
+
+        self._array = array
+        self._bands = bands
+
+        meta = {"driver": driver,
+                "nodata": nodataval}
+
+        # create metadata dictionary
+        if array.dtype in Raster.FLOAT32:
+            dtype = "float32"
+        elif array.dtype in Raster.FLOAT64:
+            dtype = "float64"
+        elif array.dtype in Raster.INT8:
+            dtype = "int8"
+        elif array.dtype in Raster.INT16:
+            dtype = "int16"
+        elif array.dtype in Raster.INT32:
+            dtype = "int32"
+        elif array.dtype in Raster.INT64:
+            dtype = "int64"
+        else:
+            raise TypeError("dtype cannot be determined from Raster")
+
+        meta['dtype'] = dtype
+
+        if isinstance(crs, CRS):
+            pass
+        elif isinstance(crs, int):
+            crs = CRS.from_epsg(crs)
+        elif isinstance(crs, str):
+            crs = CRS.from_string(crs)
+        else:
+            raise TypeError("crs type not understood, "
+                            "provide an epsg or proj4")
+
+        meta['crs'] = crs
+
+        count, height, width = array.shape
+        meta['count'] = count
+        meta['height'] = height
+        meta['width'] = width
+
+        if not isinstance(transform, affine.Affine):
+            raise TypeError("Transform must be defined by an Affine object")
+
+        meta['transform'] = transform
+
+        self._meta = meta
+        self._dataset = None
+        self.__arr_dict = {self._bands[b]: arr for
+                           b, arr in enumerate(self._array)}
+
+        self.__xcenters = None
+        self.__ycenters = None
+
+        if isinstance(rio_ds, rasterio.io.DatasetReader):
+            self._dataset = rio_ds
+
+    @property
+    def bounds(self):
+        """
+        Returns a tuple of xmin, xmax, ymin, ymax boundaries
+        """
+        height = self._meta['height']
+        width = self._meta['width']
+        transform = self._meta['transform']
+        xmin = transform[2]
+        ymax = transform[5]
+        xmax, ymin = transform * (width, height)
+
+        return xmin, xmax, ymin, ymax
+
+    @property
+    def bands(self):
+        """
+        Returns a tuple of raster bands
+        """
+        if self._dataset is None:
+            return tuple(self._bands)
+        else:
+            return self._dataset.indexes
+
+    @property
+    def nodatavals(self):
+        """
+        Returns a tuple of values used to define no data
+        """
+        if self._dataset is None:
+            if isinstance(self._meta["nodata"], list):
+                nodata = tuple(self._meta['nodata'])
+            elif isinstance(self._meta["nodata"], tuple):
+                nodata = self._meta["nodata"]
+            else:
+                nodata = (self._meta["nodata"],)
+            return nodata
+        else:
+            return self._dataset.nodatavals
+
+    @property
+    def xcenters(self):
+        """
+        Returns a np.ndarray of raster x cell centers
+        """
+        if self.__xcenters is None:
+            self.__xycenters()
+
return self.__xcenters + + @property + def ycenters(self): + """ + Returns a np.ndarray of raster y cell centers + """ + if self.__ycenters is None: + self.__xycenters() + return self.__ycenters + + def __xycenters(self): + """ + Method to create np.arrays of the xy-cell centers + in the raster object + """ + arr = None + for _, arr in self.__arr_dict.items(): + break + + if arr is None: + raise AssertionError("No array data was found") + + ylen, xlen = arr.shape + + # assume that transform is an unrotated plane + # if transform indicates a rotated plane additional + # processing will need to be added in this portion of the code + xd = abs(self._meta["transform"][0]) + yd = abs(self._meta["transform"][4]) + x0, x1, y0, y1 = self.bounds + + # adjust bounds to centroids + x0 += xd / 2. + x1 -= xd / 2. + y0 += yd / 2. + y1 -= yd / 2. + + x = np.linspace(x0, x1, xlen) + y = np.linspace(y1, y0, ylen) + self.__xcenters, self.__ycenters = np.meshgrid(x, y) + + def sample_point(self, x, y, band): + """ + Method to get nearest raster value at a user provided + point + + Parameters + ---------- + x : float + x coordinate + y : float + y coordinate + band : int + raster band to re-sample + + Returns + ------- + value : float + """ + # 1: get grid. + rxc = self.xcenters + ryc = self.ycenters + + # 2: apply distance equation + xt = (rxc - x) ** 2 + yt = (ryc - y) ** 2 + dist = np.sqrt(xt + yt) + + # 3: find indices of minimum distance + md = np.where(dist == np.nanmin(dist)) + + # 4: sample the array and average if necessary + vals = [] + arr = self.get_array(band) + for ix, i in enumerate(md[0]): + j = md[1][ix] + vals.append(arr[i, j]) + + value = np.nanmean(vals) + + return value + + def sample_polygon(self, polygon, band, invert=False): + """ + Method to get an unordered list of raster values that are located + within a arbitrary polygon + + Parameters + ---------- + polygon : (shapely.geometry.Polygon or GeoJSON-like dict) + The values should be a GeoJSON-like dict or object + implements the Python geo interface protocal. + + Alternatively if the user supplies the vectors + of a polygon in the format [(x0, y0), ..., (xn, yn)] + a single shapely polygon will be created for + cropping the data + + band : int + raster band to re-sample + + invert : bool + Default value is False. If invert is True then the + area inside the shapes will be masked out + + Returns + ------- + np.ndarray of unordered raster values + + """ + if band not in self.bands: + err = "Band number is not recognized, use self.bands for a list " \ + "of raster bands" + raise AssertionError(err) + + if self._dataset is not None: + arr_dict = self._sample_rio_dataset(polygon, invert)[0] + + for b, arr in arr_dict.items(): + for val in self.nodatavals: + t = arr[arr != val] + arr_dict[b] = t + + else: + mask = self._intersection(polygon, invert) + + arr_dict = {} + for b, arr in self.__arr_dict.items(): + t = arr[mask] + arr_dict[b] = t + + return arr_dict[band] + + def resample_to_grid(self, xc, yc, band, method="nearest"): + """ + Method to resample the raster data to a + user supplied grid of x, y coordinates. 
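sample_point above is a brute-force nearest-neighbor search over the full grid of cell centers, averaging any centers that tie for the minimum distance. The same idea in isolation, on a small synthetic grid (all numbers invented):

    import numpy as np

    # a 4 x 5 grid of cell centers, as __xycenters() would build it
    rxc, ryc = np.meshgrid(np.linspace(0.5, 4.5, 5),
                           np.linspace(3.5, 0.5, 4))
    arr = np.arange(20, dtype=float).reshape(4, 5)

    # distance from every center to the query point (2.1, 1.9)
    dist = np.sqrt((rxc - 2.1) ** 2 + (ryc - 1.9) ** 2)
    i, j = np.where(dist == np.nanmin(dist))
    value = np.nanmean(arr[i, j])  # nanmean averages equidistant ties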
+
+        x, y coordinate arrays should correspond
+        to grid vertices
+
+        Parameters
+        ----------
+        xc : np.ndarray or list
+            an array of x-cell centers
+        yc : np.ndarray or list
+            an array of y-cell centers
+        band : int
+            raster band to re-sample
+        method : str
+            scipy interpolation method options
+
+            "linear" for bi-linear interpolation
+            "nearest" for nearest neighbor
+            "cubic" for bi-cubic interpolation
+
+        Returns
+        -------
+        np.array
+        """
+        if scipy is None:
+            msg = 'Raster().resample_to_grid(): error ' + \
+                  'importing scipy - try "pip install scipy"'
+            raise ImportError(msg)
+        else:
+            from scipy.interpolate import griddata
+
+        data_shape = xc.shape
+        xc = xc.flatten()
+        yc = yc.flatten()
+        # step 1: create grid from raster bounds
+        rxc = self.xcenters
+        ryc = self.ycenters
+
+        # step 2: flatten grid
+        rxc = rxc.flatten()
+        ryc = ryc.flatten()
+
+        # step 3: get array
+        if method == "cubic":
+            arr = self.get_array(band, masked=False)
+        else:
+            arr = self.get_array(band, masked=True)
+        arr = arr.flatten()
+
+        # step 4: use griddata interpolation to snap to grid
+        data = griddata((rxc, ryc), arr, (xc, yc), method=method)
+
+        # step 5: return grid to user in shape provided
+        data.shape = data_shape
+
+        # step 6: re-apply nodata values
+        data[np.isnan(data)] = self.nodatavals[0]
+
+        return data
+
+    def crop(self, polygon, invert=False):
+        """
+        Method to crop a new raster object
+        from the current raster object
+
+        Parameters
+        ----------
+        polygon : (shapely.geometry.Polygon or GeoJSON-like dict)
+            The values should be a GeoJSON-like dict or object
+            that implements the Python geo interface protocol.
+
+            Alternatively if the user supplies the vectors
+            of a polygon in the format [(x0, y0), ..., (xn, yn)]
+            a single shapely polygon will be created for
+            cropping the data
+
+        invert : bool
+            Default value is False.
If invert is True then the + area inside the shapes will be masked out + + """ + if self._dataset is not None: + arr_dict, rstr_crp_meta = self._sample_rio_dataset(polygon, invert) + self.__arr_dict = arr_dict + self._meta = rstr_crp_meta + self._dataset = None + self.__xcenters = None + self.__ycenters = None + + else: + # crop from user supplied points using numpy + if rasterio is None: + msg = 'Raster().crop(): error ' + \ + 'importing rasterio try "pip install rasterio"' + raise ImportError(msg) + else: + from rasterio.mask import mask + + if affine is None: + msg = 'Raster(),crop(): error ' + \ + 'importing affine - try "pip install affine"' + raise ImportError(msg) + else: + from affine import Affine + + mask = self._intersection(polygon, invert) + + xc = self.xcenters + yc = self.ycenters + # step 4: find bounding box + xba = np.copy(xc) + yba = np.copy(yc) + xba[~mask] = np.nan + yba[~mask] = np.nan + + xmin = np.nanmin(xba) + xmax = np.nanmax(xba) + ymin = np.nanmin(yba) + ymax = np.nanmax(yba) + + bbox = [(xmin, ymin), + (xmin, ymax), + (xmax, ymax), + (xmax, ymin)] + + # step 5: use bounding box to crop array + xind = [] + yind = [] + for pt in bbox: + xt = (pt[0] - xc) ** 2 + yt = (pt[1] - yc) ** 2 + hypot = np.sqrt(xt + yt) + ind = np.where(hypot == np.min(hypot)) + yind.append(ind[0][0]) + xind.append(ind[1][0]) + + xmii = np.min(xind) + xmai = np.max(xind) + ymii = np.min(yind) + ymai = np.max(yind) + + crp_mask = mask[ymii:ymai + 1, xmii:xmai + 1] + nodata = self._meta["nodata"] + if not isinstance(nodata, float) and not isinstance(nodata, int): + try: + nodata = nodata[0] + except (IndexError, TypeError): + nodata = -1.0e+38 + self._meta["nodata"] = nodata + + arr_dict = {} + for band, arr in self.__arr_dict.items(): + t = arr[ymii:ymai + 1, xmii:xmai + 1] + t[~crp_mask] = nodata + arr_dict[band] = t + + self.__arr_dict = arr_dict + + # adjust xmin, ymax back to appropriate grid locations + xd = abs(self._meta["transform"][0]) + yd = abs(self._meta["transform"][4]) + xmin -= xd / 2. + ymax += yd / 2. + + # step 6: update metadata including a new Affine + self._meta["height"] = crp_mask.shape[0] + self._meta["width"] = crp_mask.shape[1] + transform = self._meta['transform'] + self._meta["transform"] = Affine(transform[0], transform[1], xmin, + transform[3], transform[4], ymax) + self.__xcenters = None + self.__ycenters = None + + def _sample_rio_dataset(self, polygon, invert): + """ + Internal method to sample a rasterIO dataset using + rasterIO built ins + + Parameters + ---------- + polygon : (shapely.geometry.Polygon or GeoJSON-like dict) + The values should be a GeoJSON-like dict or object + implements the Python geo interface protocal. + + Alternatively if the user supplies the vectors + of a polygon in the format [(x0, y0), ..., (xn, yn)] + a single shapely polygon will be created for + cropping the data + + invert : bool + Default value is False. 
If invert is True then the + area inside the shapes will be masked out + + Returns + ------- + tuple : (arr_dict, raster_crp_meta) + + """ + if rasterio is None: + msg = 'Raster()._sample_rio_dataset(): error ' + \ + 'importing rasterio try "pip install rasterio"' + raise ImportError(msg) + else: + from rasterio.mask import mask + + if shapely is None: + msg = 'Raster()._sample_rio_dataset(): error ' + \ + 'importing shapely - try "pip install shapely"' + raise ImportError(msg) + else: + from shapely import geometry + + + if isinstance(polygon, list) or isinstance(polygon, np.ndarray): + shapes = [geometry.Polygon([[x, y] for x, y in polygon])] + + else: + shapes = [polygon] + + rstr_crp, rstr_crp_affine = mask(self._dataset, + shapes, + crop=True, + invert=invert) + + rstr_crp_meta = self._dataset.meta.copy() + rstr_crp_meta.update({"driver": "GTiff", + "height": rstr_crp.shape[1], + "width": rstr_crp.shape[2], + "transform": rstr_crp_affine}) + + arr_dict = {self.bands[b]: arr for b, arr in enumerate(rstr_crp)} + + return arr_dict, rstr_crp_meta + + def _intersection(self, polygon, invert): + """ + Internal method to create an intersection mask, used for cropping + arrays and sampling arrays. + + Parameters + ---------- + polygon : (shapely.geometry.Polygon or GeoJSON-like dict) + The values should be a GeoJSON-like dict or object + implements the Python geo interface protocal. + + Alternatively if the user supplies the vectors + of a polygon in the format [(x0, y0), ..., (xn, yn)] + a single shapely polygon will be created for + cropping the data + + invert : bool + Default value is False. If invert is True then the + area inside the shapes will be masked out + + Returns + ------- + mask : np.ndarray (dtype = bool) + + """ + if shapely is None: + msg = 'Raster()._intersection(): error ' + \ + 'importing shapely try "pip install shapely"' + raise ImportError(msg) + else: + from shapely import geometry + + # step 1: check the data type in shapes + if isinstance(polygon, geometry.Polygon): + polygon = list(polygon.exterior.coords) + + elif isinstance(polygon, dict): + # geojson, get coordinates= + if polygon['geometry']['type'].lower() == "polygon": + polygon = [[x, y] for x, y in + polygon["geometry"]["coordinates"]] + + else: + raise TypeError("Shape type must be a polygon") + + elif isinstance(polygon, np.ndarray): + # numpy array, change to a list + polygon = list(polygon) + + else: + # this is a list of coordinates + pass + + # step 2: create a grid of centoids + xc = self.xcenters + yc = self.ycenters + + # step 3: do intersection + mask = self._point_in_polygon(xc, yc, polygon) + if invert: + mask = np.invert(mask) + + return mask + + @staticmethod + def _point_in_polygon(xc, yc, polygon): + """ + Use the ray casting algorithm to determine if a point + is within a polygon. Enables very fast + intersection calculations! + + Parameters + ---------- + xc : np.ndarray + array of xpoints + yc : np.ndarray + array of ypoints + polygon : iterable (list) + polygon vertices [(x0, y0),....(xn, yn)] + note: polygon can be open or closed + + Returns + ------- + mask: np.array + True value means point is in polygon! 
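_sample_rio_dataset above hands the heavy lifting to rasterio.mask.mask, which returns the cropped array plus the affine transform of the crop. The equivalent standalone call (the file name and polygon are placeholders):

    import rasterio
    from rasterio.mask import mask
    from shapely.geometry import Polygon

    poly = Polygon([(0, 0), (0, 100), (100, 100), (100, 0)])
    with rasterio.open("myraster.tif") as src:
        cropped, crp_transform = mask(src, [poly], crop=True)
        meta = src.meta.copy()
        meta.update(height=cropped.shape[1], width=cropped.shape[2],
                    transform=crp_transform)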
+ + """ + x0, y0 = polygon[0] + xt, yt = polygon[-1] + + # close polygon if it isn't already + if (x0, y0) != (xt, yt): + polygon.append((x0, y0)) + + ray_count = np.zeros(xc.shape, dtype=int) + num = len(polygon) + j = num - 1 + for i in range(num): + + tmp = polygon[i][0] + (polygon[j][0] - polygon[i][0]) * \ + (yc - polygon[i][1]) / (polygon[j][1] - polygon[i][1]) + + comp = np.where(((polygon[i][1] > yc) ^ (polygon[j][1] > yc)) + & (xc < tmp)) + + j = i + if len(comp[0]) > 0: + ray_count[comp[0], comp[1]] += 1 + + mask = np.ones(xc.shape, dtype=bool) + mask[ray_count % 2 == 0] = False + + return mask + + def get_array(self, band, masked=True): + """ + Method to get a numpy array corresponding to the + provided raster band. Nodata vals are set to + np.NaN + + Parameters + ---------- + band : int + band number from the raster + masked : bool + determines if nodatavals will be returned as np.nan to + the user + + Returns + ------- + np.ndarray + + """ + if band not in self.bands: + raise ValueError("Band {} not a valid value") + + if self._dataset is None: + array = np.copy(self.__arr_dict[band]) + else: + array = self._dataset.read(band) + + if masked: + for v in self.nodatavals: + array[array == v] = np.nan + + return array + + def write(self, name): + """ + Method to write raster data to a .tif + file + + Parameters + ---------- + name : str + output raster .tif file name + + """ + if rasterio is None: + msg = 'Raster().write(): error ' + \ + 'importing rasterio - try "pip install rasterio"' + raise ImportError(msg) + + if not name.endswith(".tif"): + name += ".tif" + + with rasterio.open(name, "w", **self._meta) as foo: + for band, arr in self.__arr_dict.items(): + foo.write(arr, band) + + @staticmethod + def load(raster): + """ + Static method to load a raster file + into the raster object + + Parameters + ---------- + raster : str + + Returns + ------- + Raster object + + """ + if rasterio is None: + msg = 'Raster().load(): error ' + \ + 'importing rasterio - try "pip install rasterio"' + raise ImportError(msg) + + dataset = rasterio.open(raster) + array = dataset.read() + bands = dataset.indexes + meta = dataset.meta + + return Raster(array, bands, meta["crs"], meta['transform'], + meta['nodata'], meta['driver']) + + def plot(self, ax=None, contour=False, **kwargs): + """ + Method to plot raster layers or contours. + + Parameters + ---------- + ax : matplotlib.pyplot.axes + optional matplotlib axes for plotting + contour : bool + flag to indicate creation of contour plot + + **kwargs : + matplotlib keyword arguments + see matplotlib documentation for valid + arguments for plot and contour. 
+ + Returns + ------- + ax : matplotlib.pyplot.axes + + """ + if rasterio is None: + msg = 'Raster().plot(): error ' + \ + 'importing rasterio - try "pip install rasterio"' + raise ImportError(msg) + else: + from rasterio.plot import show + + if self._dataset is not None: + ax = show(self._dataset, ax=ax, contour=contour, **kwargs) + + else: + d0 = len(self.__arr_dict) + d1, d2 = None, None + for _, arr in self.__arr_dict.items(): + d1, d2 = arr.shape + + if d1 is None: + raise AssertionError("No plottable arrays found") + + data = np.zeros((d0, d1, d2), dtype=float) + i = 0 + for _, arr in sorted(self.__arr_dict.items()): + data[i, :, :] = arr + i += 1 + + data = np.ma.masked_where(data == self.nodatavals, data) + ax = show(data, ax=ax, contour=contour, + transform=self._meta["transform"], + **kwargs) + + return ax + + def histogram(self, ax=None, **kwargs): + """ + Method to plot a histogram of digital numbers + + Parameters + ---------- + ax : matplotlib.pyplot.axes + optional matplotlib axes for plotting + + **kwargs : + matplotlib keyword arguments + see matplotlib documentation for valid + arguments for histogram + + Returns + ------- + ax : matplotlib.pyplot.axes + + """ + if rasterio is None: + msg = 'Raster().histogram(): error ' + \ + 'importing rasterio - try "pip install rasterio"' + raise ImportError(msg) + else: + from rasterio.plot import show_hist + + if "alpha" not in kwargs: + kwargs["alpha"] = 0.3 + + if self._dataset is not None: + ax = show_hist(self._dataset, ax=ax, **kwargs) + + else: + d0 = len(self.__arr_dict) + d1, d2 = None, None + for _, arr in self.__arr_dict.items(): + d1, d2 = arr.shape + + if d1 is None: + raise AssertionError("No plottable arrays found") + + data = np.zeros((d0, d1, d2), dtype=float) + i = 0 + for _, arr in sorted(self.__arr_dict.items()): + data[i, :, :] = arr + i += 1 + + data = np.ma.masked_where(data == self.nodatavals, data) + ax = show_hist(data, ax=ax, **kwargs) + + return ax diff --git a/flopy/utils/reference.py b/flopy/utils/reference.py index 5fb2a965c3..b582614e5c 100755 --- a/flopy/utils/reference.py +++ b/flopy/utils/reference.py @@ -1,2215 +1,2215 @@ -""" -Module spatial referencing for flopy model objects - -""" -import json -import numpy as np -import os -import warnings - -from collections import OrderedDict - -# web address of spatial reference dot org -srefhttp = 'https://spatialreference.org' - - -class SpatialReference(object): - """ - a class to locate a structured model grid in x-y space - - Parameters - ---------- - - delr : numpy ndarray - the model discretization delr vector - (An array of spacings along a row) - delc : numpy ndarray - the model discretization delc vector - (An array of spacings along a column) - lenuni : int - the length units flag from the discretization package - (default 2) - xul : float - the x coordinate of the upper left corner of the grid - Enter either xul and yul or xll and yll. - yul : float - the y coordinate of the upper left corner of the grid - Enter either xul and yul or xll and yll. - xll : float - the x coordinate of the lower left corner of the grid - Enter either xul and yul or xll and yll. - yll : float - the y coordinate of the lower left corner of the grid - Enter either xul and yul or xll and yll. - rotation : float - the counter-clockwise rotation (in degrees) of the grid - - proj4_str: str - a PROJ4 string that identifies the grid in space. warning: case - sensitive! - - units : string - Units for the grid. 
Must be either feet or meters - - epsg : int - EPSG code that identifies the grid in space. Can be used in lieu of - proj4. PROJ4 attribute will auto-populate if there is an internet - connection(via get_proj4 method). - See https://www.epsg-registry.org/ or spatialreference.org - - length_multiplier : float - multiplier to convert model units to spatial reference units. - delr and delc above will be multiplied by this value. (default=1.) - - Attributes - ---------- - xedge : ndarray - array of column edges - - yedge : ndarray - array of row edges - - xgrid : ndarray - numpy meshgrid of xedges - - ygrid : ndarray - numpy meshgrid of yedges - - xcenter : ndarray - array of column centers - - ycenter : ndarray - array of row centers - - xcentergrid : ndarray - numpy meshgrid of column centers - - ycentergrid : ndarray - numpy meshgrid of row centers - - vertices : 1D array - 1D array of cell vertices for whole grid in C-style (row-major) order - (same as np.ravel()) - - - Notes - ----- - - xul and yul can be explicitly (re)set after SpatialReference - instantiation, but only before any of the other attributes and methods are - accessed - - """ - - xul, yul = None, None - xll, yll = None, None - rotation = 0. - length_multiplier = 1. - origin_loc = 'ul' # or ll - - defaults = {"xul": None, "yul": None, "rotation": 0., - "proj4_str": None, - "units": None, "lenuni": 2, - "length_multiplier": None, - "source": 'defaults'} - - lenuni_values = {'undefined': 0, - 'feet': 1, - 'meters': 2, - 'centimeters': 3} - lenuni_text = {v: k for k, v in lenuni_values.items()} - - def __init__(self, delr=np.array([]), delc=np.array([]), lenuni=2, - xul=None, yul=None, xll=None, yll=None, rotation=0.0, - proj4_str=None, epsg=None, prj=None, units=None, - length_multiplier=None): - warnings.warn( - "SpatialReference has been deprecated. Use StructuredGrid" - " instead.", - category=DeprecationWarning) - - for delrc in [delr, delc]: - if isinstance(delrc, float) or isinstance(delrc, int): - msg = ('delr and delcs must be an array or sequences equal in ' - 'length to the number of rows/columns.') - raise TypeError(msg) - - self.delc = np.atleast_1d(np.array(delc)).astype( - np.float64) # * length_multiplier - self.delr = np.atleast_1d(np.array(delr)).astype( - np.float64) # * length_multiplier - - if self.delr.sum() == 0 or self.delc.sum() == 0: - if xll is None or yll is None: - msg = ('Warning: no grid spacing or lower-left corner ' - 'supplied. Setting the offset with xul, yul requires ' - 'arguments for delr and delc. Origin will be set to ' - 'zero.') - print(msg) - xll, yll = 0, 0 - xul, yul = None, None - - self._lenuni = lenuni - self._proj4_str = proj4_str - - self._epsg = epsg - if epsg is not None: - self._proj4_str = getproj4(self._epsg) - self.prj = prj - self._wkt = None - self.crs = crs(prj=prj, epsg=epsg) - - self.supported_units = ["feet", "meters"] - self._units = units - self._length_multiplier = length_multiplier - self._reset() - self.set_spatialreference(xul, yul, xll, yll, rotation) - - @property - def xll(self): - if self.origin_loc == 'll': - xll = self._xll if self._xll is not None else 0. - elif self.origin_loc == 'ul': - # calculate coords for lower left corner - xll = self._xul - (np.sin(self.theta) * self.yedge[0] * - self.length_multiplier) - return xll - - @property - def yll(self): - if self.origin_loc == 'll': - yll = self._yll if self._yll is not None else 0. 
- elif self.origin_loc == 'ul': - # calculate coords for lower left corner - yll = self._yul - (np.cos(self.theta) * self.yedge[0] * - self.length_multiplier) - return yll - - @property - def xul(self): - if self.origin_loc == 'll': - # calculate coords for upper left corner - xul = self._xll + (np.sin(self.theta) * self.yedge[0] * - self.length_multiplier) - if self.origin_loc == 'ul': - # calculate coords for lower left corner - xul = self._xul if self._xul is not None else 0. - return xul - - @property - def yul(self): - if self.origin_loc == 'll': - # calculate coords for upper left corner - yul = self._yll + (np.cos(self.theta) * self.yedge[0] * - self.length_multiplier) - - if self.origin_loc == 'ul': - # calculate coords for lower left corner - yul = self._yul if self._yul is not None else 0. - return yul - - @property - def proj4_str(self): - proj4_str = None - if self._proj4_str is not None: - if "epsg" in self._proj4_str.lower(): - proj4_str = self._proj4_str - # set the epsg if proj4 specifies it - tmp = [i for i in self._proj4_str.split() if - 'epsg' in i.lower()] - self._epsg = int(tmp[0].split(':')[1]) - else: - proj4_str = self._proj4_str - elif self.epsg is not None: - proj4_str = 'epsg:{}'.format(self.epsg) - return proj4_str - - @property - def epsg(self): - # don't reset the proj4 string here - # because proj4 attribute may already be populated - # (with more details than getproj4 would return) - # instead reset proj4 when epsg is set - # (on init or setattr) - return self._epsg - - @property - def wkt(self): - if self._wkt is None: - if self.prj is not None: - with open(self.prj) as src: - wkt = src.read() - elif self.epsg is not None: - wkt = getprj(self.epsg) - else: - return None - return wkt - else: - return self._wkt - - @property - def lenuni(self): - return self._lenuni - - def _parse_units_from_proj4(self): - units = None - try: - # need this because preserve_units doesn't seem to be - # working for complex proj4 strings. So if an - # epsg code was passed, we have no choice, but if a - # proj4 string was passed, we can just parse it - if "EPSG" in self.proj4_str.upper(): - import pyproj - - crs = pyproj.Proj(self.proj4_str, preserve_units=True) - proj_str = crs.srs - else: - proj_str = self.proj4_str - # http://proj4.org/parameters.html#units - # from proj4 source code - # "us-ft", "0.304800609601219", "U.S. Surveyor's Foot", - # "ft", "0.3048", "International Foot", - if "units=m" in proj_str: - units = "meters" - elif "units=ft" in proj_str or \ - "units=us-ft" in proj_str or \ - "to_meters:0.3048" in proj_str: - units = "feet" - return units - except: - if self.proj4_str is not None: - print(' could not parse units from {}'.format( - self.proj4_str)) - - @property - def units(self): - if self._units is not None: - units = self._units.lower() - else: - units = self._parse_units_from_proj4() - if units is None: - # print("warning: assuming SpatialReference units are meters") - units = 'meters' - assert units in self.supported_units - return units - - @property - def length_multiplier(self): - """ - Attempt to identify multiplier for converting from - model units to sr units, defaulting to 1. - """ - lm = None - if self._length_multiplier is not None: - lm = self._length_multiplier - else: - if self.model_length_units == 'feet': - if self.units == 'meters': - lm = 0.3048 - elif self.units == 'feet': - lm = 1. - elif self.model_length_units == 'meters': - if self.units == 'feet': - lm = 1 / .3048 - elif self.units == 'meters': - lm = 1. 
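            # note: these length_multiplier branches are unit conversions;
            # e.g. a model in feet on a meters-referenced grid gives
            # lm = 0.3048, so a 250.0 ft spacing maps to 76.2 m in
            # projected coordinates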
- elif self.model_length_units == 'centimeters': - if self.units == 'meters': - lm = 1 / 100. - elif self.units == 'feet': - lm = 1 / 30.48 - else: # model units unspecified; default to 1 - lm = 1. - return lm - - @property - def model_length_units(self): - return self.lenuni_text[self.lenuni] - - @property - def bounds(self): - """ - Return bounding box in shapely order. - """ - xmin, xmax, ymin, ymax = self.get_extent() - return xmin, ymin, xmax, ymax - - @staticmethod - def load(namefile=None, reffile='usgs.model.reference'): - """ - Attempts to load spatial reference information from - the following files (in order): - 1) usgs.model.reference - 2) NAM file (header comment) - 3) SpatialReference.default dictionary - """ - reffile = os.path.join(os.path.split(namefile)[0], reffile) - d = SpatialReference.read_usgs_model_reference_file(reffile) - if d is not None: - return d - d = SpatialReference.attribs_from_namfile_header(namefile) - if d is not None: - return d - else: - return SpatialReference.defaults - - @staticmethod - def attribs_from_namfile_header(namefile): - # check for reference info in the nam file header - d = SpatialReference.defaults.copy() - d['source'] = 'namfile' - if namefile is None: - return None - header = [] - with open(namefile, 'r') as f: - for line in f: - if not line.startswith('#'): - break - header.extend(line.strip().replace('#', '').split(';')) - - for item in header: - if "xul" in item.lower(): - try: - d['xul'] = float(item.split(':')[1]) - except: - print(' could not parse xul ' + - 'in {}'.format(namefile)) - elif "yul" in item.lower(): - try: - d['yul'] = float(item.split(':')[1]) - except: - print(' could not parse yul ' + - 'in {}'.format(namefile)) - elif "rotation" in item.lower(): - try: - d['rotation'] = float(item.split(':')[1]) - except: - print(' could not parse rotation ' + - 'in {}'.format(namefile)) - elif "proj4_str" in item.lower(): - try: - proj4_str = ':'.join(item.split(':')[1:]).strip() - if proj4_str.lower() == 'none': - proj4_str = None - d['proj4_str'] = proj4_str - except: - print(' could not parse proj4_str ' + - 'in {}'.format(namefile)) - elif "start" in item.lower(): - try: - d['start_datetime'] = item.split(':')[1].strip() - except: - print(' could not parse start ' + - 'in {}'.format(namefile)) - - # spatial reference length units - elif "units" in item.lower(): - d['units'] = item.split(':')[1].strip() - # model length units - elif "lenuni" in item.lower(): - d['lenuni'] = int(item.split(':')[1].strip()) - # multiplier for converting from model length units to sr length units - elif "length_multiplier" in item.lower(): - d['length_multiplier'] = float(item.split(':')[1].strip()) - return d - - @staticmethod - def read_usgs_model_reference_file(reffile='usgs.model.reference'): - """ - read spatial reference info from the usgs.model.reference file - https://water.usgs.gov/ogw/policy/gw-model/modelers-setup.html - """ - - ITMUNI = {0: "undefined", 1: "seconds", 2: "minutes", 3: "hours", - 4: "days", - 5: "years"} - itmuni_values = {v: k for k, v in ITMUNI.items()} - - d = SpatialReference.defaults.copy() - d['source'] = 'usgs.model.reference' - # discard default to avoid confusion with epsg code if entered - d.pop('proj4_str') - if os.path.exists(reffile): - with open(reffile) as fref: - for line in fref: - if len(line) > 1: - if line.strip()[0] != '#': - info = line.strip().split('#')[0].split() - if len(info) > 1: - d[info[0].lower()] = ' '.join(info[1:]) - d['xul'] = float(d['xul']) - d['yul'] = float(d['yul']) - 
d['rotation'] = float(d['rotation']) - - # convert the model.reference text to a lenuni value - # (these are the model length units) - if 'length_units' in d.keys(): - d['lenuni'] = SpatialReference.lenuni_values[d['length_units']] - if 'time_units' in d.keys(): - d['itmuni'] = itmuni_values[d['time_units']] - if 'start_date' in d.keys(): - start_datetime = d.pop('start_date') - if 'start_time' in d.keys(): - start_datetime += ' {}'.format(d.pop('start_time')) - d['start_datetime'] = start_datetime - if 'epsg' in d.keys(): - try: - d['epsg'] = int(d['epsg']) - except Exception as e: - raise Exception( - "error reading epsg code from file:\n" + str(e)) - # this prioritizes epsg over proj4 if both are given - # (otherwise 'proj4' entry will be dropped below) - elif 'proj4' in d.keys(): - d['proj4_str'] = d['proj4'] - - # drop any other items that aren't used in sr class - d = {k: v for k, v in d.items() if - k.lower() in SpatialReference.defaults.keys() - or k.lower() in {'epsg', 'start_datetime', 'itmuni', - 'source'}} - return d - else: - return None - - def __setattr__(self, key, value): - reset = True - if key == "delr": - super(SpatialReference, self). \ - __setattr__("delr", np.atleast_1d(np.array(value))) - elif key == "delc": - super(SpatialReference, self). \ - __setattr__("delc", np.atleast_1d(np.array(value))) - elif key == "xul": - super(SpatialReference, self). \ - __setattr__("_xul", float(value)) - self.origin_loc = 'ul' - elif key == "yul": - super(SpatialReference, self). \ - __setattr__("_yul", float(value)) - self.origin_loc = 'ul' - elif key == "xll": - super(SpatialReference, self). \ - __setattr__("_xll", float(value)) - self.origin_loc = 'll' - elif key == "yll": - super(SpatialReference, self). \ - __setattr__("_yll", float(value)) - self.origin_loc = 'll' - elif key == "length_multiplier": - super(SpatialReference, self). \ - __setattr__("_length_multiplier", float(value)) - # self.set_origin(xul=self.xul, yul=self.yul, xll=self.xll, - # yll=self.yll) - elif key == "rotation": - super(SpatialReference, self). \ - __setattr__("rotation", float(value)) - # self.set_origin(xul=self.xul, yul=self.yul, xll=self.xll, - # yll=self.yll) - elif key == "lenuni": - super(SpatialReference, self). \ - __setattr__("_lenuni", int(value)) - # self.set_origin(xul=self.xul, yul=self.yul, xll=self.xll, - # yll=self.yll) - elif key == "units": - value = value.lower() - assert value in self.supported_units - super(SpatialReference, self). \ - __setattr__("_units", value) - elif key == "proj4_str": - super(SpatialReference, self). \ - __setattr__("_proj4_str", value) - # reset the units and epsg - units = self._parse_units_from_proj4() - if units is not None: - self._units = units - self._epsg = None - elif key == "epsg": - super(SpatialReference, self). \ - __setattr__("_epsg", value) - # reset the units and proj4 - self._units = None - self._proj4_str = getproj4(self._epsg) - self.crs = crs(epsg=value) - elif key == "prj": - super(SpatialReference, self). \ - __setattr__("prj", value) - # translation to proj4 strings in crs class not robust yet - # leave units and proj4 alone for now. 
- self.crs = crs(prj=value, epsg=self.epsg) - else: - super(SpatialReference, self).__setattr__(key, value) - reset = False - if reset: - self._reset() - - def reset(self, **kwargs): - for key, value in kwargs.items(): - setattr(self, key, value) - return - - def _reset(self): - self._xgrid = None - self._ygrid = None - self._ycentergrid = None - self._xcentergrid = None - self._vertices = None - return - - @property - def nrow(self): - return self.delc.shape[0] - - @property - def ncol(self): - return self.delr.shape[0] - - def __eq__(self, other): - if not isinstance(other, SpatialReference): - return False - if other.xul != self.xul: - return False - if other.yul != self.yul: - return False - if other.rotation != self.rotation: - return False - if other.proj4_str != self.proj4_str: - return False - return True - - @classmethod - def from_namfile(cls, namefile): - attribs = SpatialReference.attribs_from_namfile_header(namefile) - try: - attribs.pop("start_datetime") - except: - print(' could not remove start_datetime') - - return SpatialReference(**attribs) - - @classmethod - def from_gridspec(cls, gridspec_file, lenuni=0): - f = open(gridspec_file, 'r') - raw = f.readline().strip().split() - nrow = int(raw[0]) - ncol = int(raw[1]) - raw = f.readline().strip().split() - xul, yul, rot = float(raw[0]), float(raw[1]), float(raw[2]) - delr = [] - j = 0 - while j < ncol: - raw = f.readline().strip().split() - for r in raw: - if '*' in r: - rraw = r.split('*') - for n in range(int(rraw[0])): - delr.append(float(rraw[1])) - j += 1 - else: - delr.append(float(r)) - j += 1 - delc = [] - i = 0 - while i < nrow: - raw = f.readline().strip().split() - for r in raw: - if '*' in r: - rraw = r.split('*') - for n in range(int(rraw[0])): - delc.append(float(rraw[1])) - i += 1 - else: - delc.append(float(r)) - i += 1 - f.close() - return cls(np.array(delr), np.array(delc), - lenuni, xul=xul, yul=yul, rotation=rot) - - @property - def attribute_dict(self): - return {"xul": self.xul, "yul": self.yul, "rotation": self.rotation, - "proj4_str": self.proj4_str} - - def set_spatialreference(self, xul=None, yul=None, xll=None, yll=None, - rotation=0.0): - """ - set spatial reference - can be called from model instance - - """ - if xul is not None and xll is not None: - msg = ('Both xul and xll entered. Please enter either xul, yul or ' - 'xll, yll.') - raise ValueError(msg) - if yul is not None and yll is not None: - msg = ('Both yul and yll entered. Please enter either xul, yul or ' - 'xll, yll.') - raise ValueError(msg) - # set the origin priority based on the left corner specified - # (the other left corner will be calculated). If none are specified - # then default to upper left - if xul is None and yul is None and xll is None and yll is None: - self.origin_loc = 'ul' - xul = 0. - yul = self.delc.sum() - elif xll is not None: - self.origin_loc = 'll' - else: - self.origin_loc = 'ul' - - self.rotation = rotation - self._xll = xll if xll is not None else 0. - self._yll = yll if yll is not None else 0. - self._xul = xul if xul is not None else 0. - self._yul = yul if yul is not None else 0. 
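from_gridspec above reads the PEST-style grid specification layout: an 'nrow ncol' header, then 'xul yul rotation', then the delr and delc records, where repeated spacings may be compressed as count*value. A schematic file consistent with that parser (numbers invented):

    2 10
    100.0 1100.0 0.0
    10*250.0
    2*500.0

which expands to ten delr values of 250.0 and two delc values of 500.0, loaded with SpatialReference.from_gridspec('grid.spc').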
- # self.set_origin(xul, yul, xll, yll) - return - - def __repr__(self): - s = "xul:{0:<.10G}; yul:{1:<.10G}; rotation:{2: '.format(interval) + \ - 'maxlevels = {}'.format(maxlevels) - assert nlevels < maxlevels, msg - levels = np.arange(vmin, vmax, interval) - fig, ax = plt.subplots() - ctr = self.contour_array(ax, a, levels=levels) - self.export_contours(filename, ctr, fieldname, epsg, prj, **kwargs) - plt.close() - - def contour_array(self, ax, a, **kwargs): - """ - Create a QuadMesh plot of the specified array using pcolormesh - - Parameters - ---------- - ax : matplotlib.axes.Axes - ax to add the contours - - a : np.ndarray - array to contour - - Returns - ------- - contour_set : ContourSet - - """ - from flopy.plot import ModelMap - - kwargs['ax'] = ax - mm = ModelMap(sr=self) - contour_set = mm.contour_array(a=a, **kwargs) - - return contour_set - - @property - def vertices(self): - """ - Returns a list of vertices for - """ - if self._vertices is None: - self._set_vertices() - return self._vertices - - def _set_vertices(self): - """ - Populate vertices for the whole grid - """ - jj, ii = np.meshgrid(range(self.ncol), range(self.nrow)) - jj, ii = jj.ravel(), ii.ravel() - self._vertices = self.get_vertices(ii, jj) - - def interpolate(self, a, xi, method='nearest'): - """ - Use the griddata method to interpolate values from an array onto the - points defined in xi. For any values outside of the grid, use - 'nearest' to find a value for them. - - Parameters - ---------- - a : numpy.ndarray - array to interpolate from. It must be of size nrow, ncol - xi : numpy.ndarray - array containing x and y point coordinates of size (npts, 2). xi - also works with broadcasting so that if a is a 2d array, then - xi can be passed in as (xgrid, ygrid). - method : {'linear', 'nearest', 'cubic'} - method to use for interpolation (default is 'nearest') - - Returns - ------- - b : numpy.ndarray - array of size (npts) - - """ - try: - from scipy.interpolate import griddata - except: - print('scipy not installed\ntry pip install scipy') - return None - - # Create a 2d array of points for the grid centers - points = np.empty((self.ncol * self.nrow, 2)) - points[:, 0] = self.xcentergrid.flatten() - points[:, 1] = self.ycentergrid.flatten() - - # Use the griddata function to interpolate to the xi points - b = griddata(points, a.flatten(), xi, method=method, fill_value=np.nan) - - # if method is linear or cubic, then replace nan's with a value - # interpolated using nearest - if method != 'nearest': - bn = griddata(points, a.flatten(), xi, method='nearest') - idx = np.isnan(b) - b[idx] = bn[idx] - - return b - - def get_2d_vertex_connectivity(self): - """ - Create the cell 2d vertices array and the iverts index array. These - are the same form as the ones used to instantiate an unstructured - spatial reference. 
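The nearest-neighbor fallback in interpolate() above fills the NaNs that scipy's linear griddata leaves outside the convex hull of the source points; the trick in isolation:

    import numpy as np
    from scipy.interpolate import griddata

    points = np.array([[0., 0.], [1., 0.], [0., 1.], [1., 1.]])
    values = np.array([0., 1., 2., 3.])
    xi = np.array([[0.5, 0.5], [2.0, 2.0]])   # second point is outside

    b = griddata(points, values, xi, method='linear', fill_value=np.nan)
    bn = griddata(points, values, xi, method='nearest')
    b[np.isnan(b)] = bn[np.isnan(b)]  # outside point takes the value at (1, 1)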
- - Returns - ------- - - verts : ndarray - array of x and y coordinates for the grid vertices - - iverts : list - a list with a list of vertex indices for each cell in clockwise - order starting with the upper left corner - - """ - x = self.xgrid.flatten() - y = self.ygrid.flatten() - nrowvert = self.nrow + 1 - ncolvert = self.ncol + 1 - npoints = nrowvert * ncolvert - verts = np.empty((npoints, 2), dtype=np.float) - verts[:, 0] = x - verts[:, 1] = y - iverts = [] - for i in range(self.nrow): - for j in range(self.ncol): - iv1 = i * ncolvert + j # upper left point number - iv2 = iv1 + 1 - iv4 = (i + 1) * ncolvert + j - iv3 = iv4 + 1 - iverts.append([iv1, iv2, iv3, iv4]) - return verts, iverts - - def get_3d_shared_vertex_connectivity(self, nlay, botm, ibound=None): - - # get the x and y points for the grid - x = self.xgrid.flatten() - y = self.ygrid.flatten() - - # set the size of the vertex grid - nrowvert = self.nrow + 1 - ncolvert = self.ncol + 1 - nlayvert = nlay + 1 - nrvncv = nrowvert * ncolvert - npoints = nrvncv * nlayvert - - # create and fill a 3d points array for the grid - verts = np.empty((npoints, 3), dtype=np.float) - verts[:, 0] = np.tile(x, nlayvert) - verts[:, 1] = np.tile(y, nlayvert) - istart = 0 - istop = nrvncv - for k in range(nlay + 1): - verts[istart:istop, 2] = self.interpolate(botm[k], - verts[istart:istop, :2], - method='linear') - istart = istop - istop = istart + nrvncv - - # create the list of points comprising each cell. points must be - # listed a specific way according to vtk requirements. - iverts = [] - for k in range(nlay): - koffset = k * nrvncv - for i in range(self.nrow): - for j in range(self.ncol): - if ibound is not None: - if ibound[k, i, j] == 0: - continue - iv1 = i * ncolvert + j + koffset - iv2 = iv1 + 1 - iv4 = (i + 1) * ncolvert + j + koffset - iv3 = iv4 + 1 - iverts.append([iv4 + nrvncv, iv3 + nrvncv, - iv1 + nrvncv, iv2 + nrvncv, - iv4, iv3, iv1, iv2]) - - # renumber and reduce the vertices if ibound_filter - if ibound is not None: - - # go through the vertex list and mark vertices that are used - ivertrenum = np.zeros(npoints, dtype=np.int) - for vlist in iverts: - for iv in vlist: - # mark vertices that are actually used - ivertrenum[iv] = 1 - - # renumber vertices that are used, skip those that are not - inum = 0 - for i in range(npoints): - if ivertrenum[i] > 0: - inum += 1 - ivertrenum[i] = inum - ivertrenum -= 1 - - # reassign the vertex list using the new vertex numbers - iverts2 = [] - for vlist in iverts: - vlist2 = [] - for iv in vlist: - vlist2.append(ivertrenum[iv]) - iverts2.append(vlist2) - iverts = iverts2 - idx = np.where(ivertrenum >= 0) - verts = verts[idx] - - return verts, iverts - - def get_3d_vertex_connectivity(self, nlay, top, bot, ibound=None): - if ibound is None: - ncells = nlay * self.nrow * self.ncol - ibound = np.ones((nlay, self.nrow, self.ncol), dtype=np.int) - else: - ncells = (ibound != 0).sum() - npoints = ncells * 8 - verts = np.empty((npoints, 3), dtype=np.float) - iverts = [] - ipoint = 0 - for k in range(nlay): - for i in range(self.nrow): - for j in range(self.ncol): - if ibound[k, i, j] == 0: - continue - - ivert = [] - pts = self.get_vertices(i, j) - pt0, pt1, pt2, pt3, pt0 = pts - - z = bot[k, i, j] - - verts[ipoint, 0:2] = np.array(pt1) - verts[ipoint, 2] = z - ivert.append(ipoint) - ipoint += 1 - - verts[ipoint, 0:2] = np.array(pt2) - verts[ipoint, 2] = z - ivert.append(ipoint) - ipoint += 1 - - verts[ipoint, 0:2] = np.array(pt0) - verts[ipoint, 2] = z - ivert.append(ipoint) - ipoint += 1 - - 
verts[ipoint, 0:2] = np.array(pt3) - verts[ipoint, 2] = z - ivert.append(ipoint) - ipoint += 1 - - z = top[k, i, j] - - verts[ipoint, 0:2] = np.array(pt1) - verts[ipoint, 2] = z - ivert.append(ipoint) - ipoint += 1 - - verts[ipoint, 0:2] = np.array(pt2) - verts[ipoint, 2] = z - ivert.append(ipoint) - ipoint += 1 - - verts[ipoint, 0:2] = np.array(pt0) - verts[ipoint, 2] = z - ivert.append(ipoint) - ipoint += 1 - - verts[ipoint, 0:2] = np.array(pt3) - verts[ipoint, 2] = z - ivert.append(ipoint) - ipoint += 1 - - iverts.append(ivert) - - return verts, iverts - - -class SpatialReferenceUnstructured(SpatialReference): - """ - a class to locate an unstructured model grid in x-y space - - Parameters - ---------- - - verts : ndarray - 2d array of x and y points. - - iverts : list of lists - should be of len(ncells) with a list of vertex numbers for each cell - - ncpl : ndarray - array containing the number of cells per layer. ncpl.sum() must be - equal to the total number of cells in the grid. - - layered : boolean - flag to indicated that the grid is layered. In this case, the vertices - define the grid for single layer, and all layers use this same grid. - In this case the ncpl value for each layer must equal len(iverts). - If not layered, then verts and iverts are specified for all cells and - all layers in the grid. In this case, npcl.sum() must equal - len(iverts). - - lenuni : int - the length units flag from the discretization package - - proj4_str: str - a PROJ4 string that identifies the grid in space. warning: case - sensitive! - - units : string - Units for the grid. Must be either feet or meters - - epsg : int - EPSG code that identifies the grid in space. Can be used in lieu of - proj4. PROJ4 attribute will auto-populate if there is an internet - connection(via get_proj4 method). - See https://www.epsg-registry.org/ or spatialreference.org - - length_multiplier : float - multiplier to convert model units to spatial reference units. - delr and delc above will be multiplied by this value. (default=1.) - - Attributes - ---------- - xcenter : ndarray - array of x cell centers - - ycenter : ndarray - array of y cell centers - - Notes - ----- - - """ - - def __init__(self, xc, yc, verts, iverts, ncpl, layered=True, lenuni=1, - proj4_str=None, epsg=None, units=None, - length_multiplier=1.): - warnings.warn("SpatialReferenceUnstructured has been deprecated. " - "Use VertexGrid instead.", - category=DeprecationWarning) - self.xc = xc - self.yc = yc - self.verts = verts - self.iverts = iverts - self.ncpl = ncpl - self.layered = layered - self._lenuni = lenuni - self._proj4_str = proj4_str - self._epsg = epsg - if epsg is not None: - self._proj4_str = getproj4(epsg) - self.supported_units = ["feet", "meters"] - self._units = units - self._length_multiplier = length_multiplier - - # set defaults - self._xul = 0. - self._yul = 0. - self.rotation = 0. 
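get_2d_vertex_connectivity above (and the 3d variants that build on it) numbers grid corners row-major on the (nrow + 1) x (ncol + 1) vertex lattice; for a 2 x 2 grid the first cell therefore picks up corners 0, 1, 4, 3 in clockwise order:

    nrow, ncol = 2, 2
    ncolvert = ncol + 1
    iverts = []
    for i in range(nrow):
        for j in range(ncol):
            iv1 = i * ncolvert + j         # upper left corner
            iv2 = iv1 + 1                  # upper right
            iv4 = (i + 1) * ncolvert + j   # lower left
            iv3 = iv4 + 1                  # lower right
            iverts.append([iv1, iv2, iv3, iv4])
    assert iverts[0] == [0, 1, 4, 3]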
- - if self.layered: - assert all([n == len(iverts) for n in ncpl]) - assert self.xc.shape[0] == self.ncpl[0] - assert self.yc.shape[0] == self.ncpl[0] - else: - msg = ('Length of iverts must equal ncpl.sum ' - '({} {})'.format(len(iverts), ncpl)) - assert len(iverts) == ncpl.sum(), msg - assert self.xc.shape[0] == self.ncpl.sum() - assert self.yc.shape[0] == self.ncpl.sum() - return - - @property - def grid_type(self): - return "unstructured" - - def write_shapefile(self, filename='grid.shp'): - """ - Write shapefile of the grid - - Parameters - ---------- - filename : string - filename for shapefile - - Returns - ------- - - """ - raise NotImplementedError() - return - - def write_gridSpec(self, filename): - """ - Write a PEST-style grid specification file - - Parameters - ---------- - filename : string - filename for grid specification file - - Returns - ------- - - """ - raise NotImplementedError() - return - - @classmethod - def from_gridspec(cls, fname): - """ - Create a new SpatialReferenceUnstructured grid from an PEST - grid specification file - - Parameters - ---------- - fname : string - File name for grid specification file - - Returns - ------- - sru : flopy.utils.reference.SpatialReferenceUnstructured - - """ - raise NotImplementedError() - return - - @classmethod - def from_argus_export(cls, fname, nlay=1): - """ - Create a new SpatialReferenceUnstructured grid from an Argus One - Trimesh file - - Parameters - ---------- - fname : string - File name - - nlay : int - Number of layers to create - - Returns - ------- - sru : flopy.utils.reference.SpatialReferenceUnstructured - - """ - from ..utils.geometry import get_polygon_centroid - f = open(fname, 'r') - line = f.readline() - ll = line.split() - ncells, nverts = ll[0:2] - ncells = int(ncells) - nverts = int(nverts) - verts = np.empty((nverts, 2), dtype=np.float) - xc = np.empty((ncells), dtype=np.float) - yc = np.empty((ncells), dtype=np.float) - - # read the vertices - f.readline() - for ivert in range(nverts): - line = f.readline() - ll = line.split() - c, iv, x, y = ll[0:4] - verts[ivert, 0] = x - verts[ivert, 1] = y - - # read the cell information and create iverts, xc, and yc - iverts = [] - for icell in range(ncells): - line = f.readline() - ll = line.split() - ivlist = [] - for ic in ll[2:5]: - ivlist.append(int(ic) - 1) - if ivlist[0] != ivlist[-1]: - ivlist.append(ivlist[0]) - iverts.append(ivlist) - xc[icell], yc[icell] = get_polygon_centroid(verts[ivlist, :]) - - # close file and return spatial reference - f.close() - return cls(xc, yc, verts, iverts, np.array(nlay * [len(iverts)])) - - def __setattr__(self, key, value): - super(SpatialReference, self).__setattr__(key, value) - return - - def get_extent(self): - """ - Get the extent of the grid - - Returns - ------- - extent : tuple - min and max grid coordinates - - """ - xmin = self.verts[:, 0].min() - xmax = self.verts[:, 0].max() - ymin = self.verts[:, 1].min() - ymax = self.verts[:, 1].max() - return (xmin, xmax, ymin, ymax) - - def get_xcenter_array(self): - """ - Return a numpy one-dimensional float array that has the cell center x - coordinate for every cell in the grid in model space - not offset or - rotated. - - """ - return self.xc - - def get_ycenter_array(self): - """ - Return a numpy one-dimensional float array that has the cell center x - coordinate for every cell in the grid in model space - not offset of - rotated. 
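from_argus_export above expects the Argus ONE Trimesh layout as the parser reads it: a header whose first two fields are the cell and vertex counts, one skipped line, a block of vertex records with the x, y coordinates in fields 3 and 4, then triangle records carrying one-based vertex numbers in fields 3-5. A schematic input consistent with that code (not a verified Argus export):

    1 3 extra-header-fields
    (this line is skipped by the reader)
    V 1 0.0 0.0
    V 2 1.0 0.0
    V 3 0.0 1.0
    T 1 1 2 3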
- - """ - return self.yc - - def plot_array(self, a, ax=None): - """ - Create a QuadMesh plot of the specified array using patches - - Parameters - ---------- - a : np.ndarray - - Returns - ------- - quadmesh : matplotlib.collections.QuadMesh - - """ - from ..plot import plotutil - - patch_collection = plotutil.plot_cvfd(self.verts, self.iverts, a=a, - ax=ax) - return patch_collection - - def get_grid_line_collection(self, **kwargs): - """ - Get a patch collection of the grid - - """ - from ..plot import plotutil - edgecolor = kwargs.pop('colors') - pc = plotutil.cvfd_to_patch_collection(self.verts, self.iverts) - pc.set(facecolor='none') - pc.set(edgecolor=edgecolor) - return pc - - def contour_array(self, ax, a, **kwargs): - """ - Create a QuadMesh plot of the specified array using pcolormesh - - Parameters - ---------- - ax : matplotlib.axes.Axes - ax to add the contours - - a : np.ndarray - array to contour - - Returns - ------- - contour_set : ContourSet - - """ - contour_set = ax.tricontour(self.xcenter, self.ycenter, - a, **kwargs) - return contour_set - - -class TemporalReference(object): - """ - For now, just a container to hold start time and time units files - outside of DIS package. - """ - - defaults = {'itmuni': 4, - 'start_datetime': '01-01-1970'} - - itmuni_values = {'undefined': 0, - 'seconds': 1, - 'minutes': 2, - 'hours': 3, - 'days': 4, - 'years': 5} - - itmuni_text = {v: k for k, v in itmuni_values.items()} - - def __init__(self, itmuni=4, start_datetime=None): - self.itmuni = itmuni - self.start_datetime = start_datetime - - @property - def model_time_units(self): - return self.itmuni_text[self.itmuni] - - -class epsgRef: - """ - Sets up a local database of text representations of coordinate reference - systems, keyed by EPSG code. - - The database is epsgref.json, located in the user's data directory. If - optional 'appdirs' package is available, this is in the platform-dependent - user directory, otherwise in the user's 'HOME/.flopy' directory. 
- """ - - def __init__(self): - warnings.warn( - "epsgRef has been deprecated.", category=DeprecationWarning) - try: - from appdirs import user_data_dir - except ImportError: - user_data_dir = None - if user_data_dir: - datadir = user_data_dir('flopy') - else: - # if appdirs is not installed, use user's home directory - datadir = os.path.join(os.path.expanduser('~'), '.flopy') - if not os.path.isdir(datadir): - os.makedirs(datadir) - dbname = 'epsgref.json' - self.location = os.path.join(datadir, dbname) - - def to_dict(self): - """ - Returns dict with EPSG code integer key, and WKT CRS text - """ - data = OrderedDict() - if os.path.exists(self.location): - with open(self.location, 'r') as f: - loaded_data = json.load(f, object_pairs_hook=OrderedDict) - # convert JSON key from str to EPSG integer - for key, value in loaded_data.items(): - try: - data[int(key)] = value - except ValueError: - data[key] = value - return data - - def _write(self, data): - with open(self.location, 'w') as f: - json.dump(data, f, indent=0) - f.write('\n') - - def reset(self, verbose=True): - if os.path.exists(self.location): - os.remove(self.location) - if verbose: - print('Resetting {}'.format(self.location)) - - def add(self, epsg, prj): - """ - add an epsg code to epsgref.json - """ - data = self.to_dict() - data[epsg] = prj - self._write(data) - - def get(self, epsg): - """ - returns prj from a epsg code, otherwise None if not found - """ - data = self.to_dict() - return data.get(epsg) - - def remove(self, epsg): - """ - removes an epsg entry from epsgref.json - """ - data = self.to_dict() - if epsg in data: - del data[epsg] - self._write(data) - - @staticmethod - def show(): - ep = epsgRef() - prj = ep.to_dict() - for k, v in prj.items(): - print('{}:\n{}\n'.format(k, v)) - - -class crs(object): - """ - Container to parse and store coordinate reference system parameters, - and translate between different formats. - """ - - def __init__(self, prj=None, esri_wkt=None, epsg=None): - warnings.warn( - "crs has been deprecated. 
Use CRS in shapefile_utils instead.", - category=DeprecationWarning) - self.wktstr = None - if prj is not None: - with open(prj) as fprj: - self.wktstr = fprj.read() - elif esri_wkt is not None: - self.wktstr = esri_wkt - elif epsg is not None: - wktstr = getprj(epsg) - if wktstr is not None: - self.wktstr = wktstr - if self.wktstr is not None: - self.parse_wkt() - - @property - def crs(self): - """ - Dict mapping crs attributes to proj4 parameters - """ - proj = None - if self.projcs is not None: - # projection - if 'mercator' in self.projcs.lower(): - if 'transverse' in self.projcs.lower() or \ - 'tm' in self.projcs.lower(): - proj = 'tmerc' - else: - proj = 'merc' - elif 'utm' in self.projcs.lower() and \ - 'zone' in self.projcs.lower(): - proj = 'utm' - elif 'stateplane' in self.projcs.lower(): - proj = 'lcc' - elif 'lambert' and 'conformal' and 'conic' in self.projcs.lower(): - proj = 'lcc' - elif 'albers' in self.projcs.lower(): - proj = 'aea' - elif self.projcs is None and self.geogcs is not None: - proj = 'longlat' - - # datum - datum = None - if 'NAD' in self.datum.lower() or \ - 'north' in self.datum.lower() and \ - 'america' in self.datum.lower(): - datum = 'nad' - if '83' in self.datum.lower(): - datum += '83' - elif '27' in self.datum.lower(): - datum += '27' - elif '84' in self.datum.lower(): - datum = 'wgs84' - - # ellipse - ellps = None - if '1866' in self.spheroid_name: - ellps = 'clrk66' - elif 'grs' in self.spheroid_name.lower(): - ellps = 'grs80' - elif 'wgs' in self.spheroid_name.lower(): - ellps = 'wgs84' - - # prime meridian - pm = self.primem[0].lower() - - return {'proj': proj, - 'datum': datum, - 'ellps': ellps, - 'a': self.semi_major_axis, - 'rf': self.inverse_flattening, - 'lat_0': self.latitude_of_origin, - 'lat_1': self.standard_parallel_1, - 'lat_2': self.standard_parallel_2, - 'lon_0': self.central_meridian, - 'k_0': self.scale_factor, - 'x_0': self.false_easting, - 'y_0': self.false_northing, - 'units': self.projcs_unit, - 'zone': self.utm_zone} - - @property - def grid_mapping_attribs(self): - """ - Map parameters for CF Grid Mappings - http://http://cfconventions.org/cf-conventions/cf-conventions.html, - Appendix F: Grid Mappings - """ - if self.wktstr is not None: - sp = [p for p in [self.standard_parallel_1, - self.standard_parallel_2] - if p is not None] - sp = sp if len(sp) > 0 else None - proj = self.crs['proj'] - names = {'aea': 'albers_conical_equal_area', - 'aeqd': 'azimuthal_equidistant', - 'laea': 'lambert_azimuthal_equal_area', - 'longlat': 'latitude_longitude', - 'lcc': 'lambert_conformal_conic', - 'merc': 'mercator', - 'tmerc': 'transverse_mercator', - 'utm': 'transverse_mercator'} - attribs = {'grid_mapping_name': names[proj], - 'semi_major_axis': self.crs['a'], - 'inverse_flattening': self.crs['rf'], - 'standard_parallel': sp, - 'longitude_of_central_meridian': self.crs['lon_0'], - 'latitude_of_projection_origin': self.crs['lat_0'], - 'scale_factor_at_projection_origin': self.crs['k_0'], - 'false_easting': self.crs['x_0'], - 'false_northing': self.crs['y_0']} - return {k: v for k, v in attribs.items() if v is not None} - - @property - def proj4(self): - """ - Not implemented yet - """ - return None - - def parse_wkt(self): - - self.projcs = self._gettxt('PROJCS["', '"') - self.utm_zone = None - if self.projcs is not None and 'utm' in self.projcs.lower(): - self.utm_zone = self.projcs[-3:].lower().strip('n').strip('s') - self.geogcs = self._gettxt('GEOGCS["', '"') - self.datum = self._gettxt('DATUM["', '"') - tmp = 
self._getgcsparam('SPHEROID') - self.spheroid_name = tmp.pop(0) - self.semi_major_axis = tmp.pop(0) - self.inverse_flattening = tmp.pop(0) - self.primem = self._getgcsparam('PRIMEM') - self.gcs_unit = self._getgcsparam('UNIT') - self.projection = self._gettxt('PROJECTION["', '"') - self.latitude_of_origin = self._getvalue('latitude_of_origin') - self.central_meridian = self._getvalue('central_meridian') - self.standard_parallel_1 = self._getvalue('standard_parallel_1') - self.standard_parallel_2 = self._getvalue('standard_parallel_2') - self.scale_factor = self._getvalue('scale_factor') - self.false_easting = self._getvalue('false_easting') - self.false_northing = self._getvalue('false_northing') - self.projcs_unit = self._getprojcs_unit() - - def _gettxt(self, s1, s2): - s = self.wktstr.lower() - strt = s.find(s1.lower()) - if strt >= 0: # -1 indicates not found - strt += len(s1) - end = s[strt:].find(s2.lower()) + strt - return self.wktstr[strt:end] - - def _getvalue(self, k): - s = self.wktstr.lower() - strt = s.find(k.lower()) - if strt >= 0: - strt += len(k) - end = s[strt:].find(']') + strt - try: - return float(self.wktstr[strt:end].split(',')[1]) - except: - print(' could not typecast wktstr to a float') - - def _getgcsparam(self, txt): - nvalues = 3 if txt.lower() == 'spheroid' else 2 - tmp = self._gettxt('{}["'.format(txt), ']') - if tmp is not None: - tmp = tmp.replace('"', '').split(',') - name = tmp[0:1] - values = list(map(float, tmp[1:nvalues])) - return name + values - else: - return [None] * nvalues - - def _getprojcs_unit(self): - if self.projcs is not None: - tmp = self.wktstr.lower().split('unit["')[-1] - uname, ufactor = tmp.strip().strip(']').split('",')[0:2] - ufactor = float(ufactor.split(']')[0].split()[0].split(',')[0]) - return uname, ufactor - return None, None - - -def getprj(epsg, addlocalreference=True, text='esriwkt'): - """ - Gets projection file (.prj) text for given epsg code from - spatialreference.org - - Parameters - ---------- - epsg : int - epsg code for coordinate system - addlocalreference : boolean - adds the projection file text associated with epsg to a local - database, epsgref.json, located in the user's data directory. - - References - ---------- - https://www.epsg-registry.org/ - - Returns - ------- - prj : str - text for a projection (*.prj) file. - - """ - warnings.warn("SpatialReference has been deprecated. Use StructuredGrid " - "instead.", category=DeprecationWarning) - epsgfile = epsgRef() - wktstr = epsgfile.get(epsg) - if wktstr is None: - wktstr = get_spatialreference(epsg, text=text) - if addlocalreference and wktstr is not None: - epsgfile.add(epsg, wktstr) - return wktstr - - -def get_spatialreference(epsg, text='esriwkt'): - """ - Gets text for given epsg code and text format from spatialreference.org - - Fetches the reference text using the url: - https://spatialreference.org/ref/epsg/// - - See: https://www.epsg-registry.org/ - - Parameters - ---------- - epsg : int - epsg code for coordinate system - text : str - string added to url - - Returns - ------- - url : str - - """ - from flopy.utils.flopy_io import get_url_text - - warnings.warn("SpatialReference has been deprecated. 
Use StructuredGrid " - "instead.", category=DeprecationWarning) - - epsg_categories = ['epsg', 'esri'] - for cat in epsg_categories: - url = "{}/ref/{}/{}/{}/".format(srefhttp, cat, epsg, text) - result = get_url_text(url) - if result is not None: - break - if result is not None: - return result.replace("\n", "") - elif result is None and text != 'epsg': - for cat in epsg_categories: - error_msg = 'No internet connection or ' + \ - 'epsg code {} '.format(epsg) + \ - 'not found at {}/ref/'.format(srefhttp) + \ - '{}/{}/{}'.format(cat, cat, epsg) - print(error_msg) - # epsg code not listed on spatialreference.org - # may still work with pyproj - elif text == 'epsg': - return 'epsg:{}'.format(epsg) - - -def getproj4(epsg): - """ - Get projection file (.prj) text for given epsg code from - spatialreference.org. See: https://www.epsg-registry.org/ - - Parameters - ---------- - epsg : int - epsg code for coordinate system - - Returns - ------- - prj : str - text for a projection (*.prj) file. - - """ - warnings.warn("SpatialReference has been deprecated. Use StructuredGrid " - "instead.", category=DeprecationWarning) - - return get_spatialreference(epsg, text='proj4') +""" +Module spatial referencing for flopy model objects + +""" +import json +import numpy as np +import os +import warnings + +from collections import OrderedDict + +# web address of spatial reference dot org +srefhttp = 'https://spatialreference.org' + + +class SpatialReference(object): + """ + a class to locate a structured model grid in x-y space + + Parameters + ---------- + + delr : numpy ndarray + the model discretization delr vector + (An array of spacings along a row) + delc : numpy ndarray + the model discretization delc vector + (An array of spacings along a column) + lenuni : int + the length units flag from the discretization package + (default 2) + xul : float + the x coordinate of the upper left corner of the grid + Enter either xul and yul or xll and yll. + yul : float + the y coordinate of the upper left corner of the grid + Enter either xul and yul or xll and yll. + xll : float + the x coordinate of the lower left corner of the grid + Enter either xul and yul or xll and yll. + yll : float + the y coordinate of the lower left corner of the grid + Enter either xul and yul or xll and yll. + rotation : float + the counter-clockwise rotation (in degrees) of the grid + + proj4_str: str + a PROJ4 string that identifies the grid in space. warning: case + sensitive! + + units : string + Units for the grid. Must be either feet or meters + + epsg : int + EPSG code that identifies the grid in space. Can be used in lieu of + proj4. PROJ4 attribute will auto-populate if there is an internet + connection(via get_proj4 method). + See https://www.epsg-registry.org/ or spatialreference.org + + length_multiplier : float + multiplier to convert model units to spatial reference units. + delr and delc above will be multiplied by this value. (default=1.) 
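For orientation, a minimal usage sketch of this deprecated constructor with the parameters listed above; the grid dimensions, offsets, and rotation are hypothetical:

>>> import numpy as np
>>> from flopy.utils.reference import SpatialReference
>>> delr = np.full(10, 100.)  # ten columns, each 100 units wide
>>> delc = np.full(8, 100.)   # eight rows, each 100 units tall
>>> sr = SpatialReference(delr=delr, delc=delc, lenuni=2,
...                       xul=5000., yul=10000., rotation=15.)

Only one corner pair should be given; supplying both xul and xll (or yul and yll) raises a ValueError in set_spatialreference.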
+ + Attributes + ---------- + xedge : ndarray + array of column edges + + yedge : ndarray + array of row edges + + xgrid : ndarray + numpy meshgrid of xedges + + ygrid : ndarray + numpy meshgrid of yedges + + xcenter : ndarray + array of column centers + + ycenter : ndarray + array of row centers + + xcentergrid : ndarray + numpy meshgrid of column centers + + ycentergrid : ndarray + numpy meshgrid of row centers + + vertices : 1D array + 1D array of cell vertices for whole grid in C-style (row-major) order + (same as np.ravel()) + + + Notes + ----- + + xul and yul can be explicitly (re)set after SpatialReference + instantiation, but only before any of the other attributes and methods are + accessed + + """ + + xul, yul = None, None + xll, yll = None, None + rotation = 0. + length_multiplier = 1. + origin_loc = 'ul' # or ll + + defaults = {"xul": None, "yul": None, "rotation": 0., + "proj4_str": None, + "units": None, "lenuni": 2, + "length_multiplier": None, + "source": 'defaults'} + + lenuni_values = {'undefined': 0, + 'feet': 1, + 'meters': 2, + 'centimeters': 3} + lenuni_text = {v: k for k, v in lenuni_values.items()} + + def __init__(self, delr=np.array([]), delc=np.array([]), lenuni=2, + xul=None, yul=None, xll=None, yll=None, rotation=0.0, + proj4_str=None, epsg=None, prj=None, units=None, + length_multiplier=None): + warnings.warn( + "SpatialReference has been deprecated. Use StructuredGrid" + " instead.", + category=DeprecationWarning) + + for delrc in [delr, delc]: + if isinstance(delrc, float) or isinstance(delrc, int): + msg = ('delr and delcs must be an array or sequences equal in ' + 'length to the number of rows/columns.') + raise TypeError(msg) + + self.delc = np.atleast_1d(np.array(delc)).astype( + np.float64) # * length_multiplier + self.delr = np.atleast_1d(np.array(delr)).astype( + np.float64) # * length_multiplier + + if self.delr.sum() == 0 or self.delc.sum() == 0: + if xll is None or yll is None: + msg = ('Warning: no grid spacing or lower-left corner ' + 'supplied. Setting the offset with xul, yul requires ' + 'arguments for delr and delc. Origin will be set to ' + 'zero.') + print(msg) + xll, yll = 0, 0 + xul, yul = None, None + + self._lenuni = lenuni + self._proj4_str = proj4_str + + self._epsg = epsg + if epsg is not None: + self._proj4_str = getproj4(self._epsg) + self.prj = prj + self._wkt = None + self.crs = crs(prj=prj, epsg=epsg) + + self.supported_units = ["feet", "meters"] + self._units = units + self._length_multiplier = length_multiplier + self._reset() + self.set_spatialreference(xul, yul, xll, yll, rotation) + + @property + def xll(self): + if self.origin_loc == 'll': + xll = self._xll if self._xll is not None else 0. + elif self.origin_loc == 'ul': + # calculate coords for lower left corner + xll = self._xul - (np.sin(self.theta) * self.yedge[0] * + self.length_multiplier) + return xll + + @property + def yll(self): + if self.origin_loc == 'll': + yll = self._yll if self._yll is not None else 0. + elif self.origin_loc == 'ul': + # calculate coords for lower left corner + yll = self._yul - (np.cos(self.theta) * self.yedge[0] * + self.length_multiplier) + return yll + + @property + def xul(self): + if self.origin_loc == 'll': + # calculate coords for upper left corner + xul = self._xll + (np.sin(self.theta) * self.yedge[0] * + self.length_multiplier) + if self.origin_loc == 'ul': + # calculate coords for lower left corner + xul = self._xul if self._xul is not None else 0. 
+ return xul + + @property + def yul(self): + if self.origin_loc == 'll': + # calculate coords for upper left corner + yul = self._yll + (np.cos(self.theta) * self.yedge[0] * + self.length_multiplier) + + if self.origin_loc == 'ul': + # calculate coords for lower left corner + yul = self._yul if self._yul is not None else 0. + return yul + + @property + def proj4_str(self): + proj4_str = None + if self._proj4_str is not None: + if "epsg" in self._proj4_str.lower(): + proj4_str = self._proj4_str + # set the epsg if proj4 specifies it + tmp = [i for i in self._proj4_str.split() if + 'epsg' in i.lower()] + self._epsg = int(tmp[0].split(':')[1]) + else: + proj4_str = self._proj4_str + elif self.epsg is not None: + proj4_str = 'epsg:{}'.format(self.epsg) + return proj4_str + + @property + def epsg(self): + # don't reset the proj4 string here + # because proj4 attribute may already be populated + # (with more details than getproj4 would return) + # instead reset proj4 when epsg is set + # (on init or setattr) + return self._epsg + + @property + def wkt(self): + if self._wkt is None: + if self.prj is not None: + with open(self.prj) as src: + wkt = src.read() + elif self.epsg is not None: + wkt = getprj(self.epsg) + else: + return None + return wkt + else: + return self._wkt + + @property + def lenuni(self): + return self._lenuni + + def _parse_units_from_proj4(self): + units = None + try: + # need this because preserve_units doesn't seem to be + # working for complex proj4 strings. So if an + # epsg code was passed, we have no choice, but if a + # proj4 string was passed, we can just parse it + if "EPSG" in self.proj4_str.upper(): + import pyproj + + crs = pyproj.Proj(self.proj4_str, preserve_units=True) + proj_str = crs.srs + else: + proj_str = self.proj4_str + # http://proj4.org/parameters.html#units + # from proj4 source code + # "us-ft", "0.304800609601219", "U.S. Surveyor's Foot", + # "ft", "0.3048", "International Foot", + if "units=m" in proj_str: + units = "meters" + elif "units=ft" in proj_str or \ + "units=us-ft" in proj_str or \ + "to_meters:0.3048" in proj_str: + units = "feet" + return units + except: + if self.proj4_str is not None: + print(' could not parse units from {}'.format( + self.proj4_str)) + + @property + def units(self): + if self._units is not None: + units = self._units.lower() + else: + units = self._parse_units_from_proj4() + if units is None: + # print("warning: assuming SpatialReference units are meters") + units = 'meters' + assert units in self.supported_units + return units + + @property + def length_multiplier(self): + """ + Attempt to identify multiplier for converting from + model units to sr units, defaulting to 1. + """ + lm = None + if self._length_multiplier is not None: + lm = self._length_multiplier + else: + if self.model_length_units == 'feet': + if self.units == 'meters': + lm = 0.3048 + elif self.units == 'feet': + lm = 1. + elif self.model_length_units == 'meters': + if self.units == 'feet': + lm = 1 / .3048 + elif self.units == 'meters': + lm = 1. + elif self.model_length_units == 'centimeters': + if self.units == 'meters': + lm = 1 / 100. + elif self.units == 'feet': + lm = 1 / 30.48 + else: # model units unspecified; default to 1 + lm = 1. + return lm + + @property + def model_length_units(self): + return self.lenuni_text[self.lenuni] + + @property + def bounds(self): + """ + Return bounding box in shapely order. 
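The units and length_multiplier properties above resolve a conversion factor from the model's lenuni to the coordinate-system units; a hedged sketch of one path, with a hypothetical proj4 string:

>>> sr = SpatialReference(delr=np.full(10, 100.), delc=np.full(8, 100.),
...                       lenuni=1, xll=0., yll=0.,
...                       proj4_str='+proj=utm +zone=14 +units=m')
>>> sr.units              # parsed from 'units=m' in the proj4 string
'meters'
>>> sr.length_multiplier  # model feet (lenuni=1) to sr meters
0.3048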
+ """ + xmin, xmax, ymin, ymax = self.get_extent() + return xmin, ymin, xmax, ymax + + @staticmethod + def load(namefile=None, reffile='usgs.model.reference'): + """ + Attempts to load spatial reference information from + the following files (in order): + 1) usgs.model.reference + 2) NAM file (header comment) + 3) SpatialReference.default dictionary + """ + reffile = os.path.join(os.path.split(namefile)[0], reffile) + d = SpatialReference.read_usgs_model_reference_file(reffile) + if d is not None: + return d + d = SpatialReference.attribs_from_namfile_header(namefile) + if d is not None: + return d + else: + return SpatialReference.defaults + + @staticmethod + def attribs_from_namfile_header(namefile): + # check for reference info in the nam file header + d = SpatialReference.defaults.copy() + d['source'] = 'namfile' + if namefile is None: + return None + header = [] + with open(namefile, 'r') as f: + for line in f: + if not line.startswith('#'): + break + header.extend(line.strip().replace('#', '').split(';')) + + for item in header: + if "xul" in item.lower(): + try: + d['xul'] = float(item.split(':')[1]) + except: + print(' could not parse xul ' + + 'in {}'.format(namefile)) + elif "yul" in item.lower(): + try: + d['yul'] = float(item.split(':')[1]) + except: + print(' could not parse yul ' + + 'in {}'.format(namefile)) + elif "rotation" in item.lower(): + try: + d['rotation'] = float(item.split(':')[1]) + except: + print(' could not parse rotation ' + + 'in {}'.format(namefile)) + elif "proj4_str" in item.lower(): + try: + proj4_str = ':'.join(item.split(':')[1:]).strip() + if proj4_str.lower() == 'none': + proj4_str = None + d['proj4_str'] = proj4_str + except: + print(' could not parse proj4_str ' + + 'in {}'.format(namefile)) + elif "start" in item.lower(): + try: + d['start_datetime'] = item.split(':')[1].strip() + except: + print(' could not parse start ' + + 'in {}'.format(namefile)) + + # spatial reference length units + elif "units" in item.lower(): + d['units'] = item.split(':')[1].strip() + # model length units + elif "lenuni" in item.lower(): + d['lenuni'] = int(item.split(':')[1].strip()) + # multiplier for converting from model length units to sr length units + elif "length_multiplier" in item.lower(): + d['length_multiplier'] = float(item.split(':')[1].strip()) + return d + + @staticmethod + def read_usgs_model_reference_file(reffile='usgs.model.reference'): + """ + read spatial reference info from the usgs.model.reference file + https://water.usgs.gov/ogw/policy/gw-model/modelers-setup.html + """ + + ITMUNI = {0: "undefined", 1: "seconds", 2: "minutes", 3: "hours", + 4: "days", + 5: "years"} + itmuni_values = {v: k for k, v in ITMUNI.items()} + + d = SpatialReference.defaults.copy() + d['source'] = 'usgs.model.reference' + # discard default to avoid confusion with epsg code if entered + d.pop('proj4_str') + if os.path.exists(reffile): + with open(reffile) as fref: + for line in fref: + if len(line) > 1: + if line.strip()[0] != '#': + info = line.strip().split('#')[0].split() + if len(info) > 1: + d[info[0].lower()] = ' '.join(info[1:]) + d['xul'] = float(d['xul']) + d['yul'] = float(d['yul']) + d['rotation'] = float(d['rotation']) + + # convert the model.reference text to a lenuni value + # (these are the model length units) + if 'length_units' in d.keys(): + d['lenuni'] = SpatialReference.lenuni_values[d['length_units']] + if 'time_units' in d.keys(): + d['itmuni'] = itmuni_values[d['time_units']] + if 'start_date' in d.keys(): + start_datetime = d.pop('start_date') + 
if 'start_time' in d.keys(): + start_datetime += ' {}'.format(d.pop('start_time')) + d['start_datetime'] = start_datetime + if 'epsg' in d.keys(): + try: + d['epsg'] = int(d['epsg']) + except Exception as e: + raise Exception( + "error reading epsg code from file:\n" + str(e)) + # this prioritizes epsg over proj4 if both are given + # (otherwise 'proj4' entry will be dropped below) + elif 'proj4' in d.keys(): + d['proj4_str'] = d['proj4'] + + # drop any other items that aren't used in sr class + d = {k: v for k, v in d.items() if + k.lower() in SpatialReference.defaults.keys() + or k.lower() in {'epsg', 'start_datetime', 'itmuni', + 'source'}} + return d + else: + return None + + def __setattr__(self, key, value): + reset = True + if key == "delr": + super(SpatialReference, self). \ + __setattr__("delr", np.atleast_1d(np.array(value))) + elif key == "delc": + super(SpatialReference, self). \ + __setattr__("delc", np.atleast_1d(np.array(value))) + elif key == "xul": + super(SpatialReference, self). \ + __setattr__("_xul", float(value)) + self.origin_loc = 'ul' + elif key == "yul": + super(SpatialReference, self). \ + __setattr__("_yul", float(value)) + self.origin_loc = 'ul' + elif key == "xll": + super(SpatialReference, self). \ + __setattr__("_xll", float(value)) + self.origin_loc = 'll' + elif key == "yll": + super(SpatialReference, self). \ + __setattr__("_yll", float(value)) + self.origin_loc = 'll' + elif key == "length_multiplier": + super(SpatialReference, self). \ + __setattr__("_length_multiplier", float(value)) + # self.set_origin(xul=self.xul, yul=self.yul, xll=self.xll, + # yll=self.yll) + elif key == "rotation": + super(SpatialReference, self). \ + __setattr__("rotation", float(value)) + # self.set_origin(xul=self.xul, yul=self.yul, xll=self.xll, + # yll=self.yll) + elif key == "lenuni": + super(SpatialReference, self). \ + __setattr__("_lenuni", int(value)) + # self.set_origin(xul=self.xul, yul=self.yul, xll=self.xll, + # yll=self.yll) + elif key == "units": + value = value.lower() + assert value in self.supported_units + super(SpatialReference, self). \ + __setattr__("_units", value) + elif key == "proj4_str": + super(SpatialReference, self). \ + __setattr__("_proj4_str", value) + # reset the units and epsg + units = self._parse_units_from_proj4() + if units is not None: + self._units = units + self._epsg = None + elif key == "epsg": + super(SpatialReference, self). \ + __setattr__("_epsg", value) + # reset the units and proj4 + self._units = None + self._proj4_str = getproj4(self._epsg) + self.crs = crs(epsg=value) + elif key == "prj": + super(SpatialReference, self). \ + __setattr__("prj", value) + # translation to proj4 strings in crs class not robust yet + # leave units and proj4 alone for now. 
+ self.crs = crs(prj=value, epsg=self.epsg) + else: + super(SpatialReference, self).__setattr__(key, value) + reset = False + if reset: + self._reset() + + def reset(self, **kwargs): + for key, value in kwargs.items(): + setattr(self, key, value) + return + + def _reset(self): + self._xgrid = None + self._ygrid = None + self._ycentergrid = None + self._xcentergrid = None + self._vertices = None + return + + @property + def nrow(self): + return self.delc.shape[0] + + @property + def ncol(self): + return self.delr.shape[0] + + def __eq__(self, other): + if not isinstance(other, SpatialReference): + return False + if other.xul != self.xul: + return False + if other.yul != self.yul: + return False + if other.rotation != self.rotation: + return False + if other.proj4_str != self.proj4_str: + return False + return True + + @classmethod + def from_namfile(cls, namefile): + attribs = SpatialReference.attribs_from_namfile_header(namefile) + try: + attribs.pop("start_datetime") + except: + print(' could not remove start_datetime') + + return SpatialReference(**attribs) + + @classmethod + def from_gridspec(cls, gridspec_file, lenuni=0): + f = open(gridspec_file, 'r') + raw = f.readline().strip().split() + nrow = int(raw[0]) + ncol = int(raw[1]) + raw = f.readline().strip().split() + xul, yul, rot = float(raw[0]), float(raw[1]), float(raw[2]) + delr = [] + j = 0 + while j < ncol: + raw = f.readline().strip().split() + for r in raw: + if '*' in r: + rraw = r.split('*') + for n in range(int(rraw[0])): + delr.append(float(rraw[1])) + j += 1 + else: + delr.append(float(r)) + j += 1 + delc = [] + i = 0 + while i < nrow: + raw = f.readline().strip().split() + for r in raw: + if '*' in r: + rraw = r.split('*') + for n in range(int(rraw[0])): + delc.append(float(rraw[1])) + i += 1 + else: + delc.append(float(r)) + i += 1 + f.close() + return cls(np.array(delr), np.array(delc), + lenuni, xul=xul, yul=yul, rotation=rot) + + @property + def attribute_dict(self): + return {"xul": self.xul, "yul": self.yul, "rotation": self.rotation, + "proj4_str": self.proj4_str} + + def set_spatialreference(self, xul=None, yul=None, xll=None, yll=None, + rotation=0.0): + """ + set spatial reference - can be called from model instance + + """ + if xul is not None and xll is not None: + msg = ('Both xul and xll entered. Please enter either xul, yul or ' + 'xll, yll.') + raise ValueError(msg) + if yul is not None and yll is not None: + msg = ('Both yul and yll entered. Please enter either xul, yul or ' + 'xll, yll.') + raise ValueError(msg) + # set the origin priority based on the left corner specified + # (the other left corner will be calculated). If none are specified + # then default to upper left + if xul is None and yul is None and xll is None and yll is None: + self.origin_loc = 'ul' + xul = 0. + yul = self.delc.sum() + elif xll is not None: + self.origin_loc = 'll' + else: + self.origin_loc = 'ul' + + self.rotation = rotation + self._xll = xll if xll is not None else 0. + self._yll = yll if yll is not None else 0. + self._xul = xul if xul is not None else 0. + self._yul = yul if yul is not None else 0. 
+ # self.set_origin(xul, yul, xll, yll) + return + + def __repr__(self): + s = "xul:{0:<.10G}; yul:{1:<.10G}; rotation:{2: '.format(interval) + \ + 'maxlevels = {}'.format(maxlevels) + assert nlevels < maxlevels, msg + levels = np.arange(vmin, vmax, interval) + fig, ax = plt.subplots() + ctr = self.contour_array(ax, a, levels=levels) + self.export_contours(filename, ctr, fieldname, epsg, prj, **kwargs) + plt.close() + + def contour_array(self, ax, a, **kwargs): + """ + Create a QuadMesh plot of the specified array using pcolormesh + + Parameters + ---------- + ax : matplotlib.axes.Axes + ax to add the contours + + a : np.ndarray + array to contour + + Returns + ------- + contour_set : ContourSet + + """ + from flopy.plot import ModelMap + + kwargs['ax'] = ax + mm = ModelMap(sr=self) + contour_set = mm.contour_array(a=a, **kwargs) + + return contour_set + + @property + def vertices(self): + """ + Returns a list of vertices for + """ + if self._vertices is None: + self._set_vertices() + return self._vertices + + def _set_vertices(self): + """ + Populate vertices for the whole grid + """ + jj, ii = np.meshgrid(range(self.ncol), range(self.nrow)) + jj, ii = jj.ravel(), ii.ravel() + self._vertices = self.get_vertices(ii, jj) + + def interpolate(self, a, xi, method='nearest'): + """ + Use the griddata method to interpolate values from an array onto the + points defined in xi. For any values outside of the grid, use + 'nearest' to find a value for them. + + Parameters + ---------- + a : numpy.ndarray + array to interpolate from. It must be of size nrow, ncol + xi : numpy.ndarray + array containing x and y point coordinates of size (npts, 2). xi + also works with broadcasting so that if a is a 2d array, then + xi can be passed in as (xgrid, ygrid). + method : {'linear', 'nearest', 'cubic'} + method to use for interpolation (default is 'nearest') + + Returns + ------- + b : numpy.ndarray + array of size (npts) + + """ + try: + from scipy.interpolate import griddata + except: + print('scipy not installed\ntry pip install scipy') + return None + + # Create a 2d array of points for the grid centers + points = np.empty((self.ncol * self.nrow, 2)) + points[:, 0] = self.xcentergrid.flatten() + points[:, 1] = self.ycentergrid.flatten() + + # Use the griddata function to interpolate to the xi points + b = griddata(points, a.flatten(), xi, method=method, fill_value=np.nan) + + # if method is linear or cubic, then replace nan's with a value + # interpolated using nearest + if method != 'nearest': + bn = griddata(points, a.flatten(), xi, method='nearest') + idx = np.isnan(b) + b[idx] = bn[idx] + + return b + + def get_2d_vertex_connectivity(self): + """ + Create the cell 2d vertices array and the iverts index array. These + are the same form as the ones used to instantiate an unstructured + spatial reference. 
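The interpolate method above is a thin wrapper around scipy.interpolate.griddata, backfilling NaNs left by the 'linear' and 'cubic' methods with a second 'nearest' pass; a short sketch reusing the hypothetical sr from earlier (the array values and point coordinates are also hypothetical):

>>> a = np.random.rand(sr.nrow, sr.ncol)             # one value per cell center
>>> xi = np.array([[450., 350.], [5250., 9800.]])    # (npts, 2) x-y points
>>> b = sr.interpolate(a, xi, method='linear')       # returns shape (npts,)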
+ + Returns + ------- + + verts : ndarray + array of x and y coordinates for the grid vertices + + iverts : list + a list with a list of vertex indices for each cell in clockwise + order starting with the upper left corner + + """ + x = self.xgrid.flatten() + y = self.ygrid.flatten() + nrowvert = self.nrow + 1 + ncolvert = self.ncol + 1 + npoints = nrowvert * ncolvert + verts = np.empty((npoints, 2), dtype=np.float) + verts[:, 0] = x + verts[:, 1] = y + iverts = [] + for i in range(self.nrow): + for j in range(self.ncol): + iv1 = i * ncolvert + j # upper left point number + iv2 = iv1 + 1 + iv4 = (i + 1) * ncolvert + j + iv3 = iv4 + 1 + iverts.append([iv1, iv2, iv3, iv4]) + return verts, iverts + + def get_3d_shared_vertex_connectivity(self, nlay, botm, ibound=None): + + # get the x and y points for the grid + x = self.xgrid.flatten() + y = self.ygrid.flatten() + + # set the size of the vertex grid + nrowvert = self.nrow + 1 + ncolvert = self.ncol + 1 + nlayvert = nlay + 1 + nrvncv = nrowvert * ncolvert + npoints = nrvncv * nlayvert + + # create and fill a 3d points array for the grid + verts = np.empty((npoints, 3), dtype=np.float) + verts[:, 0] = np.tile(x, nlayvert) + verts[:, 1] = np.tile(y, nlayvert) + istart = 0 + istop = nrvncv + for k in range(nlay + 1): + verts[istart:istop, 2] = self.interpolate(botm[k], + verts[istart:istop, :2], + method='linear') + istart = istop + istop = istart + nrvncv + + # create the list of points comprising each cell. points must be + # listed a specific way according to vtk requirements. + iverts = [] + for k in range(nlay): + koffset = k * nrvncv + for i in range(self.nrow): + for j in range(self.ncol): + if ibound is not None: + if ibound[k, i, j] == 0: + continue + iv1 = i * ncolvert + j + koffset + iv2 = iv1 + 1 + iv4 = (i + 1) * ncolvert + j + koffset + iv3 = iv4 + 1 + iverts.append([iv4 + nrvncv, iv3 + nrvncv, + iv1 + nrvncv, iv2 + nrvncv, + iv4, iv3, iv1, iv2]) + + # renumber and reduce the vertices if ibound_filter + if ibound is not None: + + # go through the vertex list and mark vertices that are used + ivertrenum = np.zeros(npoints, dtype=np.int) + for vlist in iverts: + for iv in vlist: + # mark vertices that are actually used + ivertrenum[iv] = 1 + + # renumber vertices that are used, skip those that are not + inum = 0 + for i in range(npoints): + if ivertrenum[i] > 0: + inum += 1 + ivertrenum[i] = inum + ivertrenum -= 1 + + # reassign the vertex list using the new vertex numbers + iverts2 = [] + for vlist in iverts: + vlist2 = [] + for iv in vlist: + vlist2.append(ivertrenum[iv]) + iverts2.append(vlist2) + iverts = iverts2 + idx = np.where(ivertrenum >= 0) + verts = verts[idx] + + return verts, iverts + + def get_3d_vertex_connectivity(self, nlay, top, bot, ibound=None): + if ibound is None: + ncells = nlay * self.nrow * self.ncol + ibound = np.ones((nlay, self.nrow, self.ncol), dtype=np.int) + else: + ncells = (ibound != 0).sum() + npoints = ncells * 8 + verts = np.empty((npoints, 3), dtype=np.float) + iverts = [] + ipoint = 0 + for k in range(nlay): + for i in range(self.nrow): + for j in range(self.ncol): + if ibound[k, i, j] == 0: + continue + + ivert = [] + pts = self.get_vertices(i, j) + pt0, pt1, pt2, pt3, pt0 = pts + + z = bot[k, i, j] + + verts[ipoint, 0:2] = np.array(pt1) + verts[ipoint, 2] = z + ivert.append(ipoint) + ipoint += 1 + + verts[ipoint, 0:2] = np.array(pt2) + verts[ipoint, 2] = z + ivert.append(ipoint) + ipoint += 1 + + verts[ipoint, 0:2] = np.array(pt0) + verts[ipoint, 2] = z + ivert.append(ipoint) + ipoint += 1 + + 
verts[ipoint, 0:2] = np.array(pt3) + verts[ipoint, 2] = z + ivert.append(ipoint) + ipoint += 1 + + z = top[k, i, j] + + verts[ipoint, 0:2] = np.array(pt1) + verts[ipoint, 2] = z + ivert.append(ipoint) + ipoint += 1 + + verts[ipoint, 0:2] = np.array(pt2) + verts[ipoint, 2] = z + ivert.append(ipoint) + ipoint += 1 + + verts[ipoint, 0:2] = np.array(pt0) + verts[ipoint, 2] = z + ivert.append(ipoint) + ipoint += 1 + + verts[ipoint, 0:2] = np.array(pt3) + verts[ipoint, 2] = z + ivert.append(ipoint) + ipoint += 1 + + iverts.append(ivert) + + return verts, iverts + + +class SpatialReferenceUnstructured(SpatialReference): + """ + a class to locate an unstructured model grid in x-y space + + Parameters + ---------- + + verts : ndarray + 2d array of x and y points. + + iverts : list of lists + should be of len(ncells) with a list of vertex numbers for each cell + + ncpl : ndarray + array containing the number of cells per layer. ncpl.sum() must be + equal to the total number of cells in the grid. + + layered : boolean + flag to indicated that the grid is layered. In this case, the vertices + define the grid for single layer, and all layers use this same grid. + In this case the ncpl value for each layer must equal len(iverts). + If not layered, then verts and iverts are specified for all cells and + all layers in the grid. In this case, npcl.sum() must equal + len(iverts). + + lenuni : int + the length units flag from the discretization package + + proj4_str: str + a PROJ4 string that identifies the grid in space. warning: case + sensitive! + + units : string + Units for the grid. Must be either feet or meters + + epsg : int + EPSG code that identifies the grid in space. Can be used in lieu of + proj4. PROJ4 attribute will auto-populate if there is an internet + connection(via get_proj4 method). + See https://www.epsg-registry.org/ or spatialreference.org + + length_multiplier : float + multiplier to convert model units to spatial reference units. + delr and delc above will be multiplied by this value. (default=1.) + + Attributes + ---------- + xcenter : ndarray + array of x cell centers + + ycenter : ndarray + array of y cell centers + + Notes + ----- + + """ + + def __init__(self, xc, yc, verts, iverts, ncpl, layered=True, lenuni=1, + proj4_str=None, epsg=None, units=None, + length_multiplier=1.): + warnings.warn("SpatialReferenceUnstructured has been deprecated. " + "Use VertexGrid instead.", + category=DeprecationWarning) + self.xc = xc + self.yc = yc + self.verts = verts + self.iverts = iverts + self.ncpl = ncpl + self.layered = layered + self._lenuni = lenuni + self._proj4_str = proj4_str + self._epsg = epsg + if epsg is not None: + self._proj4_str = getproj4(epsg) + self.supported_units = ["feet", "meters"] + self._units = units + self._length_multiplier = length_multiplier + + # set defaults + self._xul = 0. + self._yul = 0. + self.rotation = 0. 
+ + if self.layered: + assert all([n == len(iverts) for n in ncpl]) + assert self.xc.shape[0] == self.ncpl[0] + assert self.yc.shape[0] == self.ncpl[0] + else: + msg = ('Length of iverts must equal ncpl.sum ' + '({} {})'.format(len(iverts), ncpl)) + assert len(iverts) == ncpl.sum(), msg + assert self.xc.shape[0] == self.ncpl.sum() + assert self.yc.shape[0] == self.ncpl.sum() + return + + @property + def grid_type(self): + return "unstructured" + + def write_shapefile(self, filename='grid.shp'): + """ + Write shapefile of the grid + + Parameters + ---------- + filename : string + filename for shapefile + + Returns + ------- + + """ + raise NotImplementedError() + return + + def write_gridSpec(self, filename): + """ + Write a PEST-style grid specification file + + Parameters + ---------- + filename : string + filename for grid specification file + + Returns + ------- + + """ + raise NotImplementedError() + return + + @classmethod + def from_gridspec(cls, fname): + """ + Create a new SpatialReferenceUnstructured grid from an PEST + grid specification file + + Parameters + ---------- + fname : string + File name for grid specification file + + Returns + ------- + sru : flopy.utils.reference.SpatialReferenceUnstructured + + """ + raise NotImplementedError() + return + + @classmethod + def from_argus_export(cls, fname, nlay=1): + """ + Create a new SpatialReferenceUnstructured grid from an Argus One + Trimesh file + + Parameters + ---------- + fname : string + File name + + nlay : int + Number of layers to create + + Returns + ------- + sru : flopy.utils.reference.SpatialReferenceUnstructured + + """ + from ..utils.geometry import get_polygon_centroid + f = open(fname, 'r') + line = f.readline() + ll = line.split() + ncells, nverts = ll[0:2] + ncells = int(ncells) + nverts = int(nverts) + verts = np.empty((nverts, 2), dtype=np.float) + xc = np.empty((ncells), dtype=np.float) + yc = np.empty((ncells), dtype=np.float) + + # read the vertices + f.readline() + for ivert in range(nverts): + line = f.readline() + ll = line.split() + c, iv, x, y = ll[0:4] + verts[ivert, 0] = x + verts[ivert, 1] = y + + # read the cell information and create iverts, xc, and yc + iverts = [] + for icell in range(ncells): + line = f.readline() + ll = line.split() + ivlist = [] + for ic in ll[2:5]: + ivlist.append(int(ic) - 1) + if ivlist[0] != ivlist[-1]: + ivlist.append(ivlist[0]) + iverts.append(ivlist) + xc[icell], yc[icell] = get_polygon_centroid(verts[ivlist, :]) + + # close file and return spatial reference + f.close() + return cls(xc, yc, verts, iverts, np.array(nlay * [len(iverts)])) + + def __setattr__(self, key, value): + super(SpatialReference, self).__setattr__(key, value) + return + + def get_extent(self): + """ + Get the extent of the grid + + Returns + ------- + extent : tuple + min and max grid coordinates + + """ + xmin = self.verts[:, 0].min() + xmax = self.verts[:, 0].max() + ymin = self.verts[:, 1].min() + ymax = self.verts[:, 1].max() + return (xmin, xmax, ymin, ymax) + + def get_xcenter_array(self): + """ + Return a numpy one-dimensional float array that has the cell center x + coordinate for every cell in the grid in model space - not offset or + rotated. + + """ + return self.xc + + def get_ycenter_array(self): + """ + Return a numpy one-dimensional float array that has the cell center x + coordinate for every cell in the grid in model space - not offset of + rotated. 
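from_argus_export above reads the vertex and cell records of an Argus One Trimesh export and derives cell centers with get_polygon_centroid; a hedged sketch, with a hypothetical file name:

>>> from flopy.utils.reference import SpatialReferenceUnstructured
>>> sru = SpatialReferenceUnstructured.from_argus_export('mesh.exp', nlay=3)
>>> sru.get_extent()   # (xmin, xmax, ymin, ymax) of the mesh vertices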
+ + """ + return self.yc + + def plot_array(self, a, ax=None): + """ + Create a QuadMesh plot of the specified array using patches + + Parameters + ---------- + a : np.ndarray + + Returns + ------- + quadmesh : matplotlib.collections.QuadMesh + + """ + from ..plot import plotutil + + patch_collection = plotutil.plot_cvfd(self.verts, self.iverts, a=a, + ax=ax) + return patch_collection + + def get_grid_line_collection(self, **kwargs): + """ + Get a patch collection of the grid + + """ + from ..plot import plotutil + edgecolor = kwargs.pop('colors') + pc = plotutil.cvfd_to_patch_collection(self.verts, self.iverts) + pc.set(facecolor='none') + pc.set(edgecolor=edgecolor) + return pc + + def contour_array(self, ax, a, **kwargs): + """ + Create a QuadMesh plot of the specified array using pcolormesh + + Parameters + ---------- + ax : matplotlib.axes.Axes + ax to add the contours + + a : np.ndarray + array to contour + + Returns + ------- + contour_set : ContourSet + + """ + contour_set = ax.tricontour(self.xcenter, self.ycenter, + a, **kwargs) + return contour_set + + +class TemporalReference(object): + """ + For now, just a container to hold start time and time units files + outside of DIS package. + """ + + defaults = {'itmuni': 4, + 'start_datetime': '01-01-1970'} + + itmuni_values = {'undefined': 0, + 'seconds': 1, + 'minutes': 2, + 'hours': 3, + 'days': 4, + 'years': 5} + + itmuni_text = {v: k for k, v in itmuni_values.items()} + + def __init__(self, itmuni=4, start_datetime=None): + self.itmuni = itmuni + self.start_datetime = start_datetime + + @property + def model_time_units(self): + return self.itmuni_text[self.itmuni] + + +class epsgRef: + """ + Sets up a local database of text representations of coordinate reference + systems, keyed by EPSG code. + + The database is epsgref.json, located in the user's data directory. If + optional 'appdirs' package is available, this is in the platform-dependent + user directory, otherwise in the user's 'HOME/.flopy' directory. 
+ """ + + def __init__(self): + warnings.warn( + "epsgRef has been deprecated.", category=DeprecationWarning) + try: + from appdirs import user_data_dir + except ImportError: + user_data_dir = None + if user_data_dir: + datadir = user_data_dir('flopy') + else: + # if appdirs is not installed, use user's home directory + datadir = os.path.join(os.path.expanduser('~'), '.flopy') + if not os.path.isdir(datadir): + os.makedirs(datadir) + dbname = 'epsgref.json' + self.location = os.path.join(datadir, dbname) + + def to_dict(self): + """ + Returns dict with EPSG code integer key, and WKT CRS text + """ + data = OrderedDict() + if os.path.exists(self.location): + with open(self.location, 'r') as f: + loaded_data = json.load(f, object_pairs_hook=OrderedDict) + # convert JSON key from str to EPSG integer + for key, value in loaded_data.items(): + try: + data[int(key)] = value + except ValueError: + data[key] = value + return data + + def _write(self, data): + with open(self.location, 'w') as f: + json.dump(data, f, indent=0) + f.write('\n') + + def reset(self, verbose=True): + if os.path.exists(self.location): + os.remove(self.location) + if verbose: + print('Resetting {}'.format(self.location)) + + def add(self, epsg, prj): + """ + add an epsg code to epsgref.json + """ + data = self.to_dict() + data[epsg] = prj + self._write(data) + + def get(self, epsg): + """ + returns prj from a epsg code, otherwise None if not found + """ + data = self.to_dict() + return data.get(epsg) + + def remove(self, epsg): + """ + removes an epsg entry from epsgref.json + """ + data = self.to_dict() + if epsg in data: + del data[epsg] + self._write(data) + + @staticmethod + def show(): + ep = epsgRef() + prj = ep.to_dict() + for k, v in prj.items(): + print('{}:\n{}\n'.format(k, v)) + + +class crs(object): + """ + Container to parse and store coordinate reference system parameters, + and translate between different formats. + """ + + def __init__(self, prj=None, esri_wkt=None, epsg=None): + warnings.warn( + "crs has been deprecated. 
Use CRS in shapefile_utils instead.", + category=DeprecationWarning) + self.wktstr = None + if prj is not None: + with open(prj) as fprj: + self.wktstr = fprj.read() + elif esri_wkt is not None: + self.wktstr = esri_wkt + elif epsg is not None: + wktstr = getprj(epsg) + if wktstr is not None: + self.wktstr = wktstr + if self.wktstr is not None: + self.parse_wkt() + + @property + def crs(self): + """ + Dict mapping crs attributes to proj4 parameters + """ + proj = None + if self.projcs is not None: + # projection + if 'mercator' in self.projcs.lower(): + if 'transverse' in self.projcs.lower() or \ + 'tm' in self.projcs.lower(): + proj = 'tmerc' + else: + proj = 'merc' + elif 'utm' in self.projcs.lower() and \ + 'zone' in self.projcs.lower(): + proj = 'utm' + elif 'stateplane' in self.projcs.lower(): + proj = 'lcc' + elif 'lambert' and 'conformal' and 'conic' in self.projcs.lower(): + proj = 'lcc' + elif 'albers' in self.projcs.lower(): + proj = 'aea' + elif self.projcs is None and self.geogcs is not None: + proj = 'longlat' + + # datum + datum = None + if 'NAD' in self.datum.lower() or \ + 'north' in self.datum.lower() and \ + 'america' in self.datum.lower(): + datum = 'nad' + if '83' in self.datum.lower(): + datum += '83' + elif '27' in self.datum.lower(): + datum += '27' + elif '84' in self.datum.lower(): + datum = 'wgs84' + + # ellipse + ellps = None + if '1866' in self.spheroid_name: + ellps = 'clrk66' + elif 'grs' in self.spheroid_name.lower(): + ellps = 'grs80' + elif 'wgs' in self.spheroid_name.lower(): + ellps = 'wgs84' + + # prime meridian + pm = self.primem[0].lower() + + return {'proj': proj, + 'datum': datum, + 'ellps': ellps, + 'a': self.semi_major_axis, + 'rf': self.inverse_flattening, + 'lat_0': self.latitude_of_origin, + 'lat_1': self.standard_parallel_1, + 'lat_2': self.standard_parallel_2, + 'lon_0': self.central_meridian, + 'k_0': self.scale_factor, + 'x_0': self.false_easting, + 'y_0': self.false_northing, + 'units': self.projcs_unit, + 'zone': self.utm_zone} + + @property + def grid_mapping_attribs(self): + """ + Map parameters for CF Grid Mappings + http://http://cfconventions.org/cf-conventions/cf-conventions.html, + Appendix F: Grid Mappings + """ + if self.wktstr is not None: + sp = [p for p in [self.standard_parallel_1, + self.standard_parallel_2] + if p is not None] + sp = sp if len(sp) > 0 else None + proj = self.crs['proj'] + names = {'aea': 'albers_conical_equal_area', + 'aeqd': 'azimuthal_equidistant', + 'laea': 'lambert_azimuthal_equal_area', + 'longlat': 'latitude_longitude', + 'lcc': 'lambert_conformal_conic', + 'merc': 'mercator', + 'tmerc': 'transverse_mercator', + 'utm': 'transverse_mercator'} + attribs = {'grid_mapping_name': names[proj], + 'semi_major_axis': self.crs['a'], + 'inverse_flattening': self.crs['rf'], + 'standard_parallel': sp, + 'longitude_of_central_meridian': self.crs['lon_0'], + 'latitude_of_projection_origin': self.crs['lat_0'], + 'scale_factor_at_projection_origin': self.crs['k_0'], + 'false_easting': self.crs['x_0'], + 'false_northing': self.crs['y_0']} + return {k: v for k, v in attribs.items() if v is not None} + + @property + def proj4(self): + """ + Not implemented yet + """ + return None + + def parse_wkt(self): + + self.projcs = self._gettxt('PROJCS["', '"') + self.utm_zone = None + if self.projcs is not None and 'utm' in self.projcs.lower(): + self.utm_zone = self.projcs[-3:].lower().strip('n').strip('s') + self.geogcs = self._gettxt('GEOGCS["', '"') + self.datum = self._gettxt('DATUM["', '"') + tmp = 
self._getgcsparam('SPHEROID') + self.spheroid_name = tmp.pop(0) + self.semi_major_axis = tmp.pop(0) + self.inverse_flattening = tmp.pop(0) + self.primem = self._getgcsparam('PRIMEM') + self.gcs_unit = self._getgcsparam('UNIT') + self.projection = self._gettxt('PROJECTION["', '"') + self.latitude_of_origin = self._getvalue('latitude_of_origin') + self.central_meridian = self._getvalue('central_meridian') + self.standard_parallel_1 = self._getvalue('standard_parallel_1') + self.standard_parallel_2 = self._getvalue('standard_parallel_2') + self.scale_factor = self._getvalue('scale_factor') + self.false_easting = self._getvalue('false_easting') + self.false_northing = self._getvalue('false_northing') + self.projcs_unit = self._getprojcs_unit() + + def _gettxt(self, s1, s2): + s = self.wktstr.lower() + strt = s.find(s1.lower()) + if strt >= 0: # -1 indicates not found + strt += len(s1) + end = s[strt:].find(s2.lower()) + strt + return self.wktstr[strt:end] + + def _getvalue(self, k): + s = self.wktstr.lower() + strt = s.find(k.lower()) + if strt >= 0: + strt += len(k) + end = s[strt:].find(']') + strt + try: + return float(self.wktstr[strt:end].split(',')[1]) + except: + print(' could not typecast wktstr to a float') + + def _getgcsparam(self, txt): + nvalues = 3 if txt.lower() == 'spheroid' else 2 + tmp = self._gettxt('{}["'.format(txt), ']') + if tmp is not None: + tmp = tmp.replace('"', '').split(',') + name = tmp[0:1] + values = list(map(float, tmp[1:nvalues])) + return name + values + else: + return [None] * nvalues + + def _getprojcs_unit(self): + if self.projcs is not None: + tmp = self.wktstr.lower().split('unit["')[-1] + uname, ufactor = tmp.strip().strip(']').split('",')[0:2] + ufactor = float(ufactor.split(']')[0].split()[0].split(',')[0]) + return uname, ufactor + return None, None + + +def getprj(epsg, addlocalreference=True, text='esriwkt'): + """ + Gets projection file (.prj) text for given epsg code from + spatialreference.org + + Parameters + ---------- + epsg : int + epsg code for coordinate system + addlocalreference : boolean + adds the projection file text associated with epsg to a local + database, epsgref.json, located in the user's data directory. + + References + ---------- + https://www.epsg-registry.org/ + + Returns + ------- + prj : str + text for a projection (*.prj) file. + + """ + warnings.warn("SpatialReference has been deprecated. Use StructuredGrid " + "instead.", category=DeprecationWarning) + epsgfile = epsgRef() + wktstr = epsgfile.get(epsg) + if wktstr is None: + wktstr = get_spatialreference(epsg, text=text) + if addlocalreference and wktstr is not None: + epsgfile.add(epsg, wktstr) + return wktstr + + +def get_spatialreference(epsg, text='esriwkt'): + """ + Gets text for given epsg code and text format from spatialreference.org + + Fetches the reference text using the url: + https://spatialreference.org/ref/epsg/// + + See: https://www.epsg-registry.org/ + + Parameters + ---------- + epsg : int + epsg code for coordinate system + text : str + string added to url + + Returns + ------- + url : str + + """ + from flopy.utils.flopy_io import get_url_text + + warnings.warn("SpatialReference has been deprecated. 
Use StructuredGrid " + "instead.", category=DeprecationWarning) + + epsg_categories = ['epsg', 'esri'] + for cat in epsg_categories: + url = "{}/ref/{}/{}/{}/".format(srefhttp, cat, epsg, text) + result = get_url_text(url) + if result is not None: + break + if result is not None: + return result.replace("\n", "") + elif result is None and text != 'epsg': + for cat in epsg_categories: + error_msg = 'No internet connection or ' + \ + 'epsg code {} '.format(epsg) + \ + 'not found at {}/ref/'.format(srefhttp) + \ + '{}/{}/{}'.format(cat, cat, epsg) + print(error_msg) + # epsg code not listed on spatialreference.org + # may still work with pyproj + elif text == 'epsg': + return 'epsg:{}'.format(epsg) + + +def getproj4(epsg): + """ + Get projection file (.prj) text for given epsg code from + spatialreference.org. See: https://www.epsg-registry.org/ + + Parameters + ---------- + epsg : int + epsg code for coordinate system + + Returns + ------- + prj : str + text for a projection (*.prj) file. + + """ + warnings.warn("SpatialReference has been deprecated. Use StructuredGrid " + "instead.", category=DeprecationWarning) + + return get_spatialreference(epsg, text='proj4') diff --git a/flopy/utils/swroutputfile.py b/flopy/utils/swroutputfile.py index b967266ffe..854db3e6d2 100644 --- a/flopy/utils/swroutputfile.py +++ b/flopy/utils/swroutputfile.py @@ -1,809 +1,809 @@ -import sys -import numpy as np -from collections import OrderedDict - -from ..utils.utils_def import FlopyBinaryData - - -class SwrFile(FlopyBinaryData): - """ - Read binary SWR output from MODFLOW SWR Process binary output files - The SwrFile class is the super class from which specific derived - classes are formed. This class should not be instantiated directly - - Parameters - ---------- - filename : string - Name of the swr output file - swrtype : str - swr data type. Valid data types are 'stage', 'budget', - 'flow', 'exchange', or 'structure'. (default is 'stage') - precision : string - 'single' or 'double'. Default is 'double'. - verbose : bool - Write information to the screen. Default is False. - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - - Examples - -------- - - >>> import flopy - >>> so = flopy.utils.SwrFile('mymodel.swr.stage.bin') - - """ - - def __init__(self, filename, swrtype='stage', precision='double', - verbose=False): - """ - Class constructor. - - """ - super(SwrFile, self).__init__() - self.set_float(precision=precision) - self.header_dtype = np.dtype([('totim', self.floattype), - ('kswr', 'i4'), ('kstp', 'i4'), - ('kper', 'i4')]) - self._recordarray = [] - - self.file = open(filename, 'rb') - self.types = ('stage', 'budget', 'flow', 'exchange', 'structure') - if swrtype.lower() in self.types: - self.type = swrtype.lower() - else: - err = 'SWR type ({}) is not defined. 
'.format(type) + \ - 'Available types are:\n' - for t in self.types: - err = '{} {}\n'.format(err, t) - raise Exception(err) - - # set data dtypes - self._build_dtypes() - - # debug - self.verbose = verbose - - # Read the dimension data - self.flowitems = 0 - if self.type == 'flow': - self.flowitems = self.read_integer() - self.nrecord = self.read_integer() - - # set-up - self.items = len(self.out_dtype) - 1 - - # read connectivity for velocity data if necessary - self.conn_dtype = None - if self.type == 'flow': - self.connectivity = self._read_connectivity() - if self.verbose: - print('Connectivity: ') - print(self.connectivity) - - # initialize itemlist and nentries for qaq data - self.nentries = {} - - self.datastart = self.file.tell() - - # build index - self._build_index() - - def get_connectivity(self): - """ - Get connectivity data from the file. - - Parameters - ---------- - - Returns - ---------- - data : numpy array - Array has size (nrecord, 3). None is returned if swrtype is not - 'flow' - - See Also - -------- - - Notes - ----- - - Examples - -------- - - """ - if self.type == 'flow': - return self.connectivity - else: - return None - - def get_nrecords(self): - """ - Get the number of records in the file - - Returns - ---------- - out : tuple of int - A tupe with the number of records and number of flow items - in the file. The number of flow items is non-zero only if - swrtype='flow'. - - """ - return self.nrecord, self.flowitems - - def get_kswrkstpkper(self): - """ - Get a list of unique stress periods, time steps, and swr time steps - in the file - - Returns - ---------- - out : list of (kswr, kstp, kper) tuples - List of unique kswr, kstp, kper combinations in binary file. - kswr, kstp, and kper values are zero-based. - - """ - return self._kswrkstpkper - - def get_ntimes(self): - """ - Get the number of times in the file - - Returns - ---------- - out : int - The number of simulation times (totim) in binary file. - - """ - return self._ntimes - - def get_times(self): - """ - Get a list of unique times in the file - - Returns - ---------- - out : list of floats - List contains unique simulation times (totim) in binary file. - - """ - return self._times.tolist() - - def get_record_names(self): - """ - Get a list of unique record names in the file - - Returns - ---------- - out : list of strings - List of unique text names in the binary file. - - """ - return self.out_dtype.names - - def get_data(self, idx=None, kswrkstpkper=None, totim=None): - """ - Get data from the file for the specified conditions. - - Parameters - ---------- - idx : int - The zero-based record number. The first record is record 0. - (default is None) - kswrkstpkper : tuple of ints - A tuple containing the swr time step, time step, and stress period - (kswr, kstp, kper). These are zero-based kswr, kstp, and kper - values. (default is None) - totim : float - The simulation time. (default is None) - - Returns - ---------- - data : numpy record array - Array has size (nitems). 
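A hedged usage sketch of the query paths documented above, reusing the stage-file name from the class docstring:

>>> import flopy
>>> so = flopy.utils.SwrFile('mymodel.swr.stage.bin')
>>> times = so.get_times()                   # unique totim values
>>> r = so.get_data(totim=times[-1])         # record array at the last time
>>> r = so.get_data(kswrkstpkper=(0, 0, 0))  # zero-based (kswr, kstp, kper)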
- - See Also - -------- - - Notes - ----- - if both kswrkstpkper and totim are None, will return the last entry - - Examples - -------- - - """ - if kswrkstpkper is not None: - kswr1 = kswrkstpkper[0] - kstp1 = kswrkstpkper[1] - kper1 = kswrkstpkper[2] - - totim1 = self._recordarray[np.where( - (self._recordarray['kswr'] == kswr1) & - (self._recordarray['kstp'] == kstp1) & - (self._recordarray['kper'] == kper1))]["totim"][0] - elif totim is not None: - totim1 = totim - elif idx is not None: - totim1 = self._recordarray['totim'][idx] - else: - totim1 = self._times[-1] - - try: - ipos = self.recorddict[totim1] - self.file.seek(ipos) - if self.type == 'exchange': - self.nitems, self.itemlist = self.nentries[totim1] - r = self._read_qaq() - elif self.type == 'structure': - self.nitems, self.itemlist = self.nentries[totim1] - r = self._read_structure() - else: - r = self.read_record(count=self.nrecord) - - # add totim to data record array - s = np.zeros(r.shape[0], dtype=self.out_dtype) - s['totim'] = totim1 - for name in r.dtype.names: - s[name] = r[name] - return s.view(dtype=self.out_dtype) - except: - return None - - def get_ts(self, irec=0, iconn=0, klay=0, istr=0): - """ - Get a time series from a swr binary file. - - Parameters - ---------- - irec : int - is the zero-based reach (stage, qm, qaq) or reach group number - (budget) to retrieve. (default is 0) - iconn : int - is the zero-based connection number for reach (irch) to retrieve - qm data. iconn is only used if qm data is being read. - (default is 0) - klay : int - is the zero-based layer number for reach (irch) to retrieve - qaq data . klay is only used if qaq data is being read. - (default is 0) - klay : int - is the zero-based structure number for reach (irch) to retrieve - structure data . isrt is only used if structure data is being read. - (default is 0) - - Returns - ---------- - out : numpy recarray - Array has size (ntimes, nitems). The first column in the - data array will contain time (totim). nitems is 2 for stage - data, 15 for budget data, 3 for qm data, and 11 for qaq - data. - - See Also - -------- - - Notes - ----- - - The irec, iconn, and klay values must be zero-based. 
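For example, a minimal sketch pulling a single-reach stage hydrograph; the reach number is hypothetical:

>>> so = flopy.utils.SwrFile('mymodel.swr.stage.bin')
>>> ts = so.get_ts(irec=17)     # zero-based reach number
>>> ts['totim'], ts['stage']    # columns of the (ntimes,) record array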
- - Examples - -------- - - """ - - if irec + 1 > self.nrecord: - err = 'Error: specified irec ({}) '.format(irec) + \ - 'exceeds the total number of records ()'.format(self.nrecord) - raise Exception(err) - - gage_record = None - if self.type == 'stage' or self.type == 'budget': - gage_record = self._get_ts(irec=irec) - elif self.type == 'flow': - gage_record = self._get_ts_qm(irec=irec, iconn=iconn) - elif self.type == 'exchange': - gage_record = self._get_ts_qaq(irec=irec, klay=klay) - elif self.type == 'structure': - gage_record = self._get_ts_structure(irec=irec, istr=istr) - - return gage_record - - def _read_connectivity(self): - self.conn_dtype = np.dtype([('reach', 'i4'), - ('from', 'i4'), ('to', 'i4')]) - conn = np.zeros((self.nrecord, 3), np.int) - icount = 0 - for nrg in range(self.flowitems): - flowitems = self.read_integer() - for ic in range(flowitems): - conn[icount, 0] = nrg - conn[icount, 1] = self.read_integer() - 1 - conn[icount, 2] = self.read_integer() - 1 - icount += 1 - return conn - - def _build_dtypes(self): - self.vtotim = ('totim', self.floattype) - if self.type == 'stage': - vtype = [('stage', self.floattype)] - elif self.type == 'budget': - vtype = [('stage', self.floattype), ('qsflow', self.floattype), - ('qlatflow', self.floattype), ('quzflow', self.floattype), - ('rain', self.floattype), ('evap', self.floattype), - ('qbflow', self.floattype), ('qeflow', self.floattype), - ('qexflow', self.floattype), ('qbcflow', self.floattype), - ('qcrflow', self.floattype), ('dv', self.floattype), - ('inf-out', self.floattype), ('volume', self.floattype)] - elif self.type == 'flow': - vtype = [('flow', self.floattype), - ('velocity', self.floattype)] - elif self.type == 'exchange': - vtype = [('layer', 'i4'), ('bottom', 'f8'), ('stage', 'f8'), - ('depth', 'f8'), ('head', 'f8'), ('wetper', 'f8'), - ('cond', 'f8'), ('headdiff', 'f8'), ('exchange', 'f8')] - elif self.type == 'structure': - vtype = [('usstage', 'f8'), ('dsstage', 'f8'), ('gateelev', 'f8'), - ('opening', 'f8'), ('strflow', 'f8')] - self.dtype = np.dtype(vtype) - temp = list(vtype) - if self.type == 'exchange': - temp.insert(0, ('reach', 'i4')) - self.qaq_dtype = np.dtype(temp) - elif self.type == 'structure': - temp.insert(0, ('structure', 'i4')) - temp.insert(0, ('reach', 'i4')) - self.str_dtype = np.dtype(temp) - temp.insert(0, self.vtotim) - self.out_dtype = np.dtype(temp) - return - - def _read_header(self): - nitems = 0 - if self.type == 'exchange' or self.type == 'structure': - itemlist = np.zeros(self.nrecord, np.int) - try: - for i in range(self.nrecord): - itemlist[i] = self.read_integer() - nitems += itemlist[i] - self.nitems = nitems - except: - if self.verbose: - sys.stdout.write('\nCould not read itemlist') - return 0.0, 0.0, 0, 0, 0, False - try: - totim = self.read_real() - dt = self.read_real() - kper = self.read_integer() - 1 - kstp = self.read_integer() - 1 - kswr = self.read_integer() - 1 - if self.type == 'exchange' or self.type == 'structure': - self.nentries[totim] = (nitems, itemlist) - return totim, dt, kper, kstp, kswr, True - except: - return 0.0, 0.0, 0, 0, 0, False - - def _get_ts(self, irec=0): - - # create array - gage_record = np.zeros(self._ntimes, dtype=self.out_dtype) - - # iterate through the record dictionary - idx = 0 - for key, value in self.recorddict.items(): - totim = np.array(key) - gage_record['totim'][idx] = totim - - self.file.seek(value) - r = self._get_data() - for name in r.dtype.names: - gage_record[name][idx] = r[name][irec] - idx += 1 - - return 
gage_record.view(dtype=self.out_dtype) - - def _get_ts_qm(self, irec=0, iconn=0): - - # create array - gage_record = np.zeros(self._ntimes, dtype=self.out_dtype) - - # iterate through the record dictionary - idx = 0 - for key, value in self.recorddict.items(): - totim = key - gage_record['totim'][idx] = totim - - self.file.seek(value) - r = self._get_data() - - # find correct entry for reach and connection - for i in range(self.nrecord): - inode = self.connectivity[i, 1] - ic = self.connectivity[i, 2] - if irec == inode and ic == iconn: - for name in r.dtype.names: - gage_record[name][idx] = r[name][i] - break - idx += 1 - - return gage_record.view(dtype=self.out_dtype) - - def _get_ts_qaq(self, irec=0, klay=0): - - # create array - gage_record = np.zeros(self._ntimes, dtype=self.out_dtype) - - # iterate through the record dictionary - idx = 0 - for key, value in self.recorddict.items(): - totim = key - gage_record['totim'][idx] = totim - - self.nitems, self.itemlist = self.nentries[key] - - self.file.seek(value) - r = self._get_data() - - # find correct entry for record and layer - ilen = np.shape(r)[0] - for i in range(ilen): - ir = r['reach'][i] - il = r['layer'][i] - if ir == irec and il == klay: - for name in r.dtype.names: - gage_record[name][idx] = r[name][i] - break - idx += 1 - - return gage_record.view(dtype=self.out_dtype) - - def _get_ts_structure(self, irec=0, istr=0): - - # create array - gage_record = np.zeros(self._ntimes, dtype=self.out_dtype) - - # iterate through the record dictionary - idx = 0 - for key, value in self.recorddict.items(): - totim = key - gage_record['totim'][idx] = totim - - self.nitems, self.itemlist = self.nentries[key] - - self.file.seek(value) - r = self._get_data() - - # find correct entry for record and structure number - ilen = np.shape(r)[0] - for i in range(ilen): - ir = r['reach'][i] - il = r['structure'][i] - if ir == irec and il == istr: - for name in r.dtype.names: - gage_record[name][idx] = r[name][i] - break - idx += 1 - - return gage_record.view(dtype=self.out_dtype) - - def _get_data(self): - if self.type == 'exchange': - return self._read_qaq() - elif self.type == 'structure': - return self._read_structure() - else: - return self.read_record(count=self.nrecord) - - def _read_qaq(self): - - # read qaq data using standard record reader - bd = self.read_record(count=self.nitems) - bd['layer'] -= 1 - - # add reach number to qaq data - r = np.zeros(self.nitems, dtype=self.qaq_dtype) - - # build array with reach numbers - reaches = np.zeros(self.nitems, dtype=np.int32) - idx = 0 - for irch in range(self.nrecord): - klay = self.itemlist[irch] - for k in range(klay): - # r[idx, 0] = irch - reaches[idx] = irch - idx += 1 - - # add reach to array returned - r['reach'] = reaches.copy() - - # add read data to array returned - for idx, k in enumerate(self.dtype.names): - r[k] = bd[k] - return r - - def _read_structure(self): - - # read qaq data using standard record reader - bd = self.read_record(count=self.nitems) - - # add reach and structure number to structure data - r = np.zeros(self.nitems, dtype=self.str_dtype) - - # build array with reach numbers - reaches = np.zeros(self.nitems, dtype=np.int32) - struct = np.zeros(self.nitems, dtype=np.int32) - idx = 0 - for irch in range(self.nrecord): - nstr = self.itemlist[irch] - for n in range(nstr): - reaches[idx] = irch - struct[idx] = n - idx += 1 - - # add reach to array returned - r['reach'] = reaches.copy() - r['structure'] = struct.copy() - - # add read data to array returned - for idx, k in 
enumerate(self.dtype.names): - r[k] = bd[k] - return r - - def _build_index(self): - """ - Build the recordarray recarray and recorddict dictionary, which map - the header information to the position in the binary file. - """ - self.file.seek(self.datastart) - if self.verbose: - sys.stdout.write('Generating SWR binary data time list\n') - self._ntimes = 0 - self._times = [] - self._kswrkstpkper = [] - self.recorddict = OrderedDict() - - idx = 0 - while True: - # --output something to screen so it is possible to determine - # that the time list is being created - idx += 1 - if self.verbose: - v = divmod(float(idx), 72.) - if v[1] == 0.0: - sys.stdout.write('.') - # read header - totim, dt, kper, kstp, kswr, success = self._read_header() - if success: - if self.type == 'exchange': - bytes = self.nitems * \ - (self.integerbyte + - 8 * self.realbyte) - elif self.type == 'structure': - bytes = self.nitems * (5 * self.realbyte) - else: - bytes = self.nrecord * self.items * \ - self.realbyte - ipos = self.file.tell() - self.file.seek(bytes, 1) - # save data - self._ntimes += 1 - self._times.append(totim) - self._kswrkstpkper.append((kswr, kstp, kper)) - header = (totim, kswr, kstp, kper) - self.recorddict[totim] = ipos - self._recordarray.append(header) - else: - if self.verbose: - sys.stdout.write('\n') - self._recordarray = np.array(self._recordarray, - dtype=self.header_dtype) - self._times = np.array(self._times) - self._kswrkstpkper = np.array(self._kswrkstpkper) - return - - -class SwrStage(SwrFile): - """ - Read binary SWR stage output from MODFLOW SWR Process binary output files - - Parameters - ---------- - filename : string - Name of the swr stage output file - precision : string - 'single' or 'double'. Default is 'double'. - verbose : bool - Write information to the screen. Default is False. - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - - Examples - -------- - - >>> import flopy - >>> stageobj = flopy.utils.SwrStage('mymodel.swr.stg') - - """ - - def __init__(self, filename, precision='double', verbose=False): - super(SwrStage, self).__init__(filename, swrtype='stage', - precision=precision, verbose=verbose) - return - - -class SwrBudget(SwrFile): - """ - Read binary SWR budget output from MODFLOW SWR Process binary output files - - Parameters - ---------- - filename : string - Name of the swr budget output file - precision : string - 'single' or 'double'. Default is 'double'. - verbose : bool - Write information to the screen. Default is False. - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - - Examples - -------- - - >>> import flopy - >>> stageobj = flopy.utils.SwrStage('mymodel.swr.bud') - - """ - - def __init__(self, filename, precision='double', verbose=False): - super(SwrBudget, self).__init__(filename, swrtype='budget', - precision=precision, verbose=verbose) - return - - -class SwrFlow(SwrFile): - """ - Read binary SWR flow output from MODFLOW SWR Process binary output files - - Parameters - ---------- - filename : string - Name of the swr flow output file - precision : string - 'single' or 'double'. Default is 'double'. - verbose : bool - Write information to the screen. Default is False. 
- - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - - Examples - -------- - - >>> import flopy - >>> stageobj = flopy.utils.SwrStage('mymodel.swr.flow') - - """ - - def __init__(self, filename, precision='double', verbose=False): - super(SwrFlow, self).__init__(filename, swrtype='flow', - precision=precision, verbose=verbose) - return - - -class SwrExchange(SwrFile): - """ - Read binary SWR surface-water groundwater exchange output from MODFLOW SWR Process binary output files - - Parameters - ---------- - filename : string - Name of the swr surface-water groundwater exchange output file - precision : string - 'single' or 'double'. Default is 'double'. - verbose : bool - Write information to the screen. Default is False. - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - - Examples - -------- - - >>> import flopy - >>> stageobj = flopy.utils.SwrStage('mymodel.swr.qaq') - - """ - - def __init__(self, filename, precision='double', verbose=False): - super(SwrExchange, self).__init__(filename, swrtype='exchange', - precision=precision, verbose=verbose) - return - - -class SwrStructure(SwrFile): - """ - Read binary SWR structure output from MODFLOW SWR Process binary output - files - - Parameters - ---------- - filename : string - Name of the swr structure output file - precision : string - 'single' or 'double'. Default is 'double'. - verbose : bool - Write information to the screen. Default is False. - - Attributes - ---------- - - Methods - ------- - - See Also - -------- - - Notes - ----- - - Examples - -------- - - >>> import flopy - >>> stageobj = flopy.utils.SwrStage('mymodel.swr.str') - - """ - - def __init__(self, filename, precision='double', verbose=False): - super(SwrStructure, self).__init__(filename, swrtype='structure', - precision=precision, - verbose=verbose) - return +import sys +import numpy as np +from collections import OrderedDict + +from ..utils.utils_def import FlopyBinaryData + + +class SwrFile(FlopyBinaryData): + """ + Read binary SWR output from MODFLOW SWR Process binary output files + The SwrFile class is the super class from which specific derived + classes are formed. This class should not be instantiated directly + + Parameters + ---------- + filename : string + Name of the swr output file + swrtype : str + swr data type. Valid data types are 'stage', 'budget', + 'flow', 'exchange', or 'structure'. (default is 'stage') + precision : string + 'single' or 'double'. Default is 'double'. + verbose : bool + Write information to the screen. Default is False. + + Attributes + ---------- + + Methods + ------- + + See Also + -------- + + Notes + ----- + + Examples + -------- + + >>> import flopy + >>> so = flopy.utils.SwrFile('mymodel.swr.stage.bin') + + """ + + def __init__(self, filename, swrtype='stage', precision='double', + verbose=False): + """ + Class constructor. + + """ + super(SwrFile, self).__init__() + self.set_float(precision=precision) + self.header_dtype = np.dtype([('totim', self.floattype), + ('kswr', 'i4'), ('kstp', 'i4'), + ('kper', 'i4')]) + self._recordarray = [] + + self.file = open(filename, 'rb') + self.types = ('stage', 'budget', 'flow', 'exchange', 'structure') + if swrtype.lower() in self.types: + self.type = swrtype.lower() + else: + err = 'SWR type ({}) is not defined. 
'.format(swrtype) + \
+                  'Available types are:\n'
+            for t in self.types:
+                err = '{} {}\n'.format(err, t)
+            raise Exception(err)
+
+        # set data dtypes
+        self._build_dtypes()
+
+        # debug
+        self.verbose = verbose
+
+        # Read the dimension data
+        self.flowitems = 0
+        if self.type == 'flow':
+            self.flowitems = self.read_integer()
+        self.nrecord = self.read_integer()
+
+        # set-up
+        self.items = len(self.out_dtype) - 1
+
+        # read connectivity for velocity data if necessary
+        self.conn_dtype = None
+        if self.type == 'flow':
+            self.connectivity = self._read_connectivity()
+            if self.verbose:
+                print('Connectivity: ')
+                print(self.connectivity)
+
+        # initialize itemlist and nentries for qaq data
+        self.nentries = {}
+
+        self.datastart = self.file.tell()
+
+        # build index
+        self._build_index()
+
+    def get_connectivity(self):
+        """
+        Get connectivity data from the file.
+
+        Parameters
+        ----------
+
+        Returns
+        ----------
+        data : numpy array
+            Array has size (nrecord, 3). None is returned if swrtype is not
+            'flow'
+
+        See Also
+        --------
+
+        Notes
+        -----
+
+        Examples
+        --------
+
+        """
+        if self.type == 'flow':
+            return self.connectivity
+        else:
+            return None
+
+    def get_nrecords(self):
+        """
+        Get the number of records in the file
+
+        Returns
+        ----------
+        out : tuple of int
+            A tuple with the number of records and number of flow items
+            in the file. The number of flow items is non-zero only if
+            swrtype='flow'.
+
+        """
+        return self.nrecord, self.flowitems
+
+    def get_kswrkstpkper(self):
+        """
+        Get a list of unique stress periods, time steps, and swr time steps
+        in the file
+
+        Returns
+        ----------
+        out : list of (kswr, kstp, kper) tuples
+            List of unique kswr, kstp, kper combinations in binary file.
+            kswr, kstp, and kper values are zero-based.
+
+        """
+        return self._kswrkstpkper
+
+    def get_ntimes(self):
+        """
+        Get the number of times in the file
+
+        Returns
+        ----------
+        out : int
+            The number of simulation times (totim) in binary file.
+
+        """
+        return self._ntimes
+
+    def get_times(self):
+        """
+        Get a list of unique times in the file
+
+        Returns
+        ----------
+        out : list of floats
+            List contains unique simulation times (totim) in binary file.
+
+        """
+        return self._times.tolist()
+
+    def get_record_names(self):
+        """
+        Get a list of unique record names in the file
+
+        Returns
+        ----------
+        out : list of strings
+            List of unique text names in the binary file.
+
+        """
+        return self.out_dtype.names
+
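Taken together, the accessors above are enough to size a post-processing loop before any payload data is read. A minimal sketch of that pattern; the file name 'mymodel.swr.stg' is a placeholder for an existing SWR stage file:

    >>> import flopy
    >>> so = flopy.utils.SwrStage('mymodel.swr.stg')
    >>> ntimes = so.get_ntimes()
    >>> times = so.get_times()            # list of totim floats, len == ntimes
    >>> nrecord, nflow = so.get_nrecords()
    >>> so.get_record_names()             # ('totim', 'stage') for stage files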
+    def get_data(self, idx=None, kswrkstpkper=None, totim=None):
+        """
+        Get data from the file for the specified conditions.
+
+        Parameters
+        ----------
+        idx : int
+            The zero-based record number. The first record is record 0.
+            (default is None)
+        kswrkstpkper : tuple of ints
+            A tuple containing the swr time step, time step, and stress period
+            (kswr, kstp, kper). These are zero-based kswr, kstp, and kper
+            values. (default is None)
+        totim : float
+            The simulation time. (default is None)
+
+        Returns
+        ----------
+        data : numpy record array
+            Array has size (nitems).
+
+        See Also
+        --------
+
+        Notes
+        -----
+        If idx, kswrkstpkper, and totim are all None, the last entry in the
+        file is returned.
+
+        Examples
+        --------
+
+        """
+        if kswrkstpkper is not None:
+            kswr1 = kswrkstpkper[0]
+            kstp1 = kswrkstpkper[1]
+            kper1 = kswrkstpkper[2]
+
+            totim1 = self._recordarray[np.where(
+                (self._recordarray['kswr'] == kswr1) &
+                (self._recordarray['kstp'] == kstp1) &
+                (self._recordarray['kper'] == kper1))]["totim"][0]
+        elif totim is not None:
+            totim1 = totim
+        elif idx is not None:
+            totim1 = self._recordarray['totim'][idx]
+        else:
+            totim1 = self._times[-1]
+
+        try:
+            ipos = self.recorddict[totim1]
+            self.file.seek(ipos)
+            if self.type == 'exchange':
+                self.nitems, self.itemlist = self.nentries[totim1]
+                r = self._read_qaq()
+            elif self.type == 'structure':
+                self.nitems, self.itemlist = self.nentries[totim1]
+                r = self._read_structure()
+            else:
+                r = self.read_record(count=self.nrecord)
+
+            # add totim to data record array
+            s = np.zeros(r.shape[0], dtype=self.out_dtype)
+            s['totim'] = totim1
+            for name in r.dtype.names:
+                s[name] = r[name]
+            return s.view(dtype=self.out_dtype)
+        except:
+            return None
+
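The three selectors are alternatives, tried in the order kswrkstpkper, totim, idx, and None is returned if the lookup fails. A short sketch (the file name is hypothetical):

    >>> import flopy
    >>> so = flopy.utils.SwrStage('mymodel.swr.stg')
    >>> r_last = so.get_data()                        # last entry in the file
    >>> r_t0 = so.get_data(totim=so.get_times()[0])   # first simulation time
    >>> r_k = so.get_data(kswrkstpkper=(0, 0, 0))     # zero-based kswr, kstp, kper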
+    def get_ts(self, irec=0, iconn=0, klay=0, istr=0):
+        """
+        Get a time series from a swr binary file.
+
+        Parameters
+        ----------
+        irec : int
+            is the zero-based reach (stage, qm, qaq) or reach group number
+            (budget) to retrieve. (default is 0)
+        iconn : int
+            is the zero-based connection number for reach (irch) to retrieve
+            qm data. iconn is only used if qm data is being read.
+            (default is 0)
+        klay : int
+            is the zero-based layer number for reach (irch) to retrieve
+            qaq data. klay is only used if qaq data is being read.
+            (default is 0)
+        istr : int
+            is the zero-based structure number for reach (irch) to retrieve
+            structure data. istr is only used if structure data is being read.
+            (default is 0)
+
+        Returns
+        ----------
+        out : numpy recarray
+            Array has size (ntimes, nitems). The first column in the
+            data array will contain time (totim). nitems is 2 for stage
+            data, 15 for budget data, 3 for qm data, and 11 for qaq
+            data.
+
+        See Also
+        --------
+
+        Notes
+        -----
+
+        The irec, iconn, klay, and istr values must be zero-based.
+
+        Examples
+        --------
+
+        """
+
+        if irec + 1 > self.nrecord:
+            err = 'Error: specified irec ({}) '.format(irec) + \
+                  'exceeds the total number of records ({})'.format(
+                self.nrecord)
+            raise Exception(err)
+
+        gage_record = None
+        if self.type == 'stage' or self.type == 'budget':
+            gage_record = self._get_ts(irec=irec)
+        elif self.type == 'flow':
+            gage_record = self._get_ts_qm(irec=irec, iconn=iconn)
+        elif self.type == 'exchange':
+            gage_record = self._get_ts_qaq(irec=irec, klay=klay)
+        elif self.type == 'structure':
+            gage_record = self._get_ts_structure(irec=irec, istr=istr)
+
+        return gage_record
+
+    def _read_connectivity(self):
+        self.conn_dtype = np.dtype([('reach', 'i4'),
+                                    ('from', 'i4'), ('to', 'i4')])
+        conn = np.zeros((self.nrecord, 3), np.int)
+        icount = 0
+        for nrg in range(self.flowitems):
+            flowitems = self.read_integer()
+            for ic in range(flowitems):
+                conn[icount, 0] = nrg
+                conn[icount, 1] = self.read_integer() - 1
+                conn[icount, 2] = self.read_integer() - 1
+                icount += 1
+        return conn
+
+    def _build_dtypes(self):
+        self.vtotim = ('totim', self.floattype)
+        if self.type == 'stage':
+            vtype = [('stage', self.floattype)]
+        elif self.type == 'budget':
+            vtype = [('stage', self.floattype), ('qsflow', self.floattype),
+                     ('qlatflow', self.floattype), ('quzflow', self.floattype),
+                     ('rain', self.floattype), ('evap', self.floattype),
+                     ('qbflow', self.floattype), ('qeflow', self.floattype),
+                     ('qexflow', self.floattype), ('qbcflow', self.floattype),
+                     ('qcrflow', self.floattype), ('dv', self.floattype),
+                     ('inf-out', self.floattype), ('volume', self.floattype)]
+        elif self.type == 'flow':
+            vtype = [('flow', self.floattype),
+                     ('velocity', self.floattype)]
+        elif self.type == 'exchange':
+            vtype = [('layer', 'i4'), ('bottom', 'f8'), ('stage', 'f8'),
+                     ('depth', 'f8'), ('head', 'f8'), ('wetper', 'f8'),
+                     ('cond', 'f8'), ('headdiff', 'f8'), ('exchange', 'f8')]
+        elif self.type == 'structure':
+            vtype = [('usstage', 'f8'), ('dsstage', 'f8'), ('gateelev', 'f8'),
+                     ('opening', 'f8'), ('strflow', 'f8')]
+        self.dtype = np.dtype(vtype)
+        temp = list(vtype)
+        if self.type == 'exchange':
+            temp.insert(0, ('reach', 'i4'))
+            self.qaq_dtype = np.dtype(temp)
+        elif self.type == 'structure':
+            temp.insert(0, ('structure', 'i4'))
+            temp.insert(0, ('reach', 'i4'))
+            self.str_dtype = np.dtype(temp)
+        temp.insert(0, self.vtotim)
+        self.out_dtype = np.dtype(temp)
+        return
+
+    def _read_header(self):
+        nitems = 0
+        if self.type == 'exchange' or self.type == 'structure':
+            itemlist = np.zeros(self.nrecord, np.int)
+            try:
+                for i in range(self.nrecord):
+                    itemlist[i] = self.read_integer()
+                    nitems += itemlist[i]
+                self.nitems = nitems
+            except:
+                if self.verbose:
+                    sys.stdout.write('\nCould not read itemlist')
+                return 0.0, 0.0, 0, 0, 0, False
+        try:
+            totim = self.read_real()
+            dt = self.read_real()
+            kper = self.read_integer() - 1
+            kstp = self.read_integer() - 1
+            kswr = self.read_integer() - 1
+            if self.type == 'exchange' or self.type == 'structure':
+                self.nentries[totim] = (nitems, itemlist)
+            return totim, dt, kper, kstp, kswr, True
+        except:
+            return 0.0, 0.0, 0, 0, 0, False
+
+    def _get_ts(self, irec=0):
+
+        # create array
+        gage_record = np.zeros(self._ntimes, dtype=self.out_dtype)
+
+        # iterate through the record dictionary
+        idx = 0
+        for key, value in self.recorddict.items():
+            totim = np.array(key)
+            gage_record['totim'][idx] = totim
+
+            self.file.seek(value)
+            r = self._get_data()
+            for name in r.dtype.names:
+                gage_record[name][idx] = r[name][irec]
+            idx += 1
+
+        return gage_record.view(dtype=self.out_dtype)
+
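A sketch of the reach-oriented retrieval described above; the stage and qaq file names are placeholders, and all indices are zero-based:

    >>> import flopy
    >>> ts = flopy.utils.SwrStage('mymodel.swr.stg').get_ts(irec=17)
    >>> ts['totim'], ts['stage']            # parallel columns, one row per time
    >>> qaq = flopy.utils.SwrExchange('mymodel.swr.qaq')
    >>> ts17 = qaq.get_ts(irec=17, klay=0)  # reach 17, uppermost layer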
+    def _get_ts_qm(self, irec=0, iconn=0):
+
+        # create array
+        gage_record = np.zeros(self._ntimes, dtype=self.out_dtype)
+
+        # iterate through the record dictionary
+        idx = 0
+        for key, value in self.recorddict.items():
+            totim = key
+            gage_record['totim'][idx] = totim
+
+            self.file.seek(value)
+            r = self._get_data()
+
+            # find correct entry for reach and connection
+            for i in range(self.nrecord):
+                inode = self.connectivity[i, 1]
+                ic = self.connectivity[i, 2]
+                if irec == inode and ic == iconn:
+                    for name in r.dtype.names:
+                        gage_record[name][idx] = r[name][i]
+                    break
+            idx += 1
+
+        return gage_record.view(dtype=self.out_dtype)
+
+    def _get_ts_qaq(self, irec=0, klay=0):
+
+        # create array
+        gage_record = np.zeros(self._ntimes, dtype=self.out_dtype)
+
+        # iterate through the record dictionary
+        idx = 0
+        for key, value in self.recorddict.items():
+            totim = key
+            gage_record['totim'][idx] = totim
+
+            self.nitems, self.itemlist = self.nentries[key]
+
+            self.file.seek(value)
+            r = self._get_data()
+
+            # find correct entry for record and layer
+            ilen = np.shape(r)[0]
+            for i in range(ilen):
+                ir = r['reach'][i]
+                il = r['layer'][i]
+                if ir == irec and il == klay:
+                    for name in r.dtype.names:
+                        gage_record[name][idx] = r[name][i]
+                    break
+            idx += 1
+
+        return gage_record.view(dtype=self.out_dtype)
+
+    def _get_ts_structure(self, irec=0, istr=0):
+
+        # create array
+        gage_record = np.zeros(self._ntimes, dtype=self.out_dtype)
+
+        # iterate through the record dictionary
+        idx = 0
+        for key, value in self.recorddict.items():
+            totim = key
+            gage_record['totim'][idx] = totim
+
+            self.nitems, self.itemlist = self.nentries[key]
+
+            self.file.seek(value)
+            r = self._get_data()
+
+            # find correct entry for record and structure number
+            ilen = np.shape(r)[0]
+            for i in range(ilen):
+                ir = r['reach'][i]
+                il = r['structure'][i]
+                if ir == irec and il == istr:
+                    for name in r.dtype.names:
+                        gage_record[name][idx] = r[name][i]
+                    break
+            idx += 1
+
+        return gage_record.view(dtype=self.out_dtype)
+
+    def _get_data(self):
+        if self.type == 'exchange':
+            return self._read_qaq()
+        elif self.type == 'structure':
+            return self._read_structure()
+        else:
+            return self.read_record(count=self.nrecord)
+
+    def _read_qaq(self):
+
+        # read qaq data using standard record reader
+        bd = self.read_record(count=self.nitems)
+        bd['layer'] -= 1
+
+        # add reach number to qaq data
+        r = np.zeros(self.nitems, dtype=self.qaq_dtype)
+
+        # build array with reach numbers
+        reaches = np.zeros(self.nitems, dtype=np.int32)
+        idx = 0
+        for irch in range(self.nrecord):
+            klay = self.itemlist[irch]
+            for k in range(klay):
+                reaches[idx] = irch
+                idx += 1
+
+        # add reach to array returned
+        r['reach'] = reaches.copy()
+
+        # add read data to array returned
+        for idx, k in enumerate(self.dtype.names):
+            r[k] = bd[k]
+        return r
+
+    def _read_structure(self):
+
+        # read structure data using the standard record reader
+        bd = self.read_record(count=self.nitems)
+
+        # add reach and structure number to structure data
+        r = np.zeros(self.nitems, dtype=self.str_dtype)
+
+        # build arrays with reach and structure numbers
+        reaches = np.zeros(self.nitems, dtype=np.int32)
+        struct = np.zeros(self.nitems, dtype=np.int32)
+        idx = 0
+        for irch in range(self.nrecord):
+            nstr = self.itemlist[irch]
+            for n in range(nstr):
+                reaches[idx] = irch
+                struct[idx] = n
+                idx += 1
+
+        # add reach and structure numbers to the array returned
+        r['reach'] = reaches.copy()
+        r['structure'] = struct.copy()
+
+        # add read data to array returned
+        for idx, k in enumerate(self.dtype.names):
+            r[k] = bd[k]
+        return r
+
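In _read_qaq and _read_structure above, the per-reach counts in itemlist are expanded into a flat reach-number column. The same expansion, isolated with made-up counts (numpy's repeat is equivalent to the explicit loops):

    >>> import numpy as np
    >>> itemlist = np.array([2, 1, 3])   # layers (or structures) per reach
    >>> np.repeat(np.arange(itemlist.size), itemlist)
    array([0, 0, 1, 2, 2, 2])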
+    def _build_index(self):
+        """
+        Build the recordarray recarray and recorddict dictionary, which map
+        the header information to the position in the binary file.
+        """
+        self.file.seek(self.datastart)
+        if self.verbose:
+            sys.stdout.write('Generating SWR binary data time list\n')
+        self._ntimes = 0
+        self._times = []
+        self._kswrkstpkper = []
+        self.recorddict = OrderedDict()
+
+        idx = 0
+        while True:
+            # --output something to screen so it is possible to determine
+            #   that the time list is being created
+            idx += 1
+            if self.verbose:
+                v = divmod(float(idx), 72.)
+                if v[1] == 0.0:
+                    sys.stdout.write('.')
+            # read header
+            totim, dt, kper, kstp, kswr, success = self._read_header()
+            if success:
+                if self.type == 'exchange':
+                    nbytes = self.nitems * \
+                             (self.integerbyte +
+                              8 * self.realbyte)
+                elif self.type == 'structure':
+                    nbytes = self.nitems * (5 * self.realbyte)
+                else:
+                    nbytes = self.nrecord * self.items * \
+                             self.realbyte
+                ipos = self.file.tell()
+                self.file.seek(nbytes, 1)
+                # save data
+                self._ntimes += 1
+                self._times.append(totim)
+                self._kswrkstpkper.append((kswr, kstp, kper))
+                header = (totim, kswr, kstp, kper)
+                self.recorddict[totim] = ipos
+                self._recordarray.append(header)
+            else:
+                if self.verbose:
+                    sys.stdout.write('\n')
+                self._recordarray = np.array(self._recordarray,
+                                             dtype=self.header_dtype)
+                self._times = np.array(self._times)
+                self._kswrkstpkper = np.array(self._kswrkstpkper)
+                return
+
+
+class SwrStage(SwrFile):
+    """
+    Read binary SWR stage output from MODFLOW SWR Process binary output files
+
+    Parameters
+    ----------
+    filename : string
+        Name of the swr stage output file
+    precision : string
+        'single' or 'double'. Default is 'double'.
+    verbose : bool
+        Write information to the screen. Default is False.
+
+    Attributes
+    ----------
+
+    Methods
+    -------
+
+    See Also
+    --------
+
+    Notes
+    -----
+
+    Examples
+    --------
+
+    >>> import flopy
+    >>> stageobj = flopy.utils.SwrStage('mymodel.swr.stg')
+
+    """
+
+    def __init__(self, filename, precision='double', verbose=False):
+        super(SwrStage, self).__init__(filename, swrtype='stage',
+                                       precision=precision, verbose=verbose)
+        return
+
+
+class SwrBudget(SwrFile):
+    """
+    Read binary SWR budget output from MODFLOW SWR Process binary output files
+
+    Parameters
+    ----------
+    filename : string
+        Name of the swr budget output file
+    precision : string
+        'single' or 'double'. Default is 'double'.
+    verbose : bool
+        Write information to the screen. Default is False.
+
+    Attributes
+    ----------
+
+    Methods
+    -------
+
+    See Also
+    --------
+
+    Notes
+    -----
+
+    Examples
+    --------
+
+    >>> import flopy
+    >>> budobj = flopy.utils.SwrBudget('mymodel.swr.bud')
+
+    """
+
+    def __init__(self, filename, precision='double', verbose=False):
+        super(SwrBudget, self).__init__(filename, swrtype='budget',
+                                        precision=precision, verbose=verbose)
+        return
+
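Each subclass only fixes the swrtype argument, so the class chosen has to match the file being read; opening a budget file through SwrStage, for instance, would mis-map the record columns. A sketch with a placeholder budget file:

    >>> import flopy
    >>> budobj = flopy.utils.SwrBudget('mymodel.swr.bud')
    >>> r = budobj.get_data(totim=budobj.get_times()[-1])
    >>> r['qbflow']                      # one of the budget columns from _build_dtypes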
+
+class SwrFlow(SwrFile):
+    """
+    Read binary SWR flow output from MODFLOW SWR Process binary output files
+
+    Parameters
+    ----------
+    filename : string
+        Name of the swr flow output file
+    precision : string
+        'single' or 'double'. Default is 'double'.
+    verbose : bool
+        Write information to the screen. Default is False.
+
+    Attributes
+    ----------
+
+    Methods
+    -------
+
+    See Also
+    --------
+
+    Notes
+    -----
+
+    Examples
+    --------
+
+    >>> import flopy
+    >>> flowobj = flopy.utils.SwrFlow('mymodel.swr.flow')
+
+    """
+
+    def __init__(self, filename, precision='double', verbose=False):
+        super(SwrFlow, self).__init__(filename, swrtype='flow',
+                                      precision=precision, verbose=verbose)
+        return
+
+
+class SwrExchange(SwrFile):
+    """
+    Read binary SWR surface-water/groundwater exchange output from MODFLOW
+    SWR Process binary output files
+
+    Parameters
+    ----------
+    filename : string
+        Name of the swr surface-water/groundwater exchange output file
+    precision : string
+        'single' or 'double'. Default is 'double'.
+    verbose : bool
+        Write information to the screen. Default is False.
+
+    Attributes
+    ----------
+
+    Methods
+    -------
+
+    See Also
+    --------
+
+    Notes
+    -----
+
+    Examples
+    --------
+
+    >>> import flopy
+    >>> qaqobj = flopy.utils.SwrExchange('mymodel.swr.qaq')
+
+    """
+
+    def __init__(self, filename, precision='double', verbose=False):
+        super(SwrExchange, self).__init__(filename, swrtype='exchange',
+                                          precision=precision, verbose=verbose)
+        return
+
+
+class SwrStructure(SwrFile):
+    """
+    Read binary SWR structure output from MODFLOW SWR Process binary output
+    files
+
+    Parameters
+    ----------
+    filename : string
+        Name of the swr structure output file
+    precision : string
+        'single' or 'double'. Default is 'double'.
+    verbose : bool
+        Write information to the screen. Default is False.
+
+    Attributes
+    ----------
+
+    Methods
+    -------
+
+    See Also
+    --------
+
+    Notes
+    -----
+
+    Examples
+    --------
+
+    >>> import flopy
+    >>> strobj = flopy.utils.SwrStructure('mymodel.swr.str')
+
+    """
+
+    def __init__(self, filename, precision='double', verbose=False):
+        super(SwrStructure, self).__init__(filename, swrtype='structure',
+                                           precision=precision,
+                                           verbose=verbose)
+        return
diff --git a/flopy/utils/util_array.py b/flopy/utils/util_array.py
index eaf7612006..42a915aac3 100644
--- a/flopy/utils/util_array.py
+++ b/flopy/utils/util_array.py
@@ -1,2865 +1,2865 @@
-"""
-util_array module. Contains the util_2d, util_3d and transient_2d classes.
- These classes encapsulate modflow-style array inputs away
- from the individual packages. The end-user should not need to
- instantiate these classes directly.
-
-"""
-from __future__ import division, print_function
-# from future.utils import with_metaclass
-
-import os
-import shutil
-import copy
-import numpy as np
-from warnings import warn
-from ..utils.binaryfile import BinaryHeader
-from ..utils.flopy_io import line_parse
-from ..datbase import DataType, DataInterface
-
-
-class ArrayFormat(object):
-    """
-    ArrayFormat class for handling various output format types for both
-    MODFLOW and flopy
-
-    Parameters
-    ----------
-    u2d : Util2d instance
-    python : str (optional)
-        python-style output format descriptor e.g. {0:15.6e}
-    fortran : str (optional)
-        fortran style output format descriptor e.g. (2E15.6)
-
-
-    Attributes
-    ----------
-    fortran : str
-        fortran format output descriptor (e.g. (100G15.6)
-    py : str
-        python format output descriptor (e.g. "{0:15.6E}")
-    numpy : str
-        numpy format output descriptor (e.g. "%15.6e")
-    npl : int
-        number if items per line of output
-    width : int
-        the width of the formatted numeric output
-    decimal : int
-        the number of decimal digits in the numeric output
-    format : str
-        the output format type e.g. 
I, G, E, etc - free : bool - free format flag - binary : bool - binary format flag - - - Methods - ------- - get_default_numpy_fmt : (dtype : [np.int32, np.float32]) - a static method to get a default numpy dtype - used for loading - decode_fortran_descriptor : (fd : str) - a static method to decode fortran descriptors into npl, format, - width, decimal. - - See Also - -------- - - Notes - ----- - - Examples - -------- - - """ - - def __init__(self, u2d, python=None, fortran=None, array_free_format=None): - - assert isinstance(u2d, Util2d), "ArrayFormat only supports Util2d," + \ - "not {0}".format(type(u2d)) - if len(u2d.shape) == 1: - self._npl_full = u2d.shape[0] - else: - self._npl_full = u2d.shape[1] - self.dtype = u2d.dtype - self._npl = None - self._format = None - self._width = None - self._decimal = None - if array_free_format is not None: - self._freeformat_model = bool(array_free_format) - else: - self._freeformat_model = bool(u2d.model.array_free_format) - - self.default_float_width = 15 - self.default_int_width = 10 - self.default_float_format = "E" - self.default_int_format = "I" - self.default_float_decimal = 6 - self.default_int_decimal = 0 - - self._fmts = ['I', 'G', 'E', 'F'] - - self._isbinary = False - self._isfree = False - - if python is not None and fortran is not None: - raise Exception("only one of [python,fortran] can be passed" + - "to ArrayFormat constructor") - - if python is not None: - self._parse_python_format(python) - - elif fortran is not None: - self._parse_fortran_format(fortran) - - else: - self._set_defaults() - - @property - def array_free_format(self): - return bool(self._freeformat_model) - - def _set_defaults(self): - if self.dtype == np.int32: - self._npl = self._npl_full - self._format = self.default_int_format - self._width = self.default_int_width - self._decimal = None - - elif self.dtype in [np.float32, bool]: - self._npl = self._npl_full - self._format = self.default_float_format - self._width = self.default_float_width - self._decimal = self.default_float_decimal - else: - raise Exception("ArrayFormat._set_defaults() error: " + - "unsupported dtype: {0}".format(str(self.dtype))) - - def __str__(self): - s = "ArrayFormat: npl:{0},format:{1},width:{2},decimal{3}" \ - .format(self.npl, self.format, self.width, self.decimal) - s += ",isfree:{0},isbinary:{1}".format(self._isfree, self._isbinary) - return s - - @staticmethod - def get_default_numpy_fmt(dtype): - if dtype == np.int32: - return "%10d" - elif dtype == np.float32: - return "%15.6E" - else: - raise Exception( - "ArrayFormat.get_default_numpy_fmt(): unrecognized " + \ - "dtype, must be np.int32 or np.float32") - - @classmethod - def integer(cls): - raise NotImplementedError() - - @classmethod - def float(cls): - raise NotImplementedError() - - @property - def binary(self): - return bool(self._isbinary) - - @property - def free(self): - return bool(self._isfree) - - def __eq__(self, other): - if isinstance(other, str): - if other.lower() == "free": - return self.free - if other.lower() == "binary": - return self.binary - else: - super(ArrayFormat, self).__eq__(other) - - @property - def npl(self): - return copy.copy(self._npl) - - @property - def format(self): - return copy.copy(self._format) - - @property - def width(self): - return copy.copy(self._width) - - @property - def decimal(self): - return copy.copy(self._decimal) - - def __setattr__(self, key, value): - if key == "format": - value = value.upper() - assert value.upper() in self._fmts - if value == 'I': - assert self.dtype == 
np.int32, self.dtype - self._format = value - self._decimal = None - else: - if value == 'G': - print("'G' format being reset to 'E'") - value = 'E' - self._format = value - if self.decimal is None: - self._decimal = self.default_float_decimal - - elif key == "width": - width = int(value) - if self.dtype == np.float32 and width < self.decimal: - raise Exception("width cannot be less than decimal") - elif self.dtype == np.float32 and \ - width < self.default_float_width: - print("ArrayFormat warning:setting width less " + - "than default of {0}".format(self.default_float_width)) - self._width = width - elif key == "decimal": - if self.dtype == np.int32: - raise Exception("cannot set decimal for integer dtypes") - elif self.dtype == np.float32: - value = int(value) - if value < self.default_float_decimal: - print("ArrayFormat warning: setting decimal " + - " less than default of " + - "{0}".format(self.default_float_decimal)) - if value < self.decimal: - print("ArrayFormat warning: setting decimal " + - " less than current value of " + - "{0}".format(self.default_float_decimal)) - self._decimal = int(value) - else: - raise TypeError(self.dtype) - - elif key == "entries" \ - or key == "entires_per_line" \ - or key == "npl": - value = int(value) - assert value <= self._npl_full, "cannot set npl > shape" - self._npl = value - - elif key.lower() == "binary": - value = bool(value) - if value and self.free: - # raise Exception("cannot switch from 'free' to 'binary' format") - self._isfree = False - self._isbinary = value - self._set_defaults() - - elif key.lower() == "free": - value = bool(value) - if value and self.binary: - # raise Exception("cannot switch from 'binary' to 'free' format") - self._isbinary = False - self._isfree = bool(value) - self._set_defaults() - - elif key.lower() == "fortran": - self._parse_fortran_format(value) - - elif key.lower() == "python" or key.lower() == "py": - self._parse_python_format(value) - - else: - super(ArrayFormat, self).__setattr__(key, value) - - @property - def py(self): - return self._get_python_format() - - def _get_python_format(self): - - if self.format == 'I': - fmt = 'd' - else: - fmt = self.format - pd = '{0:' + str(self.width) - if self.decimal is not None: - pd += '.' 
+ str(self.decimal) + fmt + '}' - else: - pd += fmt + '}' - - if self.npl is None: - if self._isfree: - return (self._npl_full, pd) - else: - raise Exception("ArrayFormat._get_python_format() error: " + \ - "format is not 'free' and npl is not set") - - return (self.npl, pd) - - def _parse_python_format(self, arg): - raise NotImplementedError() - - @property - def fortran(self): - return self._get_fortran_format() - - def _get_fortran_format(self): - if self._isfree: - return "(FREE)" - if self._isbinary: - return "(BINARY)" - - fd = '({0:d}{1:s}{2:d}'.format(self.npl, self.format, self.width) - if self.decimal is not None: - fd += '.{0:d})'.format(self.decimal) - else: - fd += ')' - return fd - - def _parse_fortran_format(self, arg): - """Decode fortran descriptor - - Parameters - ---------- - arg : str - - Returns - ------- - npl, fmt, width, decimal : int, str, int, int - - """ - # strip off any quotes around format string - - npl, fmt, width, decimal = ArrayFormat.decode_fortran_descriptor(arg) - if isinstance(npl, str): - if 'FREE' in npl.upper(): - self._set_defaults() - self._isfree = True - return - - elif 'BINARY' in npl.upper(): - self._set_defaults() - self._isbinary = True - return - self._npl = int(npl) - self._format = fmt - self._width = int(width) - if decimal is not None: - self._decimal = int(decimal) - - @property - def numpy(self): - return self._get_numpy_format() - - def _get_numpy_format(self): - return "%{0}{1}.{2}".format(self.width, self.format, self.decimal) - - @staticmethod - def decode_fortran_descriptor(fd): - """Decode fortran descriptor - - Parameters - ---------- - fd : str - - Returns - ------- - npl, fmt, width, decimal : int, str, int, int - - """ - # strip off any quotes around format string - fd = fd.replace("'", "") - fd = fd.replace('"', '') - # strip off '(' and ')' - fd = fd.strip()[1:-1] - if str('FREE') in str(fd.upper()): - return 'free', None, None, None - elif str('BINARY') in str(fd.upper()): - return 'binary', None, None, None - if str('.') in str(fd): - raw = fd.split('.') - decimal = int(raw[1]) - else: - raw = [fd] - decimal = None - fmts = ['ES', 'EN', 'I', 'G', 'E', 'F'] - raw = raw[0].upper() - for fmt in fmts: - if fmt in raw: - raw = raw.split(fmt) - # '(F9.0)' will return raw = ['', '9'] - # try and except will catch this - try: - npl = int(raw[0]) - width = int(raw[1]) - except: - npl = 1 - width = int(raw[1]) - if fmt == 'G': - fmt = 'E' - elif fmt == 'ES': - fmt = 'E' - elif fmt == 'EN': - fmt = 'E' - return npl, fmt, width, decimal - raise Exception('Unrecognized format type: ' + - str(fd) + ' looking for: ' + str(fmts)) - - -def read1d(f, a): - """ - Fill the 1d array, a, with the correct number of values. 
Required in - case lpf 1d arrays (chani, layvka, etc) extend over more than one line - - """ - if len(a.shape) != 1: - raise ValueError('read1d: expected 1 dimension, found shape {0}' - .format(a.shape)) - values = [] - while len(values) < a.shape[0]: - line = f.readline() - if len(line) == 0: - raise ValueError('read1d: no data found') - values += line_parse(line) - a[:] = np.fromiter(values, dtype=a.dtype, count=a.shape[0]) - return a - - -def new_u2d(old_util2d, value): - new_util2d = Util2d(old_util2d.model, old_util2d.shape, old_util2d.dtype, - value, old_util2d.name, old_util2d.format.fortran, - old_util2d.cnstnt, old_util2d.iprn, - old_util2d.ext_filename, old_util2d.locat, - old_util2d.format.binary, - array_free_format=old_util2d.format.array_free_format) - return new_util2d - - -class Util3d(DataInterface): - """ - Util3d class for handling 3-D model arrays. just a thin wrapper around - Util2d - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to which - this package will be added. - shape : length 3 tuple - shape of the 3-D array, typically (nlay,nrow,ncol) - dtype : [np.int32, np.float32, np.bool] - the type of the data - value : variable - the data to be assigned to the 3-D array. - can be a scalar, list, or ndarray - name : string - name of the property, used for writing comments to input files - fmtin : string - modflow fmtin variable (optional). (the default is None) - cnstnt : string - modflow cnstnt variable (optional) (the default is 1.0) - iprn : int - modflow iprn variable (optional) (the default is -1) - locat : int - modflow locat variable (optional) (the default is None). If the model - instance does not support free format and the - external flag is not set and the value is a simple scalar, - then locat must be explicitly passed as it is the unit number - to read the array from - ext_filename : string - the external filename to write the array representation to - (optional) (the default is None) . - If type(value) is a string and is an accessible filename, the - ext_filename is reset to value. 
- bin : bool - flag to control writing external arrays as binary (optional) - (the defaut is False) - - Attributes - ---------- - array : np.ndarray - the array representation of the 3-D object - - - Methods - ------- - get_file_entry : string - get the model input file string including the control record for the - entire 3-D property - - See Also - -------- - - Notes - ----- - - Examples - -------- - - """ - - def __init__(self, model, shape, dtype, value, name, - fmtin=None, cnstnt=1.0, iprn=-1, locat=None, - ext_unit_dict=None, array_free_format=None): - """ - 3-D wrapper from Util2d - shape must be 3-D - """ - self.array_free_format = array_free_format - if isinstance(value, Util3d): - for attr in value.__dict__.items(): - setattr(self, attr[0], attr[1]) - self._model = model - self.array_free_format = array_free_format - for i, u2d in enumerate(self.util_2ds): - self.util_2ds[i] = Util2d(model, u2d.shape, u2d.dtype, - u2d._array, name=u2d.name, - fmtin=u2d.format.fortran, - locat=locat, - cnstnt=u2d.cnstnt, - ext_filename=u2d.filename, - array_free_format=array_free_format) - - return - if len(shape) != 3: - raise ValueError( - 'Util3d: expected 3 dimensions, found shape {0}'.format(shape)) - self._model = model - self.shape = shape - self._dtype = dtype - self.__value = value - isnamespecified = False - if isinstance(name, list): - self._name = name - isnamespecified = True - isnamespecified = True - isnamespecified = True - else: - t = [] - for k in range(shape[0]): - t.append(name) - self._name = t - self.name_base = [] - for k in range(shape[0]): - if isnamespecified: - self.name_base.append(self.name[k]) - else: - if 'Layer' not in self.name[k]: - self.name_base.append(self.name[k] + ' Layer ') - else: - self.name_base.append(self.name[k]) - self.fmtin = fmtin - self.cnstnt = cnstnt - self.iprn = iprn - self.locat = locat - - self.ext_filename_base = [] - if model.external_path is not None: - for k in range(shape[0]): - self.ext_filename_base. \ - append(os.path.join(model.external_path, - self.name_base[k].replace(' ', '_'))) - else: - for k in range(shape[0]): - self.ext_filename_base. 
\ - append(self.name_base[k].replace(' ', '_')) - - self.util_2ds = self.build_2d_instances() - - def __setitem__(self, k, value): - if isinstance(k, int): - assert k in range(0, self.shape[ - 0]), "Util3d error: k not in range nlay" - self.util_2ds[k] = new_u2d(self.util_2ds[k], value) - else: - raise NotImplementedError( - "Util3d doesn't support setitem indices" + str(k)) - - def __setattr__(self, key, value): - if hasattr(self, "util_2ds") and key == "cnstnt": - # set the cnstnt for each u2d - for u2d in self.util_2ds: - u2d.cnstnt = value - elif hasattr(self, "util_2ds") and key == "fmtin": - for u2d in self.util_2ds: - u2d.format = ArrayFormat(u2d, fortran=value, - array_free_format=self.array_free_format) - super(Util3d, self).__setattr__("fmtin", value) - elif hasattr(self, "util_2ds") and key == "how": - for u2d in self.util_2ds: - u2d.how = value - else: - # set the attribute for u3d - super(Util3d, self).__setattr__(key, value) - - @property - def name(self): - return self._name - - @property - def dtype(self): - return self._dtype - - @property - def model(self): - return self._model - - @property - def data_type(self): - return DataType.array3d - - @property - def plotable(self): - return True - - def export(self, f, **kwargs): - from flopy import export - return export.utils.array3d_export(f, self, **kwargs) - - def to_shapefile(self, filename): - """ - Export 3-D model data to shapefile (polygons). Adds an - attribute for each Util2d in self.u2ds - - Parameters - ---------- - filename : str - Shapefile name to write - - Returns - ---------- - None - - See Also - -------- - - Notes - ----- - - Examples - -------- - >>> import flopy - >>> ml = flopy.modflow.Modflow.load('test.nam') - >>> ml.lpf.hk.to_shapefile('test_hk.shp') - """ - warn( - "Deprecation warning: to_shapefile() is deprecated. use .export()", - DeprecationWarning) - - # from flopy.utils.flopy_io import write_grid_shapefile, shape_attr_name - # - # array_dict = {} - # for ilay in range(self._model.nlay): - # u2d = self[ilay] - # name = '{}_{:03d}'.format(shape_attr_name(u2d.name), ilay + 1) - # array_dict[name] = u2d.array - # write_grid_shapefile(filename, self._model.dis.sr, - # array_dict) - - self.export(filename) - - def plot(self, filename_base=None, file_extension=None, mflay=None, - fignum=None, **kwargs): - """ - Plot 3-D model input data - - Parameters - ---------- - filename_base : str - Base file name that will be used to automatically generate file - names for output image files. Plots will be exported as image - files if file_name_base is not None. (default is None) - file_extension : str - Valid matplotlib.pyplot file extension for savefig(). Only used - if filename_base is not None. (default is 'png') - mflay : int - MODFLOW zero-based layer number to return. If None, then all - all layers will be included. (default is None) - **kwargs : dict - axes : list of matplotlib.pyplot.axis - List of matplotlib.pyplot.axis that will be used to plot - data for each layer. If axes=None axes will be generated. - (default is None) - pcolor : bool - Boolean used to determine if matplotlib.pyplot.pcolormesh - plot will be plotted. (default is True) - colorbar : bool - Boolean used to determine if a color bar will be added to - the matplotlib.pyplot.pcolormesh. Only used if pcolor=True. - (default is False) - inactive : bool - Boolean used to determine if a black overlay in inactive - cells in a layer will be displayed. 
(default is True) - contour : bool - Boolean used to determine if matplotlib.pyplot.contour - plot will be plotted. (default is False) - clabel : bool - Boolean used to determine if matplotlib.pyplot.clabel - will be plotted. Only used if contour=True. (default is False) - grid : bool - Boolean used to determine if the model grid will be plotted - on the figure. (default is False) - masked_values : list - List of unique values to be excluded from the plot. - - Returns - ---------- - out : list - Empty list is returned if filename_base is not None. Otherwise - a list of matplotlib.pyplot.axis is returned. - - See Also - -------- - - Notes - ----- - - Examples - -------- - >>> import flopy - >>> ml = flopy.modflow.Modflow.load('test.nam') - >>> ml.lpf.hk.plot() - - """ - from flopy.plot import PlotUtilities - - axes = PlotUtilities._plot_util3d_helper(self, - filename_base=filename_base, - file_extension=file_extension, - mflay=mflay, - fignum=fignum, - **kwargs) - return axes - - def __getitem__(self, k): - if (isinstance(k, int) or - np.issubdtype(getattr(k, 'dtype', None), np.integer)): - return self.util_2ds[k] - elif len(k) == 3: - return self.array[k[0], k[1], k[2]] - else: - raise Exception("Util3d error: unsupported indices:" + str(k)) - - def get_file_entry(self): - s = '' - for u2d in self.util_2ds: - s += u2d.get_file_entry() - return s - - def get_value(self): - value = [] - for u2d in self.util_2ds: - value.append(u2d.get_value()) - return value - - @property - def array(self): - ''' - Return a numpy array of the 3D shape. If an unstructured model, then - return an array of size nodes. - - ''' - nlay, nrow, ncol = self.shape - if nrow is not None: - # typical 3D case - a = np.empty((self.shape), dtype=self._dtype) - # for i,u2d in self.uds: - for i, u2d in enumerate(self.util_2ds): - a[i] = u2d.array - else: - # unstructured case - nodes = ncol.sum() - a = np.empty((nodes), dtype=self._dtype) - istart = 0 - for i, u2d in enumerate(self.util_2ds): - istop = istart + ncol[i] - a[istart:istop] = u2d.array - istart = istop - return a - - def build_2d_instances(self): - u2ds = [] - # if value is not enumerable, then make a list of something - if not isinstance(self.__value, list) \ - and not isinstance(self.__value, np.ndarray): - self.__value = [self.__value] * self.shape[0] - - # if this is a list or 1-D array with constant values per layer - if isinstance(self.__value, list) \ - or (isinstance(self.__value, np.ndarray) - and (self.__value.ndim == 1)): - - assert len(self.__value) == self.shape[0], \ - 'length of 3d enumerable:' + str(len(self.__value)) + \ - ' != to shape[0]:' + str(self.shape[0]) - - for i, item in enumerate(self.__value): - if isinstance(item, Util2d): - # we need to reset the external name because most of the - # load() methods don't use layer-specific names - item._ext_filename = self.ext_filename_base[i] + \ - "{0}.ref".format(i + 1) - # reset the model instance in cases these Util2d's - # came from another model instance - item.model = self._model - u2ds.append(item) - else: - name = self.name_base[i] + str(i + 1) - ext_filename = None - if self._model.external_path is not None: - ext_filename = self.ext_filename_base[i] + str(i + 1) + \ - '.ref' - shape = self.shape[1:] - if shape[0] is None: - # allow for unstructured so that ncol changes by layer - shape = (self.shape[2][i],) - u2d = Util2d(self.model, shape, self.dtype, item, - fmtin=self.fmtin, name=name, - ext_filename=ext_filename, - locat=self.locat, - array_free_format=self.array_free_format) - 
u2ds.append(u2d) - - elif isinstance(self.__value, np.ndarray): - # if an array of shape nrow,ncol was passed, tile it out for each layer - if self.__value.shape[0] != self.shape[0]: - if self.__value.shape == (self.shape[1], self.shape[2]): - self.__value = [self.__value] * self.shape[0] - else: - raise Exception('value shape[0] != to self.shape[0] and' + - 'value.shape[[1,2]] != self.shape[[1,2]]' + - str(self.__value.shape) + ' ' + str( - self.shape)) - for i, a in enumerate(self.__value): - a = np.atleast_2d(a) - ext_filename = None - name = self.name_base[i] + str(i + 1) - if self._model.external_path is not None: - ext_filename = self.ext_filename_base[i] + str( - i + 1) + '.ref' - u2d = Util2d(self._model, self.shape[1:], self._dtype, a, - fmtin=self.fmtin, name=name, - ext_filename=ext_filename, - locat=self.locat, - array_free_format=self.array_free_format) - u2ds.append(u2d) - - else: - raise Exception('util_array_3d: value attribute must be list ' + - ' or ndarray, not' + str(type(self.__value))) - return u2ds - - @staticmethod - def load(f_handle, model, shape, dtype, name, ext_unit_dict=None, - array_format=None): - if len(shape) != 3: - raise ValueError( - 'Util3d: expected 3 dimensions, found shape {0}'.format(shape)) - nlay, nrow, ncol = shape - u2ds = [] - for k in range(nlay): - u2d_name = name + '_Layer_{0}'.format(k) - if nrow is None: - nr = 1 - nc = ncol[k] - else: - nr = nrow - nc = ncol - u2d = Util2d.load(f_handle, model, (nr, nc), dtype, u2d_name, - ext_unit_dict=ext_unit_dict, - array_format=array_format) - u2ds.append(u2d) - u3d = Util3d(model, shape, dtype, u2ds, name) - return u3d - - def __mul__(self, other): - if np.isscalar(other): - new_u2ds = [] - for u2d in self.util_2ds: - new_u2ds.append(u2d * other) - return Util3d(self._model, self.shape, self._dtype, new_u2ds, - self._name, self.fmtin, self.cnstnt, self.iprn, - self.locat) - elif isinstance(other, list): - assert len(other) == self.shape[0] - new_u2ds = [] - for u2d, item in zip(self.util_2ds, other): - new_u2ds.append(u2d * item) - return Util3d(self._model, self.shape, self._dtype, new_u2ds, - self._name, self.fmtin, self.cnstnt, self.iprn, - self.locat) - - -class Transient3d(DataInterface): - """ - Transient3d class for handling time-dependent 3-D model arrays. - just a thin wrapper around Util3d - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to which - this package will be added. - shape : length 3 tuple - shape of the 3-D transient arrays, typically (nlay,nrow,ncol) - dtype : [np.int32, np.float32, np.bool] - the type of the data - value : variable - the data to be assigned to the 3-D arrays. Typically a dict - of {kper:value}, where kper is the zero-based stress period - to assign a value to. Value should be cast-able to Util2d instance - can be a scalar, list, or ndarray is the array value is constant in - time. - name : string - name of the property, used for writing comments to input files and - for forming external files names (if needed) - fmtin : string - modflow fmtin variable (optional). (the default is None) - cnstnt : string - modflow cnstnt variable (optional) (the default is 1.0) - iprn : int - modflow iprn variable (optional) (the default is -1) - locat : int - modflow locat variable (optional) (the default is None). 
If the model - instance does not support free format and the - external flag is not set and the value is a simple scalar, - then locat must be explicitly passed as it is the unit number - to read the array from - ext_filename : string - the external filename to write the array representation to - (optional) (the default is None) . - If type(value) is a string and is an accessible filename, - the ext_filename is reset to value. - bin : bool - flag to control writing external arrays as binary (optional) - (the default is False) - - Attributes - ---------- - transient_3ds : dict{kper:Util3d} - the transient sequence of Util3d objects - - Methods - ------- - get_kper_entry : (itmp,string) - get the itmp value and the Util2d file entry of the value in - transient_2ds in bin kper. if kper < min(Transient2d.keys()), - return (1,zero_entry). If kper > < min(Transient2d.keys()), - but is not found in Transient2d.keys(), return (-1,'') - - See Also - -------- - - Notes - ----- - - Examples - -------- - - """ - - def __init__(self, model, shape, dtype, value, name, fmtin=None, - cnstnt=1.0, iprn=-1, ext_filename=None, locat=None, - bin=False, array_free_format=None): - - if isinstance(value, Transient3d): - for attr in value.__dict__.items(): - setattr(self, attr[0], attr[1]) - self._model = model - return - - self._model = model - if len(shape) != 3: - raise ValueError( - 'Transient3d: expected 3 dimensions (nlay, nrow, ncol), found ' - 'shape {0}'.format(shape)) - self.shape = shape - self._dtype = dtype - self.__value = value - self.name_base = name - self.fmtin = fmtin - self.cnstnt = cnstnt - self.iprn = iprn - self.locat = locat - self.array_free_format = array_free_format - self.transient_3ds = self.build_transient_sequence() - return - - def __setattr__(self, key, value): - # set the attribute for u3d, even for cnstnt - super(Transient3d, self).__setattr__(key, value) - - @property - def model(self): - return self._model - - @property - def name(self): - return self.name_base - - @property - def dtype(self): - return self._dtype - - @property - def data_type(self): - return DataType.transient3d - - @property - def plotable(self): - return False - - def get_zero_3d(self, kper): - name = self.name_base + str(kper + 1) + '(filled zero)' - return Util3d(self._model, self.shape, - self._dtype, 0.0, name=name, - array_free_format=self.array_free_format) - - def __getitem__(self, kper): - if kper in list(self.transient_3ds.keys()): - return self.transient_3ds[kper] - elif kper < min(self.transient_3ds.keys()): - return self.get_zero_3d(kper) - else: - for i in range(kper, -1, -1): - if i in list(self.transient_3ds.keys()): - return self.transient_3ds[i] - raise Exception("Transient2d.__getitem__(): error:" + \ - " could not find an entry before kper {0:d}".format( - kper)) - - def __setitem__(self, key, value): - try: - key = int(key) - except Exception as e: - raise Exception("Transient3d.__setitem__() error: " + \ - "'key'could not be cast to int:{0}".format(str(e))) - nper = self._model.nper - if key > self._model.nper or key < 0: - raise Exception("Transient3d.__setitem__() error: " + \ - "key {0} not in nper range {1}:{2}".format(key, 0, - nper)) - - self.transient_3ds[key] = self.__get_3d_instance(key, value) - - @property - def array(self): - arr = np.zeros((self._model.nper, self.shape[0], self.shape[1], - self.shape[2]), dtype=self._dtype) - for kper in range(self._model.nper): - u3d = self[kper] - for k in range(self.shape[0]): - arr[kper, k, :, :] = u3d[k].array - return arr - - def 
get_kper_entry(self, kper): - """ - get the file entry info for a given kper - returns (itmp,file entry string from Util3d) - """ - if kper in self.transient_3ds: - s = '' - for k in range(self.shape[0]): - s += self.transient_3ds[kper][k].get_file_entry() - return 1, s - elif kper < min(self.transient_3ds.keys()): - t = self.get_zero_3d(kper).get_file_entry() - s = '' - for k in range(self.shape[0]): - s += t[k].get_file_entry() - return 1, s - else: - return -1, '' - - def build_transient_sequence(self): - """ - parse self.__value into a dict{kper:Util3d} - """ - - # a dict keyed on kper (zero-based) - if isinstance(self.__value, dict): - tran_seq = {} - for key, val in self.__value.items(): - try: - key = int(key) - except: - raise Exception("Transient3d error: can't cast key: " + - str(key) + " to kper integer") - if key < 0: - raise Exception("Transient3d error: key can't be " + - " negative: " + str(key)) - try: - u3d = self.__get_3d_instance(key, val) - except Exception as e: - raise Exception("Transient3d error building Util3d " + - " instance from value at kper: " + - str(key) + "\n" + str(e)) - tran_seq[key] = u3d - return tran_seq - - # these are all for single entries - use the same Util2d for all kper - # an array of shape (nrow,ncol) - elif isinstance(self.__value, np.ndarray): - return {0: self.__get_3d_instance(0, self.__value)} - - # a filename - elif isinstance(self.__value, str): - return {0: self.__get_3d_instance(0, self.__value)} - - # a scalar - elif np.isscalar(self.__value): - return {0: self.__get_3d_instance(0, self.__value)} - - # lists aren't allowed - elif isinstance(self.__value, list): - raise Exception("Transient3d error: value cannot be a list " + - "anymore. try a dict{kper,value}") - else: - raise Exception("Transient3d error: value type not " + - " recognized: " + str(type(self.__value))) - - def __get_3d_instance(self, kper, arg): - """ - parse an argument into a Util3d instance - """ - name = '{}_period{}'.format(self.name_base, kper + 1) - u3d = Util3d(self._model, self.shape, self._dtype, arg, - fmtin=self.fmtin, name=name, - # ext_filename=ext_filename, - locat=self.locat, - array_free_format=self.array_free_format) - return u3d - - -class Transient2d(DataInterface): - """ - Transient2d class for handling time-dependent 2-D model arrays. - just a thin wrapper around Util2d - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to which - this package will be added. - shape : length 2 tuple - shape of the 2-D transient arrays, typically (nrow,ncol) - dtype : [np.int32, np.float32, np.bool] - the type of the data - value : variable - the data to be assigned to the 2-D arrays. Typically a dict - of {kper:value}, where kper is the zero-based stress period - to assign a value to. Value should be cast-able to Util2d instance - can be a scalar, list, or ndarray is the array value is constant in - time. - name : string - name of the property, used for writing comments to input files and - for forming external files names (if needed) - fmtin : string - modflow fmtin variable (optional). (the default is None) - cnstnt : string - modflow cnstnt variable (optional) (the default is 1.0) - iprn : int - modflow iprn variable (optional) (the default is -1) - locat : int - modflow locat variable (optional) (the default is None). 
If the model - instance does not support free format and the - external flag is not set and the value is a simple scalar, - then locat must be explicitly passed as it is the unit number - to read the array from - ext_filename : string - the external filename to write the array representation to - (optional) (the default is None). - If type(value) is a string and is an accessible filename, - the ext_filename is reset to value. - bin : bool - flag to control writing external arrays as binary (optional) - (the default is False) - - Attributes - ---------- - transient_2ds : dict{kper:Util2d} - the transient sequence of Util2d objects - - Methods - ------- - get_kper_entry : (itmp,string) - get the itmp value and the Util2d file entry of the value in - transient_2ds for kper. If kper < min(Transient2d.keys()), - return (1,zero_entry). If kper > min(Transient2d.keys()), - but is not found in Transient2d.keys(), return (-1,'') - - See Also - -------- - - Notes - ----- - - Examples - -------- - - """ - - def __init__(self, model, shape, dtype, value, name, fmtin=None, - cnstnt=1.0, iprn=-1, ext_filename=None, locat=None, - bin=False, array_free_format=None): - - if isinstance(value, Transient2d): - for attr in value.__dict__.items(): - setattr(self, attr[0], attr[1]) - for kper, u2d in self.transient_2ds.items(): - self.transient_2ds[kper] = Util2d(model, u2d.shape, u2d.dtype, - u2d._array, name=u2d.name, - fmtin=u2d.format.fortran, - locat=locat, - cnstnt=u2d.cnstnt, - ext_filename=u2d.filename, - array_free_format=array_free_format) - - self._model = model - return - - self._model = model - if len(shape) != 2: - raise ValueError( - 'Transient2d: expected 2 dimensions (nrow, ncol), found ' - 'shape {0}'.format(shape)) - if shape[0] is None: - # allow for unstructured so that ncol changes by layer - shape = (1, shape[1][0]) - - self.shape = shape - self._dtype = dtype - self.__value = value - self.name_base = name - self.fmtin = fmtin - self.cnstnt = cnstnt - self.iprn = iprn - self.locat = locat - self.array_free_format = array_free_format - if model.external_path is not None: - self.ext_filename_base = \ - os.path.join(model.external_path, - self.name_base.replace(' ', '_')) - else: - self.ext_filename_base = self.name_base.replace(' ', '_') - self.transient_2ds = self.build_transient_sequence() - return - - @property - def name(self): - return self.name_base - - @property - def dtype(self): - return self._dtype - - @property - def model(self): - return self._model - - @property - def data_type(self): - return DataType.transient2d - - @property - def plotable(self): - return True - - @staticmethod - def masked4d_array_to_kper_dict(m4d): - assert m4d.ndim == 4 - kper_dict = {} - for kper, arr in enumerate(m4d): - if np.all(np.isnan(arr)): - continue - elif np.any(np.isnan(arr)): - raise Exception("masked value found in array") - kper_dict[kper] = arr.copy() - return kper_dict - - @classmethod - def from_4d(cls, model, pak_name, m4ds): - """construct a Transient2d instance from a - dict(name: (masked) 4d numpy.ndarray) - Parameters - ---------- - model : flopy.mbase derived type - pak_name : str package name (e.g. RCH) - m4ds : dict(name,(masked) 4d numpy.ndarray) - each ndarray must have shape (nper,1,nrow,ncol). - if an entire (nrow,ncol) slice is np.NaN, then - that kper is skipped.
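A short sketch of masked4d_array_to_kper_dict as defined above (the array is hypothetical): stress periods that are entirely np.NaN are dropped from the returned dict, and partially masked periods raise an exception:

>>> import numpy as np
>>> from flopy.utils.util_array import Transient2d
>>> m4d = np.zeros((3, 1, 2, 2), dtype=np.float32)  # (nper, 1, nrow, ncol)
>>> m4d[1] = np.nan  # mask all of kper 1
>>> sorted(Transient2d.masked4d_array_to_kper_dict(m4d).keys())
[0, 2]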
- Returns - ------- - Transient2d instance - """ - - assert isinstance(m4ds, dict) - keys = list(m4ds.keys()) - assert len(keys) == 1 - name = keys[0] - m4d = m4ds[name] - - assert m4d.ndim == 4 - assert m4d.shape[0] == model.nper - assert m4d.shape[1] == 1 - assert m4d.shape[2] == model.nrow - assert m4d.shape[3] == model.ncol - m4d = m4d.astype(np.float32) - kper_dict = Transient2d.masked4d_array_to_kper_dict(m4d) - return cls(model=model, shape=(model.nrow, model.ncol), - value=kper_dict, - dtype=m4d.dtype.type, name=name) - - def __setattr__(self, key, value): - if hasattr(self, "transient_2ds") and key == "cnstnt": - # set cnstnt for each u2d - for kper, u2d in self.transient_2ds.items(): - self.transient_2ds[kper].cnstnt = value - elif hasattr(self, "transient_2ds") and key == "fmtin": - # set fmtin for each u2d - for kper, u2d in self.transient_2ds.items(): - self.transient_2ds[kper].format = ArrayFormat(u2d, - fortran=value) - elif hasattr(self, "transient_2ds") and key == "how": - # set how for each u2d - for kper, u2d in self.transient_2ds.items(): - self.transient_2ds[kper].how = value - # set the attribute for u3d, even for cnstnt - super(Transient2d, self).__setattr__(key, value) - - def get_zero_2d(self, kper): - name = self.name_base + str(kper + 1) + '(filled zero)' - return Util2d(self._model, self.shape, - self._dtype, 0.0, name=name, - array_free_format=self.array_free_format) - - def to_shapefile(self, filename): - """ - Export transient 2D data to a shapefile (as polygons). Adds an - attribute for each unique Util2d instance in self.data - - Parameters - ---------- - filename : str - Shapefile name to write - - Returns - ---------- - None - - See Also - -------- - - Notes - ----- - - Examples - -------- - >>> import flopy - >>> ml = flopy.modflow.Modflow.load('test.nam') - >>> ml.rch.rech.as_shapefile('test_rech.shp') - """ - warn( - "Deprecation warning: to_shapefile() is deprecated. use .export()", - DeprecationWarning) - - # from flopy.utils.flopy_io import write_grid_shapefile, shape_attr_name - # - # array_dict = {} - # for kper in range(self._model.nper): - # u2d = self[kper] - # name = '{}_{:03d}'.format(shape_attr_name(u2d.name), kper + 1) - # array_dict[name] = u2d.array - # write_grid_shapefile(filename, self._model.dis.sr, array_dict) - self.export(filename) - - def plot(self, filename_base=None, file_extension=None, kper=0, - fignum=None, **kwargs): - """ - Plot transient 2-D model input data - - Parameters - ---------- - filename_base : str - Base file name that will be used to automatically generate file - names for output image files. Plots will be exported as image - files if file_name_base is not None. (default is None) - file_extension : str - Valid matplotlib.pyplot file extension for savefig(). Only used - if filename_base is not None. (default is 'png') - kper : int or str - model stress period. if 'all' is provided, all stress periods - will be plotted - fignum: list or int - Figure numbers for plot title - - **kwargs : dict - axes : list of matplotlib.pyplot.axis - List of matplotlib.pyplot.axis that will be used to plot - data for each layer. If axes=None axes will be generated. - (default is None) - pcolor : bool - Boolean used to determine if matplotlib.pyplot.pcolormesh - plot will be plotted. (default is True) - colorbar : bool - Boolean used to determine if a color bar will be added to - the matplotlib.pyplot.pcolormesh. Only used if pcolor=True. 
- (default is False) - inactive : bool - Boolean used to determine if a black overlay in inactive - cells in a layer will be displayed. (default is True) - contour : bool - Boolean used to determine if matplotlib.pyplot.contour - plot will be plotted. (default is False) - clabel : bool - Boolean used to determine if matplotlib.pyplot.clabel - will be plotted. Only used if contour=True. (default is False) - grid : bool - Boolean used to determine if the model grid will be plotted - on the figure. (default is False) - masked_values : list - List of unique values to be excluded from the plot. - kper : str - MODFLOW zero-based stress period number to return. If - kper='all' then data for all stress period will be - extracted. (default is zero). - - Returns - ---------- - out : list - Empty list is returned if filename_base is not None. Otherwise - a list of matplotlib.pyplot.axis is returned. - - See Also - -------- - - Notes - ----- - - Examples - -------- - >>> import flopy - >>> ml = flopy.modflow.Modflow.load('test.nam') - >>> ml.rch.rech.plot() - - """ - from flopy.plot import PlotUtilities - - axes = PlotUtilities._plot_transient2d_helper(self, - filename_base=filename_base, - file_extension=file_extension, - kper=kper, - fignum=fignum, - **kwargs) - - return axes - - def __getitem__(self, kper): - if kper in list(self.transient_2ds.keys()): - return self.transient_2ds[kper] - elif kper < min(self.transient_2ds.keys()): - return self.get_zero_2d(kper) - else: - for i in range(kper, -1, -1): - if i in list(self.transient_2ds.keys()): - return self.transient_2ds[i] - raise Exception("Transient2d.__getitem__(): error:" + \ - " could not find an entry before kper {0:d}".format( - kper)) - - def __setitem__(self, key, value): - try: - key = int(key) - except Exception as e: - raise Exception("Transient2d.__setitem__() error: " + \ - "'key'could not be cast to int:{0}".format(str(e))) - nper = self._model.nper - if key > self._model.nper or key < 0: - raise Exception("Transient2d.__setitem__() error: " + \ - "key {0} not in nper range {1}:{2}".format(key, 0, - nper)) - - self.transient_2ds[key] = self.__get_2d_instance(key, value) - - @property - def array(self): - arr = np.zeros((self._model.nper, 1, self.shape[0], self.shape[1]), - dtype=self._dtype) - for kper in range(self._model.nper): - u2d = self[kper] - arr[kper, 0, :, :] = u2d.array - return arr - - def export(self, f, **kwargs): - from flopy import export - return export.utils.transient2d_export(f, self, **kwargs) - - def get_kper_entry(self, kper): - """ - Get the file entry info for a given kper - returns (itmp,file entry string from Util2d) - """ - if kper in self.transient_2ds: - return (1, self.transient_2ds[kper].get_file_entry()) - elif kper < min(self.transient_2ds.keys()): - return (1, self.get_zero_2d(kper).get_file_entry()) - else: - return (-1, '') - - def build_transient_sequence(self): - """ - parse self.__value into a dict{kper:Util2d} - """ - - # a dict keyed on kper (zero-based) - if isinstance(self.__value, dict): - tran_seq = {} - for key, val in self.__value.items(): - try: - key = int(key) - except: - raise Exception("Transient2d error: can't cast key: " + - str(key) + " to kper integer") - if key < 0: - raise Exception("Transient2d error: key can't be " + - " negative: " + str(key)) - try: - u2d = self.__get_2d_instance(key, val) - except Exception as e: - raise Exception("Transient2d error building Util2d " + - " instance from value at kper: " + - str(key) + "\n" + str(e)) - tran_seq[key] = u2d - return 
tran_seq - - # these are all for single entries - use the same Util2d for all kper - # an array of shape (nrow,ncol) - elif isinstance(self.__value, np.ndarray): - return {0: self.__get_2d_instance(0, self.__value)} - - # a filename - elif isinstance(self.__value, str): - return {0: self.__get_2d_instance(0, self.__value)} - - # a scalar - elif np.isscalar(self.__value): - return {0: self.__get_2d_instance(0, self.__value)} - - # lists aren't allowed - elif isinstance(self.__value, list): - raise Exception("Transient2d error: value cannot be a list " + - "anymore. try a dict{kper,value}") - else: - raise Exception("Transient2d error: value type not " + - " recognized: " + str(type(self.__value))) - - def __get_2d_instance(self, kper, arg): - """ - parse an argument into a Util2d instance - """ - ext_filename = None - name = self.name_base + str(kper + 1) - ext_filename = self.ext_filename_base + str(kper) + '.ref' - u2d = Util2d(self._model, self.shape, self._dtype, arg, - fmtin=self.fmtin, name=name, - ext_filename=ext_filename, - locat=self.locat, - array_free_format=self.array_free_format) - return u2d - - -class Util2d(DataInterface): - """ - Util2d class for handling 1- or 2-D model arrays - - Parameters - ---------- - model : model object - The model object (of type :class:`flopy.modflow.mf.Modflow`) to which - this package will be added. - shape : tuple - Shape of the 1- or 2-D array - dtype : [np.int32, np.float32, np.bool] - the type of the data - value : variable - the data to be assigned to the 1- or 2-D array. - can be a scalar, list, ndarray, or filename - name : string - name of the property (optional). (the default is None - fmtin : string - modflow fmtin variable (optional). (the default is None) - cnstnt : string - modflow cnstnt variable (optional) (the default is 1.0) - iprn : int - modflow iprn variable (optional) (the default is -1) - locat : int - modflow locat variable (optional) (the default is None). If the model - instance does not support free format and the - external flag is not set and the value is a simple scalar, - then locat must be explicitly passed as it is the unit number - to read the array from) - ext_filename : string - the external filename to write the array representation to - (optional) (the default is None) . - If type(value) is a string and is an accessible filename, - the ext_filename is reset to value. - bin : bool - flag to control writing external arrays as binary (optional) - (the default is False) - - Attributes - ---------- - array : np.ndarray - the array representation of the 2-D object - how : str - the str flag to control how the array is written to the model - input files e.g. "constant","internal","external","openclose" - format : ArrayFormat object - controls the ASCII representation of the numeric array - - Methods - ------- - get_file_entry : string - get the model input file string including the control record - - See Also - -------- - - Notes - ----- - If value is a valid filename and model.external_path is None, then a copy - of the file is made and placed in model.model_ws directory. - - If value is a valid filename and model.external_path is not None, then - a copy of the file is made a placed in the external_path directory. - - If value is a scalar, it is always written as a constant, regardless of - the model.external_path setting. - - If value is an array and model.external_path is not None, then the array - is written out in the external_path directory. 
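The Transient2d.__getitem__ fallback implemented above can be sketched as follows (hypothetical model and values): a kper that is not a key falls back on the most recent previous entry, and a kper before the first key returns a zero-filled array:

>>> import numpy as np
>>> import flopy
>>> from flopy.utils.util_array import Transient2d
>>> ml = flopy.modflow.Modflow()
>>> dis = flopy.modflow.ModflowDis(ml, nlay=1, nrow=2, ncol=2, nper=4)
>>> t2d = Transient2d(ml, (2, 2), np.float32, {1: 0.25}, 'demo')
>>> float(t2d[3].array.max())  # falls back on the kper 1 entry
0.25
>>> float(t2d[0].array.max())  # before the first key: zero-filled
0.0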
The name of the file that - holds the array is created from the name attribute. If the model supports - "free format", then the array is accessed via the "open/close" approach. - Otherwise, a unit number and filename is added to the name file. - - If value is an array and model.external_path is None, then the array is - written internally to the model input file. - - Examples - -------- - - """ - - def __init__(self, model, shape, dtype, value, name, fmtin=None, - cnstnt=1.0, iprn=-1, ext_filename=None, locat=None, bin=False, - how=None, array_free_format=None): - """Create 1- or 2-d array - - Parameters - ---------- - model : model object - shape : tuple - Dimensions of 1- or 2-D array, e.g. (nrow, ncol) - dtype : int or np.float32 - value : str, list, np.int32, np.float32, bool or np.ndarray - name : str - Array name or description - fmtin : str, optional - cnstnt : np.int32 or np.float32, optional - Array constant; default 1.0 - iprn : int, optional - Modflow printing option; default -1 - ext_filename : str, optional - Name of external files name where arrays are written - locat : int, optional - bin : bool, optional - If True, writes unformatted files; default False writes formatted - how : str, optional - One of "constant", "internal", "external", or "openclose" - array_free_format : bool, optional - used for generating control record - - Notes - ----- - Support with minimum of mem footprint, only creates arrays as needed, - otherwise functions with strings or constants. - - Model instance string attribute "external_path" used to determine - external array writing - """ - if isinstance(value, Util2d): - for attr in value.__dict__.items(): - setattr(self, attr[0], attr[1]) - self._model = model - self._name = name - self._ext_filename = self._name.replace(' ', '_') + ".ref" - if ext_filename is not None: - self.ext_filename = ext_filename.lower() - else: - self.ext_filename = None - if locat is not None: - self.locat = locat - return - - # some defense - if dtype != np.int32 and np.issubdtype(dtype, np.integer): - # Modflow only uses 4-byte integers - dtype = np.dtype(dtype) - if np.dtype(int).itemsize != 4: - # show warning for platforms where int is not 4-bytes - warn('Util2d: setting integer dtype from {0} to int32' - .format(dtype)) - dtype = np.int32 - if dtype not in [np.int32, np.float32, np.bool]: - raise TypeError('Util2d:unsupported dtype: ' + str(dtype)) - - if name is not None: - name = name.lower() - if ext_filename is not None: - ext_filename = ext_filename.lower() - - self._model = model - if len(shape) not in (1, 2): - raise ValueError( - 'Util2d: shape must describe 1- or 2-dimensions, ' - 'e.g. 
(nrow, ncol)') - if min(shape) < 1: - raise ValueError('Util2d: each shape dimension must be at least 1') - self.shape = shape - self._dtype = dtype - self._name = name - self.locat = locat - self.parse_value(value) - if self.vtype == str: - fmtin = "(FREE)" - self.__value_built = None - self.cnstnt = dtype(cnstnt) - - self.iprn = iprn - self._format = ArrayFormat(self, fortran=fmtin, - array_free_format=array_free_format) - self._format._isbinary = bool(bin) - self.ext_filename = ext_filename - self._ext_filename = self._name.replace(' ', '_') + ".ref" - - self._acceptable_hows = ["constant", "internal", "external", - "openclose"] - - if how is not None: - how = how.lower() - assert how in self._acceptable_hows - self._how = how - else: - self._decide_how() - - @property - def name(self): - return self._name - - @property - def dtype(self): - return self._dtype - - @property - def model(self): - return self._model - - @property - def data_type(self): - return DataType.array2d - - @property - def plotable(self): - return True - - def _decide_how(self): - # if a constant was passed in - if self.vtype in [np.int32, np.float32]: - self._how = "constant" - # if a filename was passed in or external path was set - elif self._model.external_path is not None or \ - self.vtype == str: - if self.format.array_free_format: - self._how = "openclose" - else: - self._how = "external" - else: - self._how = "internal" - - def plot(self, title=None, filename_base=None, file_extension=None, - fignum=None, **kwargs): - """ - Plot 2-D model input data - - Parameters - ---------- - title : str - Plot title. If a plot title is not provide one will be - created based on data name (self._name). (default is None) - filename_base : str - Base file name that will be used to automatically generate file - names for output image files. Plots will be exported as image - files if file_name_base is not None. (default is None) - file_extension : str - Valid matplotlib.pyplot file extension for savefig(). Only used - if filename_base is not None. (default is 'png') - **kwargs : dict - axes : list of matplotlib.pyplot.axis - List of matplotlib.pyplot.axis that will be used to plot - data for each layer. If axes=None axes will be generated. - (default is None) - pcolor : bool - Boolean used to determine if matplotlib.pyplot.pcolormesh - plot will be plotted. (default is True) - colorbar : bool - Boolean used to determine if a color bar will be added to - the matplotlib.pyplot.pcolormesh. Only used if pcolor=True. - (default is False) - inactive : bool - Boolean used to determine if a black overlay in inactive - cells in a layer will be displayed. (default is True) - contour : bool - Boolean used to determine if matplotlib.pyplot.contour - plot will be plotted. (default is False) - clabel : bool - Boolean used to determine if matplotlib.pyplot.clabel - will be plotted. Only used if contour=True. (default is False) - grid : bool - Boolean used to determine if the model grid will be plotted - on the figure. (default is False) - masked_values : list - List of unique values to be excluded from the plot. - - Returns - ---------- - out : list - Empty list is returned if filename_base is not None. Otherwise - a list of matplotlib.pyplot.axis is returned. 
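A minimal sketch of Util2d construction and the 'how' resolution performed by _decide_how above (hypothetical model; external_path is unset, so a scalar resolves to 'constant' and an array to 'internal'):

>>> import numpy as np
>>> import flopy
>>> from flopy.utils.util_array import Util2d
>>> ml = flopy.modflow.Modflow()
>>> Util2d(ml, (2, 2), np.float32, 10.0, 'demo').how
'constant'
>>> Util2d(ml, (2, 2), np.float32, np.ones((2, 2)), 'demo').how
'internal'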
- - See Also - -------- - - Notes - ----- - - Examples - -------- - >>> import flopy - >>> ml = flopy.modflow.Modflow.load('test.nam') - >>> ml.dis.top.plot() - - """ - from flopy.plot import PlotUtilities - - axes = PlotUtilities._plot_util2d_helper(self, title=title, - filename_base=filename_base, - file_extension=file_extension, - fignum=fignum, **kwargs) - return axes - - def export(self, f, **kwargs): - from flopy import export - return export.utils.array2d_export(f, self, **kwargs) - - def to_shapefile(self, filename): - """ - Export 2-D model data to a shapefile (as polygons) of self.array - - Parameters - ---------- - filename : str - Shapefile name to write - - Returns - ---------- - None - - See Also - -------- - - Notes - ----- - - Examples - -------- - >>> import flopy - >>> ml = flopy.modflow.Modflow.load('test.nam') - >>> ml.dis.top.as_shapefile('test_top.shp') - """ - - warn( - "Deprecation warning: to_shapefile() is deprecated. use .export()", - DeprecationWarning) - # from flopy.utils.flopy_io import write_grid_shapefile, shape_attr_name - # name = shape_attr_name(self._name, keep_layer=True) - # write_grid_shapefile(filename, self._model.dis.sr, {name: - # self.array}) - self.export(filename) - - def set_fmtin(self, fmtin): - self._format = ArrayFormat(self, fortran=fmtin, - array_free_format=self.format.array_free_format) - - def get_value(self): - return copy.deepcopy(self.__value) - - # overloads, tries to avoid creating arrays if possible - def __add__(self, other): - if self.vtype in [np.int32, np.float32] and self.vtype == other.vtype: - return self.__value + other.get_value() - else: - return self.array + other.array - - def __sub__(self, other): - if self.vtype in [np.int32, np.float32] and self.vtype == other.vtype: - return self.__value - other.get_value() - else: - return self.array - other.array - - def __mul__(self, other): - if np.isscalar(other): - return Util2d(self._model, self.shape, self._dtype, - self._array * other, self._name, - self.format.fortran, self.cnstnt, self.iprn, - self.ext_filename, - self.locat, self.format.binary) - else: - raise NotImplementedError( - "Util2d.__mul__() not implemented for non-scalars") - - def __eq__(self, other): - if not isinstance(other, Util2d): - return False - if not np.array_equal(other.array, self.array): - return False - if other.cnstnt != self.cnstnt: - return False - return True - - def __getitem__(self, k): - if isinstance(k, int): - if len(self.shape) == 1: - return self.array[k] - elif self.shape[0] == 1: - return self.array[0, k] - elif self.shape[1] == 1: - return self.array[k, 0] - else: - raise Exception( - "Util2d.__getitem__() error: an integer was passed, " + - "self.shape > 1 in both dimensions") - else: - if isinstance(k, tuple): - if len(k) == 2: - return self.array[k[0], k[1]] - if len(k) == 1: - return self.array[k] - else: - return self.array[(k,)] - - def __setitem__(self, k, value): - """ - this one is dangerous because it resets __value - """ - a = self.array - a[k] = value - a = a.astype(self._dtype) - self.__value = a - if self.__value_built is not None: - self.__value_built = None - - def __setattr__(self, key, value): - if key == "fmtin": - self._format = ArrayFormat(self, fortran=value) - elif key == "format": - assert isinstance(value, ArrayFormat) - self._format = value - elif key == "how": - value = value.lower() - assert value in self._acceptable_hows - self._how = value - elif key == "model": - self._model = value - else: - super(Util2d, self).__setattr__(key, value) - - def 
all(self): - return self.array.all() - - def __len__(self): - return self.shape[0] - - def sum(self): - return self.array.sum() - - def unique(self): - return np.unique(self.array) - - @property - def format(self): - # don't return a copy because we want to allow - # access to the attributes of ArrayFormat - return self._format - - @property - def how(self): - return copy.copy(self._how) - - @property - def vtype(self): - return type(self.__value) - - @property - def python_file_path(self): - """ - where python is going to write the file - Returns - ------- - file_path (str) : path relative to python: includes model_ws - """ - # if self.vtype != str: - # raise Exception("Util2d call to python_file_path " + - # "for vtype != str") - python_file_path = '' - if self._model.model_ws != '.': - python_file_path = os.path.join(self._model.model_ws) - if self._model.external_path is not None: - python_file_path = os.path.join(python_file_path, - self._model.external_path) - python_file_path = os.path.join(python_file_path, - self.filename) - return python_file_path - - @property - def filename(self): - if self.vtype != str: - if self.ext_filename is not None: - filename = os.path.split(self.ext_filename)[-1] - else: - filename = os.path.split(self._ext_filename)[-1] - else: - filename = os.path.split(self.__value)[-1] - return filename - - @property - def model_file_path(self): - """ - where the model expects the file to be - - Returns - ------- - file_path (str): path relative to the name file - - """ - - model_file_path = '' - if self._model.external_path is not None: - model_file_path = os.path.join(model_file_path, - self._model.external_path) - model_file_path = os.path.join(model_file_path, self.filename) - return model_file_path - - def get_constant_cr(self, value): - - if self.format.array_free_format: - lay_space = '{0:>27s}'.format('') - if self.vtype in [int, np.int32]: - lay_space = '{0:>32s}'.format('') - cr = 'CONSTANT ' + self.format.py[1].format(value) - cr = '{0:s}{1:s}#{2:<30s}\n'.format(cr, lay_space, - self._name) - else: - cr = self._get_fixed_cr(0, value=value) - return cr - - def _get_fixed_cr(self, locat, value=None): - fformat = self.format.fortran - if value is None: - value = self.cnstnt - if self.format.binary: - if locat is None: - raise Exception("Util2d._get_fixed_cr(): locat is None but" + \ - "format is binary") - if not self.format.array_free_format: - locat = -1 * np.abs(locat) - if locat is None: - locat = 0 - if locat == 0: - fformat = '' - if self.dtype == np.int32: - cr = '{0:>10.0f}{1:>10.0f}{2:>19s}{3:>10.0f} #{4}\n' \ - .format(locat, value, fformat, - self.iprn, self._name) - elif self._dtype == np.float32: - cr = '{0:>10.0f}{1:>10.5G}{2:>19s}{3:>10.0f} #{4}\n' \ - .format(locat, value, fformat, - self.iprn, self._name) - else: - raise Exception( - 'Util2d: error generating fixed-format control record, ' - 'dtype must be np.int32 or np.float32') - return cr - - def get_internal_cr(self): - if self.format.array_free_format: - cr = 'INTERNAL {0:15} {1:>10s} {2:2.0f} #{3:<30s}\n' \ - .format(self.cnstnt_str, self.format.fortran, self.iprn, - self._name) - return cr - else: - return self._get_fixed_cr(self.locat) - - @property - def cnstnt_str(self): - if isinstance(self.cnstnt, str): - return self.cnstnt - else: - return "{0:15.6G}".format(self.cnstnt) - - def get_openclose_cr(self): - cr = 'OPEN/CLOSE {0:>30s} {1:15} {2:>10s} {3:2.0f} {4:<30s}\n'.format( - self.model_file_path, self.cnstnt_str, - self.format.fortran, self.iprn, - self._name) - return cr - - 
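The control-record builders above feed get_file_entry, defined next; a minimal sketch for a free-format model (hypothetical Util2d, written as a constant):

>>> import numpy as np
>>> import flopy
>>> from flopy.utils.util_array import Util2d
>>> ml = flopy.modflow.Modflow()
>>> u2d = Util2d(ml, (2, 2), np.float32, 10.0, 'demo')
>>> u2d.get_file_entry().startswith('CONSTANT')
True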
def get_external_cr(self): - locat = self._model.next_ext_unit() - # if self.format.binary: - # locat = -1 * np.abs(locat) - self._model.add_external(self.model_file_path, locat, - self.format.binary) - if self.format.array_free_format: - cr = 'EXTERNAL {0:>30d} {1:15} {2:>10s} {3:2.0f} {4:<30s}\n'.format( - locat, self.cnstnt_str, - self.format.fortran, self.iprn, - self._name) - return cr - else: - return self._get_fixed_cr(locat) - - def get_file_entry(self, how=None): - - if how is not None: - how = how.lower() - else: - how = self._how - - if not self.format.array_free_format and self.format.free: - print("Util2d {0}: can't be free format...resetting".format( - self._name)) - self.format._isfree = False - - if not self.format.array_free_format and self.how == "internal" and self.locat is None: - print("Util2d {0}: locat is None, but ".format(self._name) + \ - "model does not " + \ - "support free format and how is internal..." + \ - "resetting how = external") - how = "external" - - if (self.format.binary or self._model.external_path) \ - and how in ["constant", "internal"]: - print("Util2d:{0}: ".format(self._name) + \ - "resetting 'how' to external") - if self.format.array_free_format: - how = "openclose" - else: - how = "external" - if how == "internal": - assert not self.format.binary, "Util2d error: 'how' is internal, but " + \ - "format is binary" - cr = self.get_internal_cr() - return cr + self.string - - elif how == "external" or how == "openclose": - if how == "openclose": - assert self.format.array_free_format, "Util2d error: 'how' is openclose, " + \ - "but model doesn't support free fmt" - - # write a file if needed - if self.vtype != str: - if self.format.binary: - self.write_bin(self.shape, self.python_file_path, - self._array, - bintype="head") - else: - self.write_txt(self.shape, self.python_file_path, - self._array, - fortran_format=self.format.fortran) - - elif self.__value != self.python_file_path: - if os.path.exists(self.python_file_path): - # if the file already exists, remove it - if self._model.verbose: - print("Util2d warning: removing existing array " + - "file {0}".format(self.model_file_path)) - try: - os.remove(self.python_file_path) - except Exception as e: - raise Exception( - "Util2d: error removing existing file " + \ - self.python_file_path) - # copy the file to the new model location - try: - shutil.copy2(self.__value, self.python_file_path) - except Exception as e: - raise Exception("Util2d.get_file_entry(): error copying " + - "{0} to {1}:{2}".format(self.__value, - self.python_file_path, - str(e))) - if how == "external": - return self.get_external_cr() - else: - return self.get_openclose_cr() - - elif how == "constant": - if self.vtype not in [np.int32, np.float32]: - u = np.unique(self._array) - assert u.shape[ - 0] == 1, "Util2d error: 'how' is constant, but array " + \ - "is not uniform" - value = u[0] - else: - value = self.__value - return self.get_constant_cr(value) - - else: - raise Exception("Util2d.get_file_entry() error: " + \ - "unrecognized 'how':{0}".format(how)) - - @property - def string(self): - """ - get the string representation of value attribute - - Note: - the string representation DOES NOT include the effects of the control - record multiplier - this method is used primarily for writing model input files - - """ - # convert array to string with specified format - a_string = self.array2string(self.shape, self._array, - python_format=self.format.py) - return a_string - - @property - def array(self): - """ - Get the COPY of array
representation of value attribute with the - effects of the control record multiplier applied. - - Returns - ------- - array : numpy.ndarray - Copy of the array with the multiplier applied. - - Note - ---- - .array is a COPY of the array representation as seen by the - model - with the effects of the control record multiplier applied. - - """ - if isinstance(self.cnstnt, str): - print("WARNING: cnstnt is str for {0}".format(self.name)) - return self._array.astype(self.dtype) - if isinstance(self.cnstnt, (int, np.int32)): - cnstnt = self.cnstnt - else: - if self.cnstnt == 0.0: - cnstnt = 1.0 - else: - cnstnt = self.cnstnt - # return a copy of self._array since it is being - # multiplied - return (self._array * cnstnt).astype(self._dtype) - - @property - def _array(self): - """ - get the array representation of value attribute - if value is a string or a constant, the array is loaded/built only once - - Note: - the return array representation DOES NOT include the effect of the multiplier - in the control record. To get the array as the model sees it (with the multiplier applied), - use the Util2d.array method. - """ - if self.vtype == str: - if self.__value_built is None: - file_in = open(self.__value, 'r') - - if self.format.binary: - header, self.__value_built = Util2d.load_bin(self.shape, - file_in, - self._dtype, - bintype="head") - else: - self.__value_built = Util2d.load_txt(self.shape, file_in, - self._dtype, - self.format.fortran).astype( - self._dtype) - file_in.close() - return self.__value_built - elif self.vtype != np.ndarray: - if self.__value_built is None: - self.__value_built = np.ones(self.shape, dtype=self._dtype) \ - * self.__value - return self.__value_built - else: - return self.__value - - @staticmethod - def load_block(shape, file_in, dtype): - """Load block format from a MT3D file to a 2-D array - - Parameters - ---------- - shape : tuple of int - Array dimensions (nrow, ncol) - file_in : file or str - Filename or file handle - dtype : np.int32 or np.float32 - - Returns - ------- - 2-D array - """ - if len(shape) != 2: - raise ValueError( - 'Util2d.load_block(): expected 2 dimensions, found shape {0}' - .format(shape)) - nrow, ncol = shape - data = np.ma.zeros(shape, dtype=dtype) - data.mask = True - openfile = not hasattr(file_in, 'read') - if openfile: - file_in = open(file_in, 'r') - line = file_in.readline().strip() - nblock = int(line.split()[0]) - for n in range(nblock): - line = file_in.readline().strip() - raw = line.split() - if len(raw) < 5: - raise ValueError('Util2d.load_block(): expected 5 items, ' - 'found {0}: {1}'.format(len(raw), line)) - i1, i2 = int(raw[0]) - 1, int(raw[1]) - j1, j2 = int(raw[2]) - 1, int(raw[3]) - data[i1:i2, j1:j2] = raw[4] - if openfile: - file_in.close() - if data.mask.any(): - warn('Util2d.load_block(): blocks do not cover full array') - return data.data - - @staticmethod - def load_txt(shape, file_in, dtype, fmtin): - """Load formatted file to a 1-D or 2-D array - - Parameters - ---------- - shape : tuple of int - One or two array dimensions - file_in : file or str - Filename or file handle - dtype : np.int32 or np.float32 - fmtin : str - Fortran array format descriptor, '(FREE)' or e.g. '(10G11.4)' - - Notes - ----- - This method is similar to MODFLOW's U1DREL, U1DINT, U2DREL and U2DINT - subroutines, but only for formatted files. 
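The cnstnt multiplier distinction between .array and ._array described above, sketched with hypothetical values:

>>> import numpy as np
>>> import flopy
>>> from flopy.utils.util_array import Util2d
>>> ml = flopy.modflow.Modflow()
>>> u2d = Util2d(ml, (2, 2), np.float32, 3.0, 'demo', cnstnt=2.0)
>>> float(u2d.array.max()), float(u2d._array.max())  # with and without cnstnt
(6.0, 3.0)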
- - Returns - ------- - 1-D or 2-D array - """ - if len(shape) == 1: - num_items = shape[0] - elif len(shape) == 2: - nrow, ncol = shape - num_items = nrow * ncol - else: - raise ValueError( - 'Util2d.load_txt(): expected 1 or 2 dimensions, found shape {0}' - .format(shape)) - openfile = not hasattr(file_in, 'read') - if openfile: - file_in = open(file_in, 'r') - npl, fmt, width, decimal = ArrayFormat.decode_fortran_descriptor(fmtin) - items = [] - while len(items) < num_items: - line = file_in.readline() - if len(line) == 0: - raise ValueError('Util2d.load_txt(): no data found') - if npl == 'free': - if ',' in line: - line = line.replace(',', ' ') - if '*' in line: # use slower method for these types of lines - for item in line.split(): - if '*' in item: - num, val = item.split('*') - # repeat val num times - items += int(num) * [val] - else: - items.append(item) - else: - items += line.split() - else: # fixed width - pos = 0 - for i in range(npl): - try: - item = line[pos:pos + width].strip() - pos += width - if item: - items.append(item) - except IndexError: - break - if openfile: - file_in.close() - data = np.fromiter(items, dtype=dtype, count=num_items) - if data.size != num_items: - raise ValueError('Util2d.load_txt(): expected array size {0},' - ' but found size {1}'.format(num_items, - data.size)) - return data.reshape(shape) - - @staticmethod - def write_txt(shape, file_out, data, fortran_format="(FREE)", - python_format=None): - if fortran_format.upper() == '(FREE)' and python_format is None: - np.savetxt(file_out, np.atleast_2d(data), - ArrayFormat.get_default_numpy_fmt(data.dtype), - delimiter='') - return - if not hasattr(file_out, "write"): - file_out = open(file_out, 'w') - file_out.write( - Util2d.array2string(shape, data, fortran_format=fortran_format, - python_format=python_format)) - - @staticmethod - def array2string(shape, data, fortran_format="(FREE)", - python_format=None): - """ - return a string representation of - a (possibly wrapped format) array from a file - (self.__value) and casts to the proper type (self._dtype) - made static to support the load functionality - this routine now supports fixed format arrays where the numbers - may touch. - """ - if len(shape) == 2: - nrow, ncol = shape - else: - nrow = 1 - ncol = shape[0] - data = np.atleast_2d(data) - if python_format is None: - column_length, fmt, width, decimal = \ - ArrayFormat.decode_fortran_descriptor(fortran_format) - if decimal is None: - output_fmt = '{0}0:{1}{2}{3}'.format('{', width, 'd', '}') - else: - output_fmt = '{0}0:{1}.{2}{3}{4}'.format('{', width, decimal, - fmt, '}') - else: - try: - column_length, output_fmt = int(python_format[0]), \ - python_format[1] - except: - raise Exception('Util2d.write_txt: \nunable to parse' - + 'python_format:\n {0}\n'. 
- format(python_format) - + ' python_format should be a list with\n' - + ' [column_length, fmt]\n' - + ' e.g., [10, {0:10.2e}]') - if ncol % column_length == 0: - linereturnflag = False - else: - linereturnflag = True - # write the array to a string - s = "" - for i in range(nrow): - icol = 0 - for j in range(ncol): - try: - s = s + output_fmt.format(data[i, j]) - except Exception as e: - raise Exception("error writing array value" + \ - "{0} at r,c [{1},{2}]\n{3}".format( - data[i, j], i, j, str(e))) - if (j + 1) % column_length == 0.0 and (j != 0 or ncol == 1): - s += '\n' - if linereturnflag: - s += '\n' - return s - - @staticmethod - def load_bin(shape, file_in, dtype, bintype=None): - """Load unformatted file to a 2-D array - - Parameters - ---------- - shape : tuple of int - One or two array dimensions - file_in : file or str - Filename or file handle - dtype : np.int32 or np.float32 - Data type of unformatted file and Numpy array; use np.int32 for - Fortran's INTEGER, and np.float32 for Fortran's REAL data types. - bintype : str - Normally 'Head' - - Notes - ----- - This method is similar to MODFLOW's U2DREL and U2DINT subroutines, - but only for unformatted files. - - Returns - ------- - 2-D array - """ - import flopy.utils.binaryfile as bf - nrow, ncol = shape - num_items = nrow * ncol - if dtype != np.int32 and np.issubdtype(dtype, np.integer): - # Modflow only uses 4-byte integers - dtype = np.dtype(dtype) - if dtype.itemsize != 4: - # show warning for platforms where int is not 4-bytes - warn('Util2d: setting integer dtype from {0} to int32' - .format(dtype)) - dtype = np.int32 - openfile = not hasattr(file_in, 'read') - if openfile: - file_in = open(file_in, 'rb') - header_data = None - if bintype is not None and np.issubdtype(dtype, np.floating): - header_dtype = bf.BinaryHeader.set_dtype(bintype=bintype) - header_data = np.fromfile(file_in, dtype=header_dtype, count=1) - data = np.fromfile(file_in, dtype=dtype, count=num_items) - if openfile: - file_in.close() - if data.size != num_items: - raise ValueError('Util2d.load_bin(): expected array size {0},' - ' but found size {1}'.format(num_items, - data.size)) - return header_data, data.reshape(shape) - - @staticmethod - def write_bin(shape, file_out, data, bintype=None, header_data=None): - if not hasattr(file_out, 'write'): - file_out = open(file_out, 'wb') - dtype = data.dtype - if bintype is not None: - if header_data is None: - header_data = BinaryHeader.create(bintype=bintype, - nrow=shape[0], - ncol=shape[1]) - if header_data is not None: - header_data.tofile(file_out) - data.tofile(file_out) - return - - def parse_value(self, value): - """ - parses and casts the raw value into an acceptable format for __value - lot of defense here, so we can make assumptions later - """ - if isinstance(value, list): - value = np.array(value) - - if isinstance(value, bool): - if self._dtype == np.bool: - try: - self.__value = np.bool(value) - - except: - raise Exception('Util2d:could not cast ' + - 'boolean value to type "np.bool": ' + - str(value)) - else: - raise Exception('Util2d:value type is bool, ' + - ' but dtype not set as np.bool') - elif isinstance(value, str): - if os.path.exists(value): - self.__value = value - return - elif self.dtype == np.int32: - try: - self.__value = np.int32(value) - except: - raise Exception("Util2d error: str not a file and " + - "couldn't be cast to int: {0}".format( - value)) - - else: - try: - self.__value = float(value) - except: - raise Exception("Util2d error: str not a file and " + - "couldn't 
be cast to float: {0}".format( - value)) - - elif np.isscalar(value): - if self.dtype == np.int32: - try: - self.__value = np.int32(value) - except: - raise Exception('Util2d:could not cast scalar ' + - 'value to type "int": ' + str(value)) - elif self._dtype == np.float32: - try: - self.__value = np.float32(value) - except: - raise Exception('Util2d:could not cast ' + - 'scalar value to type "float": ' + - str(value)) - - elif isinstance(value, np.ndarray): - # if value is 3d, but dimension 1 is only length 1, - # then drop the first dimension - if len(value.shape) == 3 and value.shape[0] == 1: - value = value[0] - if self.shape != value.shape: - raise Exception('Util2d:self.shape: ' + str(self.shape) + - ' does not match value.shape: ' + - str(value.shape)) - if self._dtype != value.dtype: - value = value.astype(self._dtype) - self.__value = value - - else: - raise Exception('Util2d:unsupported type in util_array: ' + - str(type(value))) - - @staticmethod - def load(f_handle, model, shape, dtype, name, ext_unit_dict=None, - array_free_format=None, array_format="modflow"): - """ - functionality to load Util2d instance from an existing - model input file. - external and internal record types must be fully loaded - if you are using fixed format record types,make sure - ext_unit_dict has been initialized from the NAM file - """ - if shape == (0, 0): - raise IndexError('No information on model grid dimensions. ' - 'Need nrow, ncol to load a Util2d array.') - curr_unit = None - if ext_unit_dict is not None: - # determine the current file's unit number - cfile = f_handle.name - for cunit in ext_unit_dict: - if cfile == ext_unit_dict[cunit].filename: - curr_unit = cunit - break - - # Allows for special MT3D array reader - # array_format = None - # if hasattr(model, 'array_format'): - # array_format = model.array_format - - cr_dict = Util2d.parse_control_record(f_handle.readline(), - current_unit=curr_unit, - dtype=dtype, - ext_unit_dict=ext_unit_dict, - array_format=array_format) - - if cr_dict['type'] == 'constant': - u2d = Util2d(model, shape, dtype, cr_dict['cnstnt'], name=name, - iprn=cr_dict['iprn'], fmtin="(FREE)", - array_free_format=array_free_format) - - elif cr_dict['type'] == 'open/close': - # clean up the filename a little - fname = cr_dict['fname'] - fname = fname.replace("'", "") - fname = fname.replace('"', '') - fname = fname.replace('\'', '') - fname = fname.replace('\"', '') - fname = fname.replace('\\', os.path.sep) - fname = os.path.join(model.model_ws, fname) - # load_txt(shape, file_in, dtype, fmtin): - assert os.path.exists(fname), "Util2d.load() error: open/close " + \ - "file " + str(fname) + " not found" - if str('binary') not in str(cr_dict['fmtin'].lower()): - f = open(fname, 'r') - data = Util2d.load_txt(shape=shape, - file_in=f, - dtype=dtype, fmtin=cr_dict['fmtin']) - else: - f = open(fname, 'rb') - header_data, data = Util2d.load_bin(shape, f, dtype, - bintype='Head') - f.close() - u2d = Util2d(model, shape, dtype, data, name=name, - iprn=cr_dict['iprn'], fmtin="(FREE)", - cnstnt=cr_dict['cnstnt'], - array_free_format=array_free_format) - - - elif cr_dict['type'] == 'internal': - data = Util2d.load_txt(shape, f_handle, dtype, cr_dict['fmtin']) - u2d = Util2d(model, shape, dtype, data, name=name, - iprn=cr_dict['iprn'], fmtin="(FREE)", - cnstnt=cr_dict['cnstnt'], locat=None, - array_free_format=array_free_format) - - elif cr_dict['type'] == 'external': - ext_unit = ext_unit_dict[cr_dict['nunit']] - if ext_unit.filehandle is None: - raise IOError('cannot read unit 
{0}, filename: {1}' - .format(cr_dict['nunit'], ext_unit.filename)) - elif 'binary' not in str(cr_dict['fmtin'].lower()): - assert cr_dict['nunit'] in list(ext_unit_dict.keys()) - data = Util2d.load_txt(shape, ext_unit.filehandle, - dtype, cr_dict['fmtin']) - else: - if cr_dict['nunit'] not in list(ext_unit_dict.keys()): - cr_dict["nunit"] *= -1 - assert cr_dict['nunit'] in list(ext_unit_dict.keys()) - header_data, data = Util2d.load_bin( - shape, ext_unit.filehandle, dtype, - bintype='Head') - u2d = Util2d(model, shape, dtype, data, name=name, - iprn=cr_dict['iprn'], fmtin="(FREE)", - cnstnt=cr_dict['cnstnt'], - array_free_format=array_free_format) - # track this unit number so we can remove it from the external - # file list later - model.pop_key_list.append(cr_dict['nunit']) - elif cr_dict['type'] == 'block': - data = Util2d.load_block(shape, f_handle, dtype) - u2d = Util2d(model, shape, dtype, data, name=name, - iprn=cr_dict['iprn'], fmtin="(FREE)", - cnstnt=cr_dict['cnstnt'], locat=None, - array_free_format=array_free_format) - - return u2d - - @staticmethod - def parse_control_record(line, current_unit=None, dtype=np.float32, - ext_unit_dict=None, array_format=None): - """ - parses a control record when reading an existing file - rectifies fixed to free format - current_unit (optional) indicates the unit number of the file being parsed - """ - free_fmt = ['open/close', 'internal', 'external', 'constant'] - raw = line.strip().split() - freefmt, cnstnt, fmtin, iprn, nunit = None, None, None, -1, None - fname = None - isfloat = False - if dtype == np.float or dtype == np.float32: - isfloat = True - # if free format keywords - if str(raw[0].lower()) in str(free_fmt): - freefmt = raw[0].lower() - if raw[0].lower() == 'constant': - if isfloat: - cnstnt = np.float(raw[1].lower().replace('d', 'e')) - else: - cnstnt = np.int(raw[1].lower()) - if raw[0].lower() == 'internal': - if isfloat: - cnstnt = np.float(raw[1].lower().replace('d', 'e')) - else: - cnstnt = np.int(raw[1].lower()) - fmtin = raw[2].strip() - iprn = 0 - if len(raw) >= 4: - iprn = int(raw[3]) - elif raw[0].lower() == 'external': - if ext_unit_dict is not None: - try: - # td = ext_unit_dict[int(raw[1])] - fname = ext_unit_dict[int(raw[1])].filename.strip() - except: - print(' could not determine filename ' + - 'for unit {}'.format(raw[1])) - - nunit = int(raw[1]) - if isfloat: - cnstnt = np.float(raw[2].lower().replace('d', 'e')) - else: - cnstnt = np.int(raw[2].lower()) - fmtin = raw[3].strip() - iprn = 0 - if len(raw) >= 5: - iprn = int(raw[4]) - elif raw[0].lower() == 'open/close': - fname = raw[1].strip() - if isfloat: - cnstnt = np.float(raw[2].lower().replace('d', 'e')) - else: - cnstnt = np.int(raw[2].lower()) - fmtin = raw[3].strip() - iprn = 0 - if len(raw) >= 5: - iprn = int(raw[4]) - npl, fmt, width, decimal = None, None, None, None - else: - locat = np.int(line[0:10].strip()) - if isfloat: - if len(line) >= 20: - cnstnt = np.float( - line[10:20].strip().lower().replace('d', 'e')) - else: - cnstnt = 0.0 - else: - if len(line) >= 20: - cnstnt = np.int(line[10:20].strip()) - else: - cnstnt = 0 - # if cnstnt == 0: - # cnstnt = 1 - if locat != 0: - if len(line) >= 40: - fmtin = line[20:40].strip() - else: - fmtin = '' - try: - iprn = np.int(line[40:50].strip()) - except: - iprn = 0 - # locat = int(raw[0]) - # cnstnt = float(raw[1]) - # fmtin = raw[2].strip() - # iprn = int(raw[3]) - if locat == 0: - freefmt = 'constant' - elif locat < 0: - freefmt = 'external' - nunit = np.int(locat) * -1 - fmtin = '(binary)' - elif 
locat > 0: - # if the unit number matches the current file, it's internal - if locat == current_unit: - freefmt = 'internal' - else: - freefmt = 'external' - nunit = np.int(locat) - - # Reset for special MT3D control flags - if array_format == 'mt3d': - if locat == 100: - freefmt = 'internal' - nunit = current_unit - elif locat == 101: - freefmt = 'block' - nunit = current_unit - elif locat == 102: - raise NotImplementedError( - 'MT3D zonal format not supported...') - elif locat == 103: - freefmt = 'internal' - nunit = current_unit - fmtin = '(free)' - - cr_dict = {} - cr_dict['type'] = freefmt - cr_dict['cnstnt'] = cnstnt - cr_dict['nunit'] = nunit - cr_dict['iprn'] = iprn - cr_dict['fmtin'] = fmtin - cr_dict['fname'] = fname - return cr_dict +""" +util_array module. Contains the Util2d, Util3d and Transient2d classes. + These classes encapsulate modflow-style array inputs away + from the individual packages. The end-user should not need to + instantiate these classes directly. + +""" +from __future__ import division, print_function +# from future.utils import with_metaclass + +import os +import shutil +import copy +import numpy as np +from warnings import warn +from ..utils.binaryfile import BinaryHeader +from ..utils.flopy_io import line_parse +from ..datbase import DataType, DataInterface + + +class ArrayFormat(object): + """ + ArrayFormat class for handling various output format types for both + MODFLOW and flopy + + Parameters + ---------- + u2d : Util2d instance + python : str (optional) + python-style output format descriptor e.g. {0:15.6e} + fortran : str (optional) + fortran style output format descriptor e.g. (2E15.6) + + + Attributes + ---------- + fortran : str + fortran format output descriptor (e.g. (100G15.6)) + py : str + python format output descriptor (e.g. "{0:15.6E}") + numpy : str + numpy format output descriptor (e.g. "%15.6e") + npl : int + number of items per line of output + width : int + the width of the formatted numeric output + decimal : int + the number of decimal digits in the numeric output + format : str + the output format type e.g. I, G, E, etc + free : bool + free format flag + binary : bool + binary format flag + + + Methods + ------- + get_default_numpy_fmt : (dtype : [np.int32, np.float32]) + a static method to get a default numpy format string - used for loading + decode_fortran_descriptor : (fd : str) + a static method to decode fortran descriptors into npl, format, + width, decimal.
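A short sketch of Util2d.parse_control_record, defined above, on a free-format record (the record text is hypothetical):

>>> import numpy as np
>>> from flopy.utils.util_array import Util2d
>>> cr = Util2d.parse_control_record('CONSTANT 1.0e-3', dtype=np.float32)
>>> cr['type'], cr['cnstnt']
('constant', 0.001)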
+ + See Also + -------- + + Notes + ----- + + Examples + -------- + + """ + + def __init__(self, u2d, python=None, fortran=None, array_free_format=None): + + assert isinstance(u2d, Util2d), "ArrayFormat only supports Util2d," + \ + "not {0}".format(type(u2d)) + if len(u2d.shape) == 1: + self._npl_full = u2d.shape[0] + else: + self._npl_full = u2d.shape[1] + self.dtype = u2d.dtype + self._npl = None + self._format = None + self._width = None + self._decimal = None + if array_free_format is not None: + self._freeformat_model = bool(array_free_format) + else: + self._freeformat_model = bool(u2d.model.array_free_format) + + self.default_float_width = 15 + self.default_int_width = 10 + self.default_float_format = "E" + self.default_int_format = "I" + self.default_float_decimal = 6 + self.default_int_decimal = 0 + + self._fmts = ['I', 'G', 'E', 'F'] + + self._isbinary = False + self._isfree = False + + if python is not None and fortran is not None: + raise Exception("only one of [python,fortran] can be passed" + + "to ArrayFormat constructor") + + if python is not None: + self._parse_python_format(python) + + elif fortran is not None: + self._parse_fortran_format(fortran) + + else: + self._set_defaults() + + @property + def array_free_format(self): + return bool(self._freeformat_model) + + def _set_defaults(self): + if self.dtype == np.int32: + self._npl = self._npl_full + self._format = self.default_int_format + self._width = self.default_int_width + self._decimal = None + + elif self.dtype in [np.float32, bool]: + self._npl = self._npl_full + self._format = self.default_float_format + self._width = self.default_float_width + self._decimal = self.default_float_decimal + else: + raise Exception("ArrayFormat._set_defaults() error: " + + "unsupported dtype: {0}".format(str(self.dtype))) + + def __str__(self): + s = "ArrayFormat: npl:{0},format:{1},width:{2},decimal{3}" \ + .format(self.npl, self.format, self.width, self.decimal) + s += ",isfree:{0},isbinary:{1}".format(self._isfree, self._isbinary) + return s + + @staticmethod + def get_default_numpy_fmt(dtype): + if dtype == np.int32: + return "%10d" + elif dtype == np.float32: + return "%15.6E" + else: + raise Exception( + "ArrayFormat.get_default_numpy_fmt(): unrecognized " + \ + "dtype, must be np.int32 or np.float32") + + @classmethod + def integer(cls): + raise NotImplementedError() + + @classmethod + def float(cls): + raise NotImplementedError() + + @property + def binary(self): + return bool(self._isbinary) + + @property + def free(self): + return bool(self._isfree) + + def __eq__(self, other): + if isinstance(other, str): + if other.lower() == "free": + return self.free + if other.lower() == "binary": + return self.binary + else: + super(ArrayFormat, self).__eq__(other) + + @property + def npl(self): + return copy.copy(self._npl) + + @property + def format(self): + return copy.copy(self._format) + + @property + def width(self): + return copy.copy(self._width) + + @property + def decimal(self): + return copy.copy(self._decimal) + + def __setattr__(self, key, value): + if key == "format": + value = value.upper() + assert value.upper() in self._fmts + if value == 'I': + assert self.dtype == np.int32, self.dtype + self._format = value + self._decimal = None + else: + if value == 'G': + print("'G' format being reset to 'E'") + value = 'E' + self._format = value + if self.decimal is None: + self._decimal = self.default_float_decimal + + elif key == "width": + width = int(value) + if self.dtype == np.float32 and width < self.decimal: + raise 
Exception("width cannot be less than decimal") + elif self.dtype == np.float32 and \ + width < self.default_float_width: + print("ArrayFormat warning:setting width less " + + "than default of {0}".format(self.default_float_width)) + self._width = width + elif key == "decimal": + if self.dtype == np.int32: + raise Exception("cannot set decimal for integer dtypes") + elif self.dtype == np.float32: + value = int(value) + if value < self.default_float_decimal: + print("ArrayFormat warning: setting decimal " + + " less than default of " + + "{0}".format(self.default_float_decimal)) + if value < self.decimal: + print("ArrayFormat warning: setting decimal " + + " less than current value of " + + "{0}".format(self.default_float_decimal)) + self._decimal = int(value) + else: + raise TypeError(self.dtype) + + elif key == "entries" \ + or key == "entires_per_line" \ + or key == "npl": + value = int(value) + assert value <= self._npl_full, "cannot set npl > shape" + self._npl = value + + elif key.lower() == "binary": + value = bool(value) + if value and self.free: + # raise Exception("cannot switch from 'free' to 'binary' format") + self._isfree = False + self._isbinary = value + self._set_defaults() + + elif key.lower() == "free": + value = bool(value) + if value and self.binary: + # raise Exception("cannot switch from 'binary' to 'free' format") + self._isbinary = False + self._isfree = bool(value) + self._set_defaults() + + elif key.lower() == "fortran": + self._parse_fortran_format(value) + + elif key.lower() == "python" or key.lower() == "py": + self._parse_python_format(value) + + else: + super(ArrayFormat, self).__setattr__(key, value) + + @property + def py(self): + return self._get_python_format() + + def _get_python_format(self): + + if self.format == 'I': + fmt = 'd' + else: + fmt = self.format + pd = '{0:' + str(self.width) + if self.decimal is not None: + pd += '.' 
+ str(self.decimal) + fmt + '}' + else: + pd += fmt + '}' + + if self.npl is None: + if self._isfree: + return (self._npl_full, pd) + else: + raise Exception("ArrayFormat._get_python_format() error: " + \ + "format is not 'free' and npl is not set") + + return (self.npl, pd) + + def _parse_python_format(self, arg): + raise NotImplementedError() + + @property + def fortran(self): + return self._get_fortran_format() + + def _get_fortran_format(self): + if self._isfree: + return "(FREE)" + if self._isbinary: + return "(BINARY)" + + fd = '({0:d}{1:s}{2:d}'.format(self.npl, self.format, self.width) + if self.decimal is not None: + fd += '.{0:d})'.format(self.decimal) + else: + fd += ')' + return fd + + def _parse_fortran_format(self, arg): + """Decode fortran descriptor + + Parameters + ---------- + arg : str + + Returns + ------- + npl, fmt, width, decimal : int, str, int, int + + """ + # strip off any quotes around format string + + npl, fmt, width, decimal = ArrayFormat.decode_fortran_descriptor(arg) + if isinstance(npl, str): + if 'FREE' in npl.upper(): + self._set_defaults() + self._isfree = True + return + + elif 'BINARY' in npl.upper(): + self._set_defaults() + self._isbinary = True + return + self._npl = int(npl) + self._format = fmt + self._width = int(width) + if decimal is not None: + self._decimal = int(decimal) + + @property + def numpy(self): + return self._get_numpy_format() + + def _get_numpy_format(self): + return "%{0}{1}.{2}".format(self.width, self.format, self.decimal) + + @staticmethod + def decode_fortran_descriptor(fd): + """Decode fortran descriptor + + Parameters + ---------- + fd : str + + Returns + ------- + npl, fmt, width, decimal : int, str, int, int + + """ + # strip off any quotes around format string + fd = fd.replace("'", "") + fd = fd.replace('"', '') + # strip off '(' and ')' + fd = fd.strip()[1:-1] + if str('FREE') in str(fd.upper()): + return 'free', None, None, None + elif str('BINARY') in str(fd.upper()): + return 'binary', None, None, None + if str('.') in str(fd): + raw = fd.split('.') + decimal = int(raw[1]) + else: + raw = [fd] + decimal = None + fmts = ['ES', 'EN', 'I', 'G', 'E', 'F'] + raw = raw[0].upper() + for fmt in fmts: + if fmt in raw: + raw = raw.split(fmt) + # '(F9.0)' will return raw = ['', '9'] + # try and except will catch this + try: + npl = int(raw[0]) + width = int(raw[1]) + except: + npl = 1 + width = int(raw[1]) + if fmt == 'G': + fmt = 'E' + elif fmt == 'ES': + fmt = 'E' + elif fmt == 'EN': + fmt = 'E' + return npl, fmt, width, decimal + raise Exception('Unrecognized format type: ' + + str(fd) + ' looking for: ' + str(fmts)) + + +def read1d(f, a): + """ + Fill the 1d array, a, with the correct number of values. 
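decode_fortran_descriptor, defined above, can be sketched as follows (note that G, ES, and EN descriptors are all normalized to E):

>>> from flopy.utils.util_array import ArrayFormat
>>> ArrayFormat.decode_fortran_descriptor('(10G11.4)')
(10, 'E', 11, 4)
>>> ArrayFormat.decode_fortran_descriptor('(FREE)')
('free', None, None, None)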
Required in + case lpf 1d arrays (chani, layvka, etc) extend over more than one line + + """ + if len(a.shape) != 1: + raise ValueError('read1d: expected 1 dimension, found shape {0}' + .format(a.shape)) + values = [] + while len(values) < a.shape[0]: + line = f.readline() + if len(line) == 0: + raise ValueError('read1d: no data found') + values += line_parse(line) + a[:] = np.fromiter(values, dtype=a.dtype, count=a.shape[0]) + return a + + +def new_u2d(old_util2d, value): + new_util2d = Util2d(old_util2d.model, old_util2d.shape, old_util2d.dtype, + value, old_util2d.name, old_util2d.format.fortran, + old_util2d.cnstnt, old_util2d.iprn, + old_util2d.ext_filename, old_util2d.locat, + old_util2d.format.binary, + array_free_format=old_util2d.format.array_free_format) + return new_util2d + + +class Util3d(DataInterface): + """ + Util3d class for handling 3-D model arrays. just a thin wrapper around + Util2d + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to which + this package will be added. + shape : length 3 tuple + shape of the 3-D array, typically (nlay,nrow,ncol) + dtype : [np.int32, np.float32, np.bool] + the type of the data + value : variable + the data to be assigned to the 3-D array. + can be a scalar, list, or ndarray + name : string + name of the property, used for writing comments to input files + fmtin : string + modflow fmtin variable (optional). (the default is None) + cnstnt : string + modflow cnstnt variable (optional) (the default is 1.0) + iprn : int + modflow iprn variable (optional) (the default is -1) + locat : int + modflow locat variable (optional) (the default is None). If the model + instance does not support free format and the + external flag is not set and the value is a simple scalar, + then locat must be explicitly passed as it is the unit number + to read the array from + ext_filename : string + the external filename to write the array representation to + (optional) (the default is None) . + If type(value) is a string and is an accessible filename, the + ext_filename is reset to value. 
+    bin : bool
+        flag to control writing external arrays as binary (optional)
+        (the default is False)
+
+    Attributes
+    ----------
+    array : np.ndarray
+        the array representation of the 3-D object
+
+
+    Methods
+    -------
+    get_file_entry : string
+        get the model input file string including the control record for the
+        entire 3-D property
+
+    See Also
+    --------
+
+    Notes
+    -----
+
+    Examples
+    --------
+
+    """
+
+    def __init__(self, model, shape, dtype, value, name,
+                 fmtin=None, cnstnt=1.0, iprn=-1, locat=None,
+                 ext_unit_dict=None, array_free_format=None):
+        """
+        3-D wrapper around Util2d - shape must be 3-D
+        """
+        self.array_free_format = array_free_format
+        if isinstance(value, Util3d):
+            for attr in value.__dict__.items():
+                setattr(self, attr[0], attr[1])
+            self._model = model
+            self.array_free_format = array_free_format
+            for i, u2d in enumerate(self.util_2ds):
+                self.util_2ds[i] = Util2d(model, u2d.shape, u2d.dtype,
+                                          u2d._array, name=u2d.name,
+                                          fmtin=u2d.format.fortran,
+                                          locat=locat,
+                                          cnstnt=u2d.cnstnt,
+                                          ext_filename=u2d.filename,
+                                          array_free_format=array_free_format)
+
+            return
+        if len(shape) != 3:
+            raise ValueError(
+                'Util3d: expected 3 dimensions, found shape {0}'.format(shape))
+        self._model = model
+        self.shape = shape
+        self._dtype = dtype
+        self.__value = value
+        isnamespecified = False
+        if isinstance(name, list):
+            self._name = name
+            isnamespecified = True
+        else:
+            t = []
+            for k in range(shape[0]):
+                t.append(name)
+            self._name = t
+        self.name_base = []
+        for k in range(shape[0]):
+            if isnamespecified:
+                self.name_base.append(self.name[k])
+            else:
+                if 'Layer' not in self.name[k]:
+                    self.name_base.append(self.name[k] + ' Layer ')
+                else:
+                    self.name_base.append(self.name[k])
+        self.fmtin = fmtin
+        self.cnstnt = cnstnt
+        self.iprn = iprn
+        self.locat = locat
+
+        self.ext_filename_base = []
+        if model.external_path is not None:
+            for k in range(shape[0]):
+                self.ext_filename_base. \
+                    append(os.path.join(model.external_path,
+                                        self.name_base[k].replace(' ', '_')))
+        else:
+            for k in range(shape[0]):
+                self.ext_filename_base.
\ + append(self.name_base[k].replace(' ', '_')) + + self.util_2ds = self.build_2d_instances() + + def __setitem__(self, k, value): + if isinstance(k, int): + assert k in range(0, self.shape[ + 0]), "Util3d error: k not in range nlay" + self.util_2ds[k] = new_u2d(self.util_2ds[k], value) + else: + raise NotImplementedError( + "Util3d doesn't support setitem indices" + str(k)) + + def __setattr__(self, key, value): + if hasattr(self, "util_2ds") and key == "cnstnt": + # set the cnstnt for each u2d + for u2d in self.util_2ds: + u2d.cnstnt = value + elif hasattr(self, "util_2ds") and key == "fmtin": + for u2d in self.util_2ds: + u2d.format = ArrayFormat(u2d, fortran=value, + array_free_format=self.array_free_format) + super(Util3d, self).__setattr__("fmtin", value) + elif hasattr(self, "util_2ds") and key == "how": + for u2d in self.util_2ds: + u2d.how = value + else: + # set the attribute for u3d + super(Util3d, self).__setattr__(key, value) + + @property + def name(self): + return self._name + + @property + def dtype(self): + return self._dtype + + @property + def model(self): + return self._model + + @property + def data_type(self): + return DataType.array3d + + @property + def plotable(self): + return True + + def export(self, f, **kwargs): + from flopy import export + return export.utils.array3d_export(f, self, **kwargs) + + def to_shapefile(self, filename): + """ + Export 3-D model data to shapefile (polygons). Adds an + attribute for each Util2d in self.u2ds + + Parameters + ---------- + filename : str + Shapefile name to write + + Returns + ---------- + None + + See Also + -------- + + Notes + ----- + + Examples + -------- + >>> import flopy + >>> ml = flopy.modflow.Modflow.load('test.nam') + >>> ml.lpf.hk.to_shapefile('test_hk.shp') + """ + warn( + "Deprecation warning: to_shapefile() is deprecated. use .export()", + DeprecationWarning) + + # from flopy.utils.flopy_io import write_grid_shapefile, shape_attr_name + # + # array_dict = {} + # for ilay in range(self._model.nlay): + # u2d = self[ilay] + # name = '{}_{:03d}'.format(shape_attr_name(u2d.name), ilay + 1) + # array_dict[name] = u2d.array + # write_grid_shapefile(filename, self._model.dis.sr, + # array_dict) + + self.export(filename) + + def plot(self, filename_base=None, file_extension=None, mflay=None, + fignum=None, **kwargs): + """ + Plot 3-D model input data + + Parameters + ---------- + filename_base : str + Base file name that will be used to automatically generate file + names for output image files. Plots will be exported as image + files if file_name_base is not None. (default is None) + file_extension : str + Valid matplotlib.pyplot file extension for savefig(). Only used + if filename_base is not None. (default is 'png') + mflay : int + MODFLOW zero-based layer number to return. If None, then all + all layers will be included. (default is None) + **kwargs : dict + axes : list of matplotlib.pyplot.axis + List of matplotlib.pyplot.axis that will be used to plot + data for each layer. If axes=None axes will be generated. + (default is None) + pcolor : bool + Boolean used to determine if matplotlib.pyplot.pcolormesh + plot will be plotted. (default is True) + colorbar : bool + Boolean used to determine if a color bar will be added to + the matplotlib.pyplot.pcolormesh. Only used if pcolor=True. + (default is False) + inactive : bool + Boolean used to determine if a black overlay in inactive + cells in a layer will be displayed. 
(default is True) + contour : bool + Boolean used to determine if matplotlib.pyplot.contour + plot will be plotted. (default is False) + clabel : bool + Boolean used to determine if matplotlib.pyplot.clabel + will be plotted. Only used if contour=True. (default is False) + grid : bool + Boolean used to determine if the model grid will be plotted + on the figure. (default is False) + masked_values : list + List of unique values to be excluded from the plot. + + Returns + ---------- + out : list + Empty list is returned if filename_base is not None. Otherwise + a list of matplotlib.pyplot.axis is returned. + + See Also + -------- + + Notes + ----- + + Examples + -------- + >>> import flopy + >>> ml = flopy.modflow.Modflow.load('test.nam') + >>> ml.lpf.hk.plot() + + """ + from flopy.plot import PlotUtilities + + axes = PlotUtilities._plot_util3d_helper(self, + filename_base=filename_base, + file_extension=file_extension, + mflay=mflay, + fignum=fignum, + **kwargs) + return axes + + def __getitem__(self, k): + if (isinstance(k, int) or + np.issubdtype(getattr(k, 'dtype', None), np.integer)): + return self.util_2ds[k] + elif len(k) == 3: + return self.array[k[0], k[1], k[2]] + else: + raise Exception("Util3d error: unsupported indices:" + str(k)) + + def get_file_entry(self): + s = '' + for u2d in self.util_2ds: + s += u2d.get_file_entry() + return s + + def get_value(self): + value = [] + for u2d in self.util_2ds: + value.append(u2d.get_value()) + return value + + @property + def array(self): + ''' + Return a numpy array of the 3D shape. If an unstructured model, then + return an array of size nodes. + + ''' + nlay, nrow, ncol = self.shape + if nrow is not None: + # typical 3D case + a = np.empty((self.shape), dtype=self._dtype) + # for i,u2d in self.uds: + for i, u2d in enumerate(self.util_2ds): + a[i] = u2d.array + else: + # unstructured case + nodes = ncol.sum() + a = np.empty((nodes), dtype=self._dtype) + istart = 0 + for i, u2d in enumerate(self.util_2ds): + istop = istart + ncol[i] + a[istart:istop] = u2d.array + istart = istop + return a + + def build_2d_instances(self): + u2ds = [] + # if value is not enumerable, then make a list of something + if not isinstance(self.__value, list) \ + and not isinstance(self.__value, np.ndarray): + self.__value = [self.__value] * self.shape[0] + + # if this is a list or 1-D array with constant values per layer + if isinstance(self.__value, list) \ + or (isinstance(self.__value, np.ndarray) + and (self.__value.ndim == 1)): + + assert len(self.__value) == self.shape[0], \ + 'length of 3d enumerable:' + str(len(self.__value)) + \ + ' != to shape[0]:' + str(self.shape[0]) + + for i, item in enumerate(self.__value): + if isinstance(item, Util2d): + # we need to reset the external name because most of the + # load() methods don't use layer-specific names + item._ext_filename = self.ext_filename_base[i] + \ + "{0}.ref".format(i + 1) + # reset the model instance in cases these Util2d's + # came from another model instance + item.model = self._model + u2ds.append(item) + else: + name = self.name_base[i] + str(i + 1) + ext_filename = None + if self._model.external_path is not None: + ext_filename = self.ext_filename_base[i] + str(i + 1) + \ + '.ref' + shape = self.shape[1:] + if shape[0] is None: + # allow for unstructured so that ncol changes by layer + shape = (self.shape[2][i],) + u2d = Util2d(self.model, shape, self.dtype, item, + fmtin=self.fmtin, name=name, + ext_filename=ext_filename, + locat=self.locat, + array_free_format=self.array_free_format) + 
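+                    # illustrative note (an assumption, not original code):
+                    # for a hypothetical model ml, a call such as
+                    #     Util3d(ml, (3, 10, 10), np.float32,
+                    #            [1., 2., 3.], 'hk')
+                    # reaches this branch once per layer, so each scalar in
+                    # the list becomes one constant Util2d and util_2ds[k]
+                    # ends up holding layer k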
+                    u2ds.append(u2d)
+
+        elif isinstance(self.__value, np.ndarray):
+            # if an array of shape (nrow, ncol) was passed, tile it out
+            # for each layer
+            if self.__value.shape[0] != self.shape[0]:
+                if self.__value.shape == (self.shape[1], self.shape[2]):
+                    self.__value = [self.__value] * self.shape[0]
+                else:
+                    raise Exception('value shape[0] != self.shape[0] and ' +
+                                    'value.shape[[1,2]] != self.shape[[1,2]] ' +
+                                    str(self.__value.shape) + ' ' +
+                                    str(self.shape))
+            for i, a in enumerate(self.__value):
+                a = np.atleast_2d(a)
+                ext_filename = None
+                name = self.name_base[i] + str(i + 1)
+                if self._model.external_path is not None:
+                    ext_filename = self.ext_filename_base[i] + str(
+                        i + 1) + '.ref'
+                u2d = Util2d(self._model, self.shape[1:], self._dtype, a,
+                             fmtin=self.fmtin, name=name,
+                             ext_filename=ext_filename,
+                             locat=self.locat,
+                             array_free_format=self.array_free_format)
+                u2ds.append(u2d)
+
+        else:
+            raise Exception('util_array_3d: value attribute must be list ' +
+                            'or ndarray, not ' + str(type(self.__value)))
+        return u2ds
+
+    @staticmethod
+    def load(f_handle, model, shape, dtype, name, ext_unit_dict=None,
+             array_format=None):
+        if len(shape) != 3:
+            raise ValueError(
+                'Util3d: expected 3 dimensions, found shape {0}'.format(shape))
+        nlay, nrow, ncol = shape
+        u2ds = []
+        for k in range(nlay):
+            u2d_name = name + '_Layer_{0}'.format(k)
+            if nrow is None:
+                nr = 1
+                nc = ncol[k]
+            else:
+                nr = nrow
+                nc = ncol
+            u2d = Util2d.load(f_handle, model, (nr, nc), dtype, u2d_name,
+                              ext_unit_dict=ext_unit_dict,
+                              array_format=array_format)
+            u2ds.append(u2d)
+        u3d = Util3d(model, shape, dtype, u2ds, name)
+        return u3d
+
+    def __mul__(self, other):
+        if np.isscalar(other):
+            new_u2ds = []
+            for u2d in self.util_2ds:
+                new_u2ds.append(u2d * other)
+            return Util3d(self._model, self.shape, self._dtype, new_u2ds,
+                          self._name, self.fmtin, self.cnstnt, self.iprn,
+                          self.locat)
+        elif isinstance(other, list):
+            assert len(other) == self.shape[0]
+            new_u2ds = []
+            for u2d, item in zip(self.util_2ds, other):
+                new_u2ds.append(u2d * item)
+            return Util3d(self._model, self.shape, self._dtype, new_u2ds,
+                          self._name, self.fmtin, self.cnstnt, self.iprn,
+                          self.locat)
+
+
+class Transient3d(DataInterface):
+    """
+    Transient3d class for handling time-dependent 3-D model arrays.
+    just a thin wrapper around Util3d
+
+    Parameters
+    ----------
+    model : model object
+        The model object (of type :class:`flopy.modflow.mf.Modflow`) to which
+        this package will be added.
+    shape : length 3 tuple
+        shape of the 3-D transient arrays, typically (nlay,nrow,ncol)
+    dtype : [np.int32, np.float32, np.bool]
+        the type of the data
+    value : variable
+        the data to be assigned to the 3-D arrays. Typically a dict
+        of {kper:value}, where kper is the zero-based stress period
+        to assign a value to. Value should be cast-able to a Util3d instance
+        and can be a scalar, list, or ndarray if the array value is constant
+        in time.
+    name : string
+        name of the property, used for writing comments to input files and
+        for forming external files names (if needed)
+    fmtin : string
+        modflow fmtin variable (optional). (the default is None)
+    cnstnt : string
+        modflow cnstnt variable (optional) (the default is 1.0)
+    iprn : int
+        modflow iprn variable (optional) (the default is -1)
+    locat : int
+        modflow locat variable (optional) (the default is None).
+        If the model instance does not support free format and the
+        external flag is not set and the value is a simple scalar,
+        then locat must be explicitly passed as it is the unit number
+        to read the array from
+    ext_filename : string
+        the external filename to write the array representation to
+        (optional) (the default is None).
+        If type(value) is a string and is an accessible filename,
+        the ext_filename is reset to value.
+    bin : bool
+        flag to control writing external arrays as binary (optional)
+        (the default is False)
+
+    Attributes
+    ----------
+    transient_3ds : dict{kper:Util3d}
+        the transient sequence of Util3d objects
+
+    Methods
+    -------
+    get_kper_entry : (itmp,string)
+        get the itmp value and the Util3d file entry of the value in
+        transient_3ds for stress period kper. If
+        kper < min(transient_3ds.keys()), return (1, zero_entry). If
+        kper > min(transient_3ds.keys()) but is not found in
+        transient_3ds.keys(), return (-1, '')
+
+    See Also
+    --------
+
+    Notes
+    -----
+
+    Examples
+    --------
+
+    """
+
+    def __init__(self, model, shape, dtype, value, name, fmtin=None,
+                 cnstnt=1.0, iprn=-1, ext_filename=None, locat=None,
+                 bin=False, array_free_format=None):
+
+        if isinstance(value, Transient3d):
+            for attr in value.__dict__.items():
+                setattr(self, attr[0], attr[1])
+            self._model = model
+            return
+
+        self._model = model
+        if len(shape) != 3:
+            raise ValueError(
+                'Transient3d: expected 3 dimensions (nlay, nrow, ncol), found '
+                'shape {0}'.format(shape))
+        self.shape = shape
+        self._dtype = dtype
+        self.__value = value
+        self.name_base = name
+        self.fmtin = fmtin
+        self.cnstnt = cnstnt
+        self.iprn = iprn
+        self.locat = locat
+        self.array_free_format = array_free_format
+        self.transient_3ds = self.build_transient_sequence()
+        return
+
+    def __setattr__(self, key, value):
+        # set the attribute for u3d, even for cnstnt
+        super(Transient3d, self).__setattr__(key, value)
+
+    @property
+    def model(self):
+        return self._model
+
+    @property
+    def name(self):
+        return self.name_base
+
+    @property
+    def dtype(self):
+        return self._dtype
+
+    @property
+    def data_type(self):
+        return DataType.transient3d
+
+    @property
+    def plotable(self):
+        return False
+
+    def get_zero_3d(self, kper):
+        name = self.name_base + str(kper + 1) + '(filled zero)'
+        return Util3d(self._model, self.shape,
+                      self._dtype, 0.0, name=name,
+                      array_free_format=self.array_free_format)
+
+    def __getitem__(self, kper):
+        if kper in list(self.transient_3ds.keys()):
+            return self.transient_3ds[kper]
+        elif kper < min(self.transient_3ds.keys()):
+            return self.get_zero_3d(kper)
+        else:
+            for i in range(kper, -1, -1):
+                if i in list(self.transient_3ds.keys()):
+                    return self.transient_3ds[i]
+            raise Exception("Transient3d.__getitem__(): error: " +
+                            "could not find an entry before kper {0:d}".format(
+                                kper))
+
+    def __setitem__(self, key, value):
+        try:
+            key = int(key)
+        except Exception as e:
+            raise Exception("Transient3d.__setitem__() error: " +
+                            "'key' could not be cast to int: {0}".format(
+                                str(e)))
+        nper = self._model.nper
+        if key > self._model.nper or key < 0:
+            raise Exception("Transient3d.__setitem__() error: " +
+                            "key {0} not in nper range {1}:{2}".format(key, 0,
+                                                                       nper))
+
+        self.transient_3ds[key] = self.__get_3d_instance(key, value)
+
+    @property
+    def array(self):
+        arr = np.zeros((self._model.nper, self.shape[0], self.shape[1],
+                        self.shape[2]), dtype=self._dtype)
+        for kper in range(self._model.nper):
+            u3d = self[kper]
+            for k in range(self.shape[0]):
+                arr[kper, k, :, :] = u3d[k].array
+        return arr
+
+    def get_kper_entry(self, kper):
+        """
+        get the file entry info for a given kper
+        returns (itmp, file entry string from Util3d)
+        """
+        if kper in self.transient_3ds:
+            s = ''
+            for k in range(self.shape[0]):
+                s += self.transient_3ds[kper][k].get_file_entry()
+            return 1, s
+        elif kper < min(self.transient_3ds.keys()):
+            t = self.get_zero_3d(kper)
+            s = ''
+            for k in range(self.shape[0]):
+                s += t[k].get_file_entry()
+            return 1, s
+        else:
+            return -1, ''
+
+    def build_transient_sequence(self):
+        """
+        parse self.__value into a dict{kper:Util3d}
+        """
+
+        # a dict keyed on kper (zero-based)
+        if isinstance(self.__value, dict):
+            tran_seq = {}
+            for key, val in self.__value.items():
+                try:
+                    key = int(key)
+                except:
+                    raise Exception("Transient3d error: can't cast key: " +
+                                    str(key) + " to kper integer")
+                if key < 0:
+                    raise Exception("Transient3d error: key can't be " +
+                                    "negative: " + str(key))
+                try:
+                    u3d = self.__get_3d_instance(key, val)
+                except Exception as e:
+                    raise Exception("Transient3d error building Util3d " +
+                                    "instance from value at kper: " +
+                                    str(key) + "\n" + str(e))
+                tran_seq[key] = u3d
+            return tran_seq
+
+        # these are all for single entries - use the same Util3d for all kper
+        # an array of shape (nlay,nrow,ncol)
+        elif isinstance(self.__value, np.ndarray):
+            return {0: self.__get_3d_instance(0, self.__value)}
+
+        # a filename
+        elif isinstance(self.__value, str):
+            return {0: self.__get_3d_instance(0, self.__value)}
+
+        # a scalar
+        elif np.isscalar(self.__value):
+            return {0: self.__get_3d_instance(0, self.__value)}
+
+        # lists aren't allowed
+        elif isinstance(self.__value, list):
+            raise Exception("Transient3d error: value cannot be a list " +
+                            "anymore. try a dict{kper,value}")
+        else:
+            raise Exception("Transient3d error: value type not " +
+                            "recognized: " + str(type(self.__value)))
+
+    def __get_3d_instance(self, kper, arg):
+        """
+        parse an argument into a Util3d instance
+        """
+        name = '{}_period{}'.format(self.name_base, kper + 1)
+        u3d = Util3d(self._model, self.shape, self._dtype, arg,
+                     fmtin=self.fmtin, name=name,
+                     # ext_filename=ext_filename,
+                     locat=self.locat,
+                     array_free_format=self.array_free_format)
+        return u3d
+
+
+class Transient2d(DataInterface):
+    """
+    Transient2d class for handling time-dependent 2-D model arrays.
+    just a thin wrapper around Util2d
+
+    Parameters
+    ----------
+    model : model object
+        The model object (of type :class:`flopy.modflow.mf.Modflow`) to which
+        this package will be added.
+    shape : length 2 tuple
+        shape of the 2-D transient arrays, typically (nrow,ncol)
+    dtype : [np.int32, np.float32, np.bool]
+        the type of the data
+    value : variable
+        the data to be assigned to the 2-D arrays. Typically a dict
+        of {kper:value}, where kper is the zero-based stress period
+        to assign a value to. Value should be cast-able to a Util2d instance
+        and can be a scalar, list, or ndarray if the array value is constant
+        in time.
+    name : string
+        name of the property, used for writing comments to input files and
+        for forming external files names (if needed)
+    fmtin : string
+        modflow fmtin variable (optional). (the default is None)
+    cnstnt : string
+        modflow cnstnt variable (optional) (the default is 1.0)
+    iprn : int
+        modflow iprn variable (optional) (the default is -1)
+    locat : int
+        modflow locat variable (optional) (the default is None).
If the model + instance does not support free format and the + external flag is not set and the value is a simple scalar, + then locat must be explicitly passed as it is the unit number + to read the array from + ext_filename : string + the external filename to write the array representation to + (optional) (the default is None) . + If type(value) is a string and is an accessible filename, + the ext_filename is reset to value. + bin : bool + flag to control writing external arrays as binary (optional) + (the default is False) + + Attributes + ---------- + transient_2ds : dict{kper:Util2d} + the transient sequence of Util2d objects + + Methods + ------- + get_kper_entry : (itmp,string) + get the itmp value and the Util2d file entry of the value in + transient_2ds in bin kper. if kper < min(Transient2d.keys()), + return (1,zero_entry). If kper > < min(Transient2d.keys()), + but is not found in Transient2d.keys(), return (-1,'') + + See Also + -------- + + Notes + ----- + + Examples + -------- + + """ + + def __init__(self, model, shape, dtype, value, name, fmtin=None, + cnstnt=1.0, iprn=-1, ext_filename=None, locat=None, + bin=False, array_free_format=None): + + if isinstance(value, Transient2d): + for attr in value.__dict__.items(): + setattr(self, attr[0], attr[1]) + for kper, u2d in self.transient_2ds.items(): + self.transient_2ds[kper] = Util2d(model, u2d.shape, u2d.dtype, + u2d._array, name=u2d.name, + fmtin=u2d.format.fortran, + locat=locat, + cnstnt=u2d.cnstnt, + ext_filename=u2d.filename, + array_free_format=array_free_format) + + self._model = model + return + + self._model = model + if len(shape) != 2: + raise ValueError( + 'Transient2d: expected 2 dimensions (nrow, ncol), found ' + 'shape {0}'.format(shape)) + if shape[0] is None: + # allow for unstructured so that ncol changes by layer + shape = (1, shape[1][0]) + + self.shape = shape + self._dtype = dtype + self.__value = value + self.name_base = name + self.fmtin = fmtin + self.cnstnt = cnstnt + self.iprn = iprn + self.locat = locat + self.array_free_format = array_free_format + if model.external_path is not None: + self.ext_filename_base = \ + os.path.join(model.external_path, + self.name_base.replace(' ', '_')) + else: + self.ext_filename_base = self.name_base.replace(' ', '_') + self.transient_2ds = self.build_transient_sequence() + return + + @property + def name(self): + return self.name_base + + @property + def dtype(self): + return self._dtype + + @property + def model(self): + return self._model + + @property + def data_type(self): + return DataType.transient2d + + @property + def plotable(self): + return True + + @staticmethod + def masked4d_array_to_kper_dict(m4d): + assert m4d.ndim == 4 + kper_dict = {} + for kper, arr in enumerate(m4d): + if np.all(np.isnan(arr)): + continue + elif np.any(np.isnan(arr)): + raise Exception("masked value found in array") + kper_dict[kper] = arr.copy() + return kper_dict + + @classmethod + def from_4d(cls, model, pak_name, m4ds): + """construct a Transient2d instance from a + dict(name: (masked) 4d numpy.ndarray + Parameters + ---------- + model : flopy.mbase derived type + pak_name : str package name (e.g. RCH) + m4ds : dict(name,(masked) 4d numpy.ndarray) + each ndarray must have shape (nper,1,nrow,ncol). + if an entire (nrow,ncol) slice is np.NaN, then + that kper is skipped. 
+ Returns + ------- + Transient2d instance + """ + + assert isinstance(m4ds, dict) + keys = list(m4ds.keys()) + assert len(keys) == 1 + name = keys[0] + m4d = m4ds[name] + + assert m4d.ndim == 4 + assert m4d.shape[0] == model.nper + assert m4d.shape[1] == 1 + assert m4d.shape[2] == model.nrow + assert m4d.shape[3] == model.ncol + m4d = m4d.astype(np.float32) + kper_dict = Transient2d.masked4d_array_to_kper_dict(m4d) + return cls(model=model, shape=(model.nrow, model.ncol), + value=kper_dict, + dtype=m4d.dtype.type, name=name) + + def __setattr__(self, key, value): + if hasattr(self, "transient_2ds") and key == "cnstnt": + # set cnstnt for each u2d + for kper, u2d in self.transient_2ds.items(): + self.transient_2ds[kper].cnstnt = value + elif hasattr(self, "transient_2ds") and key == "fmtin": + # set fmtin for each u2d + for kper, u2d in self.transient_2ds.items(): + self.transient_2ds[kper].format = ArrayFormat(u2d, + fortran=value) + elif hasattr(self, "transient_2ds") and key == "how": + # set how for each u2d + for kper, u2d in self.transient_2ds.items(): + self.transient_2ds[kper].how = value + # set the attribute for u3d, even for cnstnt + super(Transient2d, self).__setattr__(key, value) + + def get_zero_2d(self, kper): + name = self.name_base + str(kper + 1) + '(filled zero)' + return Util2d(self._model, self.shape, + self._dtype, 0.0, name=name, + array_free_format=self.array_free_format) + + def to_shapefile(self, filename): + """ + Export transient 2D data to a shapefile (as polygons). Adds an + attribute for each unique Util2d instance in self.data + + Parameters + ---------- + filename : str + Shapefile name to write + + Returns + ---------- + None + + See Also + -------- + + Notes + ----- + + Examples + -------- + >>> import flopy + >>> ml = flopy.modflow.Modflow.load('test.nam') + >>> ml.rch.rech.as_shapefile('test_rech.shp') + """ + warn( + "Deprecation warning: to_shapefile() is deprecated. use .export()", + DeprecationWarning) + + # from flopy.utils.flopy_io import write_grid_shapefile, shape_attr_name + # + # array_dict = {} + # for kper in range(self._model.nper): + # u2d = self[kper] + # name = '{}_{:03d}'.format(shape_attr_name(u2d.name), kper + 1) + # array_dict[name] = u2d.array + # write_grid_shapefile(filename, self._model.dis.sr, array_dict) + self.export(filename) + + def plot(self, filename_base=None, file_extension=None, kper=0, + fignum=None, **kwargs): + """ + Plot transient 2-D model input data + + Parameters + ---------- + filename_base : str + Base file name that will be used to automatically generate file + names for output image files. Plots will be exported as image + files if file_name_base is not None. (default is None) + file_extension : str + Valid matplotlib.pyplot file extension for savefig(). Only used + if filename_base is not None. (default is 'png') + kper : int or str + model stress period. if 'all' is provided, all stress periods + will be plotted + fignum: list or int + Figure numbers for plot title + + **kwargs : dict + axes : list of matplotlib.pyplot.axis + List of matplotlib.pyplot.axis that will be used to plot + data for each layer. If axes=None axes will be generated. + (default is None) + pcolor : bool + Boolean used to determine if matplotlib.pyplot.pcolormesh + plot will be plotted. (default is True) + colorbar : bool + Boolean used to determine if a color bar will be added to + the matplotlib.pyplot.pcolormesh. Only used if pcolor=True. 
+ (default is False) + inactive : bool + Boolean used to determine if a black overlay in inactive + cells in a layer will be displayed. (default is True) + contour : bool + Boolean used to determine if matplotlib.pyplot.contour + plot will be plotted. (default is False) + clabel : bool + Boolean used to determine if matplotlib.pyplot.clabel + will be plotted. Only used if contour=True. (default is False) + grid : bool + Boolean used to determine if the model grid will be plotted + on the figure. (default is False) + masked_values : list + List of unique values to be excluded from the plot. + kper : str + MODFLOW zero-based stress period number to return. If + kper='all' then data for all stress period will be + extracted. (default is zero). + + Returns + ---------- + out : list + Empty list is returned if filename_base is not None. Otherwise + a list of matplotlib.pyplot.axis is returned. + + See Also + -------- + + Notes + ----- + + Examples + -------- + >>> import flopy + >>> ml = flopy.modflow.Modflow.load('test.nam') + >>> ml.rch.rech.plot() + + """ + from flopy.plot import PlotUtilities + + axes = PlotUtilities._plot_transient2d_helper(self, + filename_base=filename_base, + file_extension=file_extension, + kper=kper, + fignum=fignum, + **kwargs) + + return axes + + def __getitem__(self, kper): + if kper in list(self.transient_2ds.keys()): + return self.transient_2ds[kper] + elif kper < min(self.transient_2ds.keys()): + return self.get_zero_2d(kper) + else: + for i in range(kper, -1, -1): + if i in list(self.transient_2ds.keys()): + return self.transient_2ds[i] + raise Exception("Transient2d.__getitem__(): error:" + \ + " could not find an entry before kper {0:d}".format( + kper)) + + def __setitem__(self, key, value): + try: + key = int(key) + except Exception as e: + raise Exception("Transient2d.__setitem__() error: " + \ + "'key'could not be cast to int:{0}".format(str(e))) + nper = self._model.nper + if key > self._model.nper or key < 0: + raise Exception("Transient2d.__setitem__() error: " + \ + "key {0} not in nper range {1}:{2}".format(key, 0, + nper)) + + self.transient_2ds[key] = self.__get_2d_instance(key, value) + + @property + def array(self): + arr = np.zeros((self._model.nper, 1, self.shape[0], self.shape[1]), + dtype=self._dtype) + for kper in range(self._model.nper): + u2d = self[kper] + arr[kper, 0, :, :] = u2d.array + return arr + + def export(self, f, **kwargs): + from flopy import export + return export.utils.transient2d_export(f, self, **kwargs) + + def get_kper_entry(self, kper): + """ + Get the file entry info for a given kper + returns (itmp,file entry string from Util2d) + """ + if kper in self.transient_2ds: + return (1, self.transient_2ds[kper].get_file_entry()) + elif kper < min(self.transient_2ds.keys()): + return (1, self.get_zero_2d(kper).get_file_entry()) + else: + return (-1, '') + + def build_transient_sequence(self): + """ + parse self.__value into a dict{kper:Util2d} + """ + + # a dict keyed on kper (zero-based) + if isinstance(self.__value, dict): + tran_seq = {} + for key, val in self.__value.items(): + try: + key = int(key) + except: + raise Exception("Transient2d error: can't cast key: " + + str(key) + " to kper integer") + if key < 0: + raise Exception("Transient2d error: key can't be " + + " negative: " + str(key)) + try: + u2d = self.__get_2d_instance(key, val) + except Exception as e: + raise Exception("Transient2d error building Util2d " + + " instance from value at kper: " + + str(key) + "\n" + str(e)) + tran_seq[key] = u2d + return 
tran_seq + + # these are all for single entries - use the same Util2d for all kper + # an array of shape (nrow,ncol) + elif isinstance(self.__value, np.ndarray): + return {0: self.__get_2d_instance(0, self.__value)} + + # a filename + elif isinstance(self.__value, str): + return {0: self.__get_2d_instance(0, self.__value)} + + # a scalar + elif np.isscalar(self.__value): + return {0: self.__get_2d_instance(0, self.__value)} + + # lists aren't allowed + elif isinstance(self.__value, list): + raise Exception("Transient2d error: value cannot be a list " + + "anymore. try a dict{kper,value}") + else: + raise Exception("Transient2d error: value type not " + + " recognized: " + str(type(self.__value))) + + def __get_2d_instance(self, kper, arg): + """ + parse an argument into a Util2d instance + """ + ext_filename = None + name = self.name_base + str(kper + 1) + ext_filename = self.ext_filename_base + str(kper) + '.ref' + u2d = Util2d(self._model, self.shape, self._dtype, arg, + fmtin=self.fmtin, name=name, + ext_filename=ext_filename, + locat=self.locat, + array_free_format=self.array_free_format) + return u2d + + +class Util2d(DataInterface): + """ + Util2d class for handling 1- or 2-D model arrays + + Parameters + ---------- + model : model object + The model object (of type :class:`flopy.modflow.mf.Modflow`) to which + this package will be added. + shape : tuple + Shape of the 1- or 2-D array + dtype : [np.int32, np.float32, np.bool] + the type of the data + value : variable + the data to be assigned to the 1- or 2-D array. + can be a scalar, list, ndarray, or filename + name : string + name of the property (optional). (the default is None + fmtin : string + modflow fmtin variable (optional). (the default is None) + cnstnt : string + modflow cnstnt variable (optional) (the default is 1.0) + iprn : int + modflow iprn variable (optional) (the default is -1) + locat : int + modflow locat variable (optional) (the default is None). If the model + instance does not support free format and the + external flag is not set and the value is a simple scalar, + then locat must be explicitly passed as it is the unit number + to read the array from) + ext_filename : string + the external filename to write the array representation to + (optional) (the default is None) . + If type(value) is a string and is an accessible filename, + the ext_filename is reset to value. + bin : bool + flag to control writing external arrays as binary (optional) + (the default is False) + + Attributes + ---------- + array : np.ndarray + the array representation of the 2-D object + how : str + the str flag to control how the array is written to the model + input files e.g. "constant","internal","external","openclose" + format : ArrayFormat object + controls the ASCII representation of the numeric array + + Methods + ------- + get_file_entry : string + get the model input file string including the control record + + See Also + -------- + + Notes + ----- + If value is a valid filename and model.external_path is None, then a copy + of the file is made and placed in model.model_ws directory. + + If value is a valid filename and model.external_path is not None, then + a copy of the file is made a placed in the external_path directory. + + If value is a scalar, it is always written as a constant, regardless of + the model.external_path setting. + + If value is an array and model.external_path is not None, then the array + is written out in the external_path directory. 
The name of the file that + holds the array is created from the name attribute. If the model supports + "free format", then the array is accessed via the "open/close" approach. + Otherwise, a unit number and filename is added to the name file. + + If value is an array and model.external_path is None, then the array is + written internally to the model input file. + + Examples + -------- + + """ + + def __init__(self, model, shape, dtype, value, name, fmtin=None, + cnstnt=1.0, iprn=-1, ext_filename=None, locat=None, bin=False, + how=None, array_free_format=None): + """Create 1- or 2-d array + + Parameters + ---------- + model : model object + shape : tuple + Dimensions of 1- or 2-D array, e.g. (nrow, ncol) + dtype : int or np.float32 + value : str, list, np.int32, np.float32, bool or np.ndarray + name : str + Array name or description + fmtin : str, optional + cnstnt : np.int32 or np.float32, optional + Array constant; default 1.0 + iprn : int, optional + Modflow printing option; default -1 + ext_filename : str, optional + Name of external files name where arrays are written + locat : int, optional + bin : bool, optional + If True, writes unformatted files; default False writes formatted + how : str, optional + One of "constant", "internal", "external", or "openclose" + array_free_format : bool, optional + used for generating control record + + Notes + ----- + Support with minimum of mem footprint, only creates arrays as needed, + otherwise functions with strings or constants. + + Model instance string attribute "external_path" used to determine + external array writing + """ + if isinstance(value, Util2d): + for attr in value.__dict__.items(): + setattr(self, attr[0], attr[1]) + self._model = model + self._name = name + self._ext_filename = self._name.replace(' ', '_') + ".ref" + if ext_filename is not None: + self.ext_filename = ext_filename.lower() + else: + self.ext_filename = None + if locat is not None: + self.locat = locat + return + + # some defense + if dtype != np.int32 and np.issubdtype(dtype, np.integer): + # Modflow only uses 4-byte integers + dtype = np.dtype(dtype) + if np.dtype(int).itemsize != 4: + # show warning for platforms where int is not 4-bytes + warn('Util2d: setting integer dtype from {0} to int32' + .format(dtype)) + dtype = np.int32 + if dtype not in [np.int32, np.float32, np.bool]: + raise TypeError('Util2d:unsupported dtype: ' + str(dtype)) + + if name is not None: + name = name.lower() + if ext_filename is not None: + ext_filename = ext_filename.lower() + + self._model = model + if len(shape) not in (1, 2): + raise ValueError( + 'Util2d: shape must describe 1- or 2-dimensions, ' + 'e.g. 
(nrow, ncol)') + if min(shape) < 1: + raise ValueError('Util2d: each shape dimension must be at least 1') + self.shape = shape + self._dtype = dtype + self._name = name + self.locat = locat + self.parse_value(value) + if self.vtype == str: + fmtin = "(FREE)" + self.__value_built = None + self.cnstnt = dtype(cnstnt) + + self.iprn = iprn + self._format = ArrayFormat(self, fortran=fmtin, + array_free_format=array_free_format) + self._format._isbinary = bool(bin) + self.ext_filename = ext_filename + self._ext_filename = self._name.replace(' ', '_') + ".ref" + + self._acceptable_hows = ["constant", "internal", "external", + "openclose"] + + if how is not None: + how = how.lower() + assert how in self._acceptable_hows + self._how = how + else: + self._decide_how() + + @property + def name(self): + return self._name + + @property + def dtype(self): + return self._dtype + + @property + def model(self): + return self._model + + @property + def data_type(self): + return DataType.array2d + + @property + def plotable(self): + return True + + def _decide_how(self): + # if a constant was passed in + if self.vtype in [np.int32, np.float32]: + self._how = "constant" + # if a filename was passed in or external path was set + elif self._model.external_path is not None or \ + self.vtype == str: + if self.format.array_free_format: + self._how = "openclose" + else: + self._how = "external" + else: + self._how = "internal" + + def plot(self, title=None, filename_base=None, file_extension=None, + fignum=None, **kwargs): + """ + Plot 2-D model input data + + Parameters + ---------- + title : str + Plot title. If a plot title is not provide one will be + created based on data name (self._name). (default is None) + filename_base : str + Base file name that will be used to automatically generate file + names for output image files. Plots will be exported as image + files if file_name_base is not None. (default is None) + file_extension : str + Valid matplotlib.pyplot file extension for savefig(). Only used + if filename_base is not None. (default is 'png') + **kwargs : dict + axes : list of matplotlib.pyplot.axis + List of matplotlib.pyplot.axis that will be used to plot + data for each layer. If axes=None axes will be generated. + (default is None) + pcolor : bool + Boolean used to determine if matplotlib.pyplot.pcolormesh + plot will be plotted. (default is True) + colorbar : bool + Boolean used to determine if a color bar will be added to + the matplotlib.pyplot.pcolormesh. Only used if pcolor=True. + (default is False) + inactive : bool + Boolean used to determine if a black overlay in inactive + cells in a layer will be displayed. (default is True) + contour : bool + Boolean used to determine if matplotlib.pyplot.contour + plot will be plotted. (default is False) + clabel : bool + Boolean used to determine if matplotlib.pyplot.clabel + will be plotted. Only used if contour=True. (default is False) + grid : bool + Boolean used to determine if the model grid will be plotted + on the figure. (default is False) + masked_values : list + List of unique values to be excluded from the plot. + + Returns + ---------- + out : list + Empty list is returned if filename_base is not None. Otherwise + a list of matplotlib.pyplot.axis is returned. 
+ + See Also + -------- + + Notes + ----- + + Examples + -------- + >>> import flopy + >>> ml = flopy.modflow.Modflow.load('test.nam') + >>> ml.dis.top.plot() + + """ + from flopy.plot import PlotUtilities + + axes = PlotUtilities._plot_util2d_helper(self, title=title, + filename_base=filename_base, + file_extension=file_extension, + fignum=fignum, **kwargs) + return axes + + def export(self, f, **kwargs): + from flopy import export + return export.utils.array2d_export(f, self, **kwargs) + + def to_shapefile(self, filename): + """ + Export 2-D model data to a shapefile (as polygons) of self.array + + Parameters + ---------- + filename : str + Shapefile name to write + + Returns + ---------- + None + + See Also + -------- + + Notes + ----- + + Examples + -------- + >>> import flopy + >>> ml = flopy.modflow.Modflow.load('test.nam') + >>> ml.dis.top.as_shapefile('test_top.shp') + """ + + warn( + "Deprecation warning: to_shapefile() is deprecated. use .export()", + DeprecationWarning) + # from flopy.utils.flopy_io import write_grid_shapefile, shape_attr_name + # name = shape_attr_name(self._name, keep_layer=True) + # write_grid_shapefile(filename, self._model.dis.sr, {name: + # self.array}) + self.export(filename) + + def set_fmtin(self, fmtin): + self._format = ArrayFormat(self, fortran=fmtin, + array_free_format=self.format.array_free_format) + + def get_value(self): + return copy.deepcopy(self.__value) + + # overloads, tries to avoid creating arrays if possible + def __add__(self, other): + if self.vtype in [np.int32, np.float32] and self.vtype == other.vtype: + return self.__value + other.get_value() + else: + return self.array + other.array + + def __sub__(self, other): + if self.vtype in [np.int32, np.float32] and self.vtype == other.vtype: + return self.__value - other.get_value() + else: + return self.array - other.array + + def __mul__(self, other): + if np.isscalar(other): + return Util2d(self._model, self.shape, self._dtype, + self._array * other, self._name, + self.format.fortran, self.cnstnt, self.iprn, + self.ext_filename, + self.locat, self.format.binary) + else: + raise NotImplementedError( + "Util2d.__mul__() not implemented for non-scalars") + + def __eq__(self, other): + if not isinstance(other, Util2d): + return False + if not np.array_equal(other.array, self.array): + return False + if other.cnstnt != self.cnstnt: + return False + return True + + def __getitem__(self, k): + if isinstance(k, int): + if len(self.shape) == 1: + return self.array[k] + elif self.shape[0] == 1: + return self.array[0, k] + elif self.shape[1] == 1: + return self.array[k, 0] + else: + raise Exception( + "Util2d.__getitem__() error: an integer was passed, " + + "self.shape > 1 in both dimensions") + else: + if isinstance(k, tuple): + if len(k) == 2: + return self.array[k[0], k[1]] + if len(k) == 1: + return self.array[k] + else: + return self.array[(k,)] + + def __setitem__(self, k, value): + """ + this one is dangerous because it resets __value + """ + a = self.array + a[k] = value + a = a.astype(self._dtype) + self.__value = a + if self.__value_built is not None: + self.__value_built = None + + def __setattr__(self, key, value): + if key == "fmtin": + self._format = ArrayFormat(self, fortran=value) + elif key == "format": + assert isinstance(value, ArrayFormat) + self._format = value + elif key == "how": + value = value.lower() + assert value in self._acceptable_hows + self._how = value + elif key == "model": + self._model = value + else: + super(Util2d, self).__setattr__(key, value) + + def 
all(self): + return self.array.all() + + def __len__(self): + return self.shape[0] + + def sum(self): + return self.array.sum() + + def unique(self): + return np.unique(self.array) + + @property + def format(self): + # don't return a copy because we want to allow + # access to the attributes of ArrayFormat + return self._format + + @property + def how(self): + return copy.copy(self._how) + + @property + def vtype(self): + return type(self.__value) + + @property + def python_file_path(self): + """ + where python is going to write the file + Returns + ------- + file_path (str) : path relative to python: includes model_ws + """ + # if self.vtype != str: + # raise Exception("Util2d call to python_file_path " + + # "for vtype != str") + python_file_path = '' + if self._model.model_ws != '.': + python_file_path = os.path.join(self._model.model_ws) + if self._model.external_path is not None: + python_file_path = os.path.join(python_file_path, + self._model.external_path) + python_file_path = os.path.join(python_file_path, + self.filename) + return python_file_path + + @property + def filename(self): + if self.vtype != str: + if self.ext_filename is not None: + filename = os.path.split(self.ext_filename)[-1] + else: + filename = os.path.split(self._ext_filename)[-1] + else: + filename = os.path.split(self.__value)[-1] + return filename + + @property + def model_file_path(self): + """ + where the model expects the file to be + + Returns + ------- + file_path (str): path relative to the name file + + """ + + model_file_path = '' + if self._model.external_path is not None: + model_file_path = os.path.join(model_file_path, + self._model.external_path) + model_file_path = os.path.join(model_file_path, self.filename) + return model_file_path + + def get_constant_cr(self, value): + + if self.format.array_free_format: + lay_space = '{0:>27s}'.format('') + if self.vtype in [int, np.int32]: + lay_space = '{0:>32s}'.format('') + cr = 'CONSTANT ' + self.format.py[1].format(value) + cr = '{0:s}{1:s}#{2:<30s}\n'.format(cr, lay_space, + self._name) + else: + cr = self._get_fixed_cr(0, value=value) + return cr + + def _get_fixed_cr(self, locat, value=None): + fformat = self.format.fortran + if value is None: + value = self.cnstnt + if self.format.binary: + if locat is None: + raise Exception("Util2d._get_fixed_cr(): locat is None but" + \ + "format is binary") + if not self.format.array_free_format: + locat = -1 * np.abs(locat) + if locat is None: + locat = 0 + if locat == 0: + fformat = '' + if self.dtype == np.int32: + cr = '{0:>10.0f}{1:>10.0f}{2:>19s}{3:>10.0f} #{4}\n' \ + .format(locat, value, fformat, + self.iprn, self._name) + elif self._dtype == np.float32: + cr = '{0:>10.0f}{1:>10.5G}{2:>19s}{3:>10.0f} #{4}\n' \ + .format(locat, value, fformat, + self.iprn, self._name) + else: + raise Exception( + 'Util2d: error generating fixed-format control record, ' + 'dtype must be np.int32 or np.float32') + return cr + + def get_internal_cr(self): + if self.format.array_free_format: + cr = 'INTERNAL {0:15} {1:>10s} {2:2.0f} #{3:<30s}\n' \ + .format(self.cnstnt_str, self.format.fortran, self.iprn, + self._name) + return cr + else: + return self._get_fixed_cr(self.locat) + + @property + def cnstnt_str(self): + if isinstance(self.cnstnt, str): + return self.cnstnt + else: + return "{0:15.6G}".format(self.cnstnt) + + def get_openclose_cr(self): + cr = 'OPEN/CLOSE {0:>30s} {1:15} {2:>10s} {3:2.0f} {4:<30s}\n'.format( + self.model_file_path, self.cnstnt_str, + self.format.fortran, self.iprn, + self._name) + return cr + + 
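+    # A rough sketch (illustrative, not from the original source) of the
+    # control records the writers above and below emit for a free-format
+    # model, for a hypothetical array named hk with cnstnt=1.0:
+    #   constant  -> CONSTANT 1.0                              #hk
+    #   internal  -> INTERNAL 1.0 (10G11.4) -1 #hk
+    #   openclose -> OPEN/CLOSE hk.ref 1.0 (FREE) -1 hk
+    #   external  -> EXTERNAL 201 1.0 (FREE) -1 hk
+    # where hk.ref is an assumed model_file_path and 201 an assumed unit
+    # number from model.next_ext_unit(); spacing is approximate.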
def get_external_cr(self): + locat = self._model.next_ext_unit() + # if self.format.binary: + # locat = -1 * np.abs(locat) + self._model.add_external(self.model_file_path, locat, + self.format.binary) + if self.format.array_free_format: + cr = 'EXTERNAL {0:>30d} {1:15} {2:>10s} {3:2.0f} {4:<30s}\n'.format( + locat, self.cnstnt_str, + self.format.fortran, self.iprn, + self._name) + return cr + else: + return self._get_fixed_cr(locat) + + def get_file_entry(self, how=None): + + if how is not None: + how = how.lower() + else: + how = self._how + + if not self.format.array_free_format and self.format.free: + print("Util2d {0}: can't be free format...resetting".format( + self._name)) + self.format._isfree = False + + if not self.format.array_free_format and self.how == "internal" and self.locat is None: + print("Util2d {0}: locat is None, but ".format(self._name) + \ + "model does not " + \ + "support free format and how is internal..." + \ + "resetting how = external") + how = "external" + + if (self.format.binary or self._model.external_path) \ + and how in ["constant", "internal"]: + print("Util2d:{0}: ".format(self._name) + \ + "resetting 'how' to external") + if self.format.array_free_format: + how = "openclose" + else: + how = "external" + if how == "internal": + assert not self.format.binary, "Util2d error: 'how' is internal, but" + \ + "format is binary" + cr = self.get_internal_cr() + return cr + self.string + + elif how == "external" or how == "openclose": + if how == "openclose": + assert self.format.array_free_format, "Util2d error: 'how' is openclose," + \ + "but model doesn't support free fmt" + + # write a file if needed + if self.vtype != str: + if self.format.binary: + self.write_bin(self.shape, self.python_file_path, + self._array, + bintype="head") + else: + self.write_txt(self.shape, self.python_file_path, + self._array, + fortran_format=self.format.fortran) + + elif self.__value != self.python_file_path: + if os.path.exists(self.python_file_path): + # if the file already exists, remove it + if self._model.verbose: + print("Util2d warning: removing existing array " + + "file {0}".format(self.model_file_path)) + try: + os.remove(self.python_file_path) + except Exception as e: + raise Exception( + "Util2d: error removing existing file " + \ + self.python_file_path) + # copy the file to the new model location + try: + shutil.copy2(self.__value, self.python_file_path) + except Exception as e: + raise Exception("Util2d.get_file_array(): error copying " + + "{0} to {1}:{2}".format(self.__value, + self.python_file_path, + str(e))) + if how == "external": + return self.get_external_cr() + else: + return self.get_openclose_cr() + + elif how == "constant": + if self.vtype not in [np.int32, np.float32]: + u = np.unique(self._array) + assert u.shape[ + 0] == 1, "Util2d error: 'how' is constant, but array " + \ + "is not uniform" + value = u[0] + else: + value = self.__value + return self.get_constant_cr(value) + + else: + raise Exception("Util2d.get_file_entry() error: " + \ + "unrecognized 'how':{0}".format(how)) + + @property + def string(self): + """ + get the string representation of value attribute + + Note: + the string representation DOES NOT include the effects of the control + record multiplier - this method is used primarily for writing model input files + + """ + # convert array to sting with specified format + a_string = self.array2string(self.shape, self._array, + python_format=self.format.py) + return a_string + + @property + def array(self): + """ + Get the COPY of array 
representation of value attribute with the + effects of the control record multiplier applied. + + Returns + ------- + array : numpy.ndarray + Copy of the array with the multiplier applied. + + Note + ---- + .array is a COPY of the array representation as seen by the + model - with the effects of the control record multiplier applied. + + """ + if isinstance(self.cnstnt, str): + print("WARNING: cnstnt is str for {0}".format(self.name)) + return self._array.astype(self.dtype) + if isinstance(self.cnstnt, (int, np.int32)): + cnstnt = self.cnstnt + else: + if self.cnstnt == 0.0: + cnstnt = 1.0 + else: + cnstnt = self.cnstnt + # return a copy of self._array since it is being + # multiplied + return (self._array * cnstnt).astype(self._dtype) + + @property + def _array(self): + """ + get the array representation of value attribute + if value is a string or a constant, the array is loaded/built only once + + Note: + the return array representation DOES NOT include the effect of the multiplier + in the control record. To get the array as the model sees it (with the multiplier applied), + use the Util2d.array method. + """ + if self.vtype == str: + if self.__value_built is None: + file_in = open(self.__value, 'r') + + if self.format.binary: + header, self.__value_built = Util2d.load_bin(self.shape, + file_in, + self._dtype, + bintype="head") + else: + self.__value_built = Util2d.load_txt(self.shape, file_in, + self._dtype, + self.format.fortran).astype( + self._dtype) + file_in.close() + return self.__value_built + elif self.vtype != np.ndarray: + if self.__value_built is None: + self.__value_built = np.ones(self.shape, dtype=self._dtype) \ + * self.__value + return self.__value_built + else: + return self.__value + + @staticmethod + def load_block(shape, file_in, dtype): + """Load block format from a MT3D file to a 2-D array + + Parameters + ---------- + shape : tuple of int + Array dimensions (nrow, ncol) + file_in : file or str + Filename or file handle + dtype : np.int32 or np.float32 + + Returns + ------- + 2-D array + """ + if len(shape) != 2: + raise ValueError( + 'Util2d.load_block(): expected 2 dimensions, found shape {0}' + .format(shape)) + nrow, ncol = shape + data = np.ma.zeros(shape, dtype=dtype) + data.mask = True + openfile = not hasattr(file_in, 'read') + if openfile: + file_in = open(file_in, 'r') + line = file_in.readline().strip() + nblock = int(line.split()[0]) + for n in range(nblock): + line = file_in.readline().strip() + raw = line.split() + if len(raw) < 5: + raise ValueError('Util2d.load_block(): expected 5 items, ' + 'found {0}: {1}'.format(len(raw), line)) + i1, i2 = int(raw[0]) - 1, int(raw[1]) + j1, j2 = int(raw[2]) - 1, int(raw[3]) + data[i1:i2, j1:j2] = raw[4] + if openfile: + file_in.close() + if data.mask.any(): + warn('Util2d.load_block(): blocks do not cover full array') + return data.data + + @staticmethod + def load_txt(shape, file_in, dtype, fmtin): + """Load formatted file to a 1-D or 2-D array + + Parameters + ---------- + shape : tuple of int + One or two array dimensions + file_in : file or str + Filename or file handle + dtype : np.int32 or np.float32 + fmtin : str + Fortran array format descriptor, '(FREE)' or e.g. '(10G11.4)' + + Notes + ----- + This method is similar to MODFLOW's U1DREL, U1DINT, U2DREL and U2DINT + subroutines, but only for formatted files. 
+ + Returns + ------- + 1-D or 2-D array + """ + if len(shape) == 1: + num_items = shape[0] + elif len(shape) == 2: + nrow, ncol = shape + num_items = nrow * ncol + else: + raise ValueError( + 'Util2d.load_txt(): expected 1 or 2 dimensions, found shape {0}' + .format(shape)) + openfile = not hasattr(file_in, 'read') + if openfile: + file_in = open(file_in, 'r') + npl, fmt, width, decimal = ArrayFormat.decode_fortran_descriptor(fmtin) + items = [] + while len(items) < num_items: + line = file_in.readline() + if len(line) == 0: + raise ValueError('Util2d.load_txt(): no data found') + if npl == 'free': + if ',' in line: + line = line.replace(',', ' ') + if '*' in line: # use slower method for these types of lines + for item in line.split(): + if '*' in item: + num, val = item.split('*') + # repeat val num times + items += int(num) * [val] + else: + items.append(item) + else: + items += line.split() + else: # fixed width + pos = 0 + for i in range(npl): + try: + item = line[pos:pos + width].strip() + pos += width + if item: + items.append(item) + except IndexError: + break + if openfile: + file_in.close() + data = np.fromiter(items, dtype=dtype, count=num_items) + if data.size != num_items: + raise ValueError('Util2d.load_txt(): expected array size {0},' + ' but found size {1}'.format(num_items, + data.size)) + return data.reshape(shape) + + @staticmethod + def write_txt(shape, file_out, data, fortran_format="(FREE)", + python_format=None): + if fortran_format.upper() == '(FREE)' and python_format is None: + np.savetxt(file_out, np.atleast_2d(data), + ArrayFormat.get_default_numpy_fmt(data.dtype), + delimiter='') + return + if not hasattr(file_out, "write"): + file_out = open(file_out, 'w') + file_out.write( + Util2d.array2string(shape, data, fortran_format=fortran_format, + python_format=python_format)) + + @staticmethod + def array2string(shape, data, fortran_format="(FREE)", + python_format=None): + """ + return a string representation of + a (possibly wrapped format) array from a file + (self.__value) and casts to the proper type (self._dtype) + made static to support the load functionality + this routine now supports fixed format arrays where the numbers + may touch. + """ + if len(shape) == 2: + nrow, ncol = shape + else: + nrow = 1 + ncol = shape[0] + data = np.atleast_2d(data) + if python_format is None: + column_length, fmt, width, decimal = \ + ArrayFormat.decode_fortran_descriptor(fortran_format) + if decimal is None: + output_fmt = '{0}0:{1}{2}{3}'.format('{', width, 'd', '}') + else: + output_fmt = '{0}0:{1}.{2}{3}{4}'.format('{', width, decimal, + fmt, '}') + else: + try: + column_length, output_fmt = int(python_format[0]), \ + python_format[1] + except: + raise Exception('Util2d.write_txt: \nunable to parse' + + 'python_format:\n {0}\n'. 
+
+    @staticmethod
+    def array2string(shape, data, fortran_format="(FREE)",
+                     python_format=None):
+        """
+        return a string representation of a (possibly wrapped format)
+        array. Made static to support the load functionality.
+        This routine supports fixed format arrays where the numbers
+        may touch.
+        """
+        if len(shape) == 2:
+            nrow, ncol = shape
+        else:
+            nrow = 1
+            ncol = shape[0]
+        data = np.atleast_2d(data)
+        if python_format is None:
+            column_length, fmt, width, decimal = \
+                ArrayFormat.decode_fortran_descriptor(fortran_format)
+            if decimal is None:
+                output_fmt = '{0}0:{1}{2}{3}'.format('{', width, 'd', '}')
+            else:
+                output_fmt = '{0}0:{1}.{2}{3}{4}'.format('{', width, decimal,
+                                                         fmt, '}')
+        else:
+            try:
+                column_length, output_fmt = int(python_format[0]), \
+                                            python_format[1]
+            except:
+                raise Exception('Util2d.array2string: \nunable to parse ' +
+                                'python_format:\n    {0}\n'.
+                                format(python_format) +
+                                '  python_format should be a list with\n' +
+                                '   [column_length, fmt]\n' +
+                                '    e.g., [10, {0:10.2e}]')
+        if ncol % column_length == 0:
+            linereturnflag = False
+        else:
+            linereturnflag = True
+        # write the array to a string
+        s = ""
+        for i in range(nrow):
+            icol = 0
+            for j in range(ncol):
+                try:
+                    s = s + output_fmt.format(data[i, j])
+                except Exception as e:
+                    raise Exception("error writing array value" +
+                                    "{0} at r,c [{1},{2}]\n{3}".format(
+                                        data[i, j], i, j, str(e)))
+                if (j + 1) % column_length == 0 and (j != 0 or ncol == 1):
+                    s += '\n'
+            if linereturnflag:
+                s += '\n'
+        return s
+
+    @staticmethod
+    def load_bin(shape, file_in, dtype, bintype=None):
+        """Load unformatted file to a 2-D array
+
+        Parameters
+        ----------
+        shape : tuple of int
+            One or two array dimensions
+        file_in : file or str
+            Filename or file handle
+        dtype : np.int32 or np.float32
+            Data type of unformatted file and Numpy array; use np.int32 for
+            Fortran's INTEGER, and np.float32 for Fortran's REAL data types.
+        bintype : str
+            Normally 'Head'
+
+        Notes
+        -----
+        This method is similar to MODFLOW's U2DREL and U2DINT subroutines,
+        but only for unformatted files.
+
+        Returns
+        -------
+        (header_data, 2-D array)
+        """
+        import flopy.utils.binaryfile as bf
+        nrow, ncol = shape
+        num_items = nrow * ncol
+        if dtype != np.int32 and np.issubdtype(dtype, np.integer):
+            # Modflow only uses 4-byte integers
+            dtype = np.dtype(dtype)
+            if dtype.itemsize != 4:
+                # show warning for platforms where int is not 4-bytes
+                warn('Util2d: setting integer dtype from {0} to int32'
+                     .format(dtype))
+            dtype = np.int32
+        openfile = not hasattr(file_in, 'read')
+        if openfile:
+            file_in = open(file_in, 'rb')
+        header_data = None
+        if bintype is not None and np.issubdtype(dtype, np.floating):
+            header_dtype = bf.BinaryHeader.set_dtype(bintype=bintype)
+            header_data = np.fromfile(file_in, dtype=header_dtype, count=1)
+        data = np.fromfile(file_in, dtype=dtype, count=num_items)
+        if openfile:
+            file_in.close()
+        if data.size != num_items:
+            raise ValueError('Util2d.load_bin(): expected array size {0},'
+                             ' but found size {1}'.format(num_items,
+                                                          data.size))
+        return header_data, data.reshape(shape)
+
+    @staticmethod
+    def write_bin(shape, file_out, data, bintype=None, header_data=None):
+        if not hasattr(file_out, 'write'):
+            file_out = open(file_out, 'wb')
+        dtype = data.dtype
+        if bintype is not None:
+            if header_data is None:
+                header_data = BinaryHeader.create(bintype=bintype,
+                                                  nrow=shape[0],
+                                                  ncol=shape[1])
+        if header_data is not None:
+            header_data.tofile(file_out)
+        data.tofile(file_out)
+        return
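write_bin and load_bin are symmetric: with bintype=None no header record is
written or read, so a plain unformatted dump round-trips. A sketch using a
throwaway temp file (numpy's tofile/fromfile need a real file object, not an
in-memory buffer; the file name is hypothetical):

    import os
    import tempfile
    import numpy as np
    from flopy.utils.util_array import Util2d

    a = np.arange(12, dtype=np.float32).reshape(3, 4)
    fname = os.path.join(tempfile.mkdtemp(), 'arr.bin')  # throwaway path
    with open(fname, 'wb') as f:
        Util2d.write_bin(a.shape, f, a)  # bintype=None: no header record
    with open(fname, 'rb') as f:
        header, b = Util2d.load_bin(a.shape, f, np.float32)
    assert header is None and np.array_equal(a, b)
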
+
+    def parse_value(self, value):
+        """
+        parses and casts the raw value into an acceptable format for __value
+        lot of defense here, so we can make assumptions later
+        """
+        if isinstance(value, list):
+            value = np.array(value)
+
+        if isinstance(value, bool):
+            if self._dtype == np.bool:
+                try:
+                    self.__value = np.bool(value)
+
+                except:
+                    raise Exception('Util2d:could not cast ' +
+                                    'boolean value to type "np.bool": ' +
+                                    str(value))
+            else:
+                raise Exception('Util2d:value type is bool, ' +
+                                ' but dtype not set as np.bool')
+        elif isinstance(value, str):
+            if os.path.exists(value):
+                self.__value = value
+                return
+            elif self.dtype == np.int32:
+                try:
+                    self.__value = np.int32(value)
+                except:
+                    raise Exception("Util2d error: str not a file and " +
+                                    "couldn't be cast to int: {0}".format(
+                                        value))
+
+            else:
+                try:
+                    self.__value = float(value)
+                except:
+                    raise Exception("Util2d error: str not a file and " +
+                                    "couldn't be cast to float: {0}".format(
+                                        value))
+
+        elif np.isscalar(value):
+            if self.dtype == np.int32:
+                try:
+                    self.__value = np.int32(value)
+                except:
+                    raise Exception('Util2d:could not cast scalar ' +
+                                    'value to type "int": ' + str(value))
+            elif self._dtype == np.float32:
+                try:
+                    self.__value = np.float32(value)
+                except:
+                    raise Exception('Util2d:could not cast ' +
+                                    'scalar value to type "float": ' +
+                                    str(value))
+
+        elif isinstance(value, np.ndarray):
+            # if value is 3d, but dimension 1 is only length 1,
+            # then drop the first dimension
+            if len(value.shape) == 3 and value.shape[0] == 1:
+                value = value[0]
+            if self.shape != value.shape:
+                raise Exception('Util2d:self.shape: ' + str(self.shape) +
+                                ' does not match value.shape: ' +
+                                str(value.shape))
+            if self._dtype != value.dtype:
+                value = value.astype(self._dtype)
+            self.__value = value
+
+        else:
+            raise Exception('Util2d:unsupported type in util_array: ' +
+                            str(type(value)))
+
+    @staticmethod
+    def load(f_handle, model, shape, dtype, name, ext_unit_dict=None,
+             array_free_format=None, array_format="modflow"):
+        """
+        functionality to load a Util2d instance from an existing
+        model input file.
+        external and internal record types must be fully loaded.
+        if you are using fixed format record types, make sure
+        ext_unit_dict has been initialized from the NAM file
+        """
+        if shape == (0, 0):
+            raise IndexError('No information on model grid dimensions. '
+                             'Need nrow, ncol to load a Util2d array.')
+        curr_unit = None
+        if ext_unit_dict is not None:
+            # determine the current file's unit number
+            cfile = f_handle.name
+            for cunit in ext_unit_dict:
+                if cfile == ext_unit_dict[cunit].filename:
+                    curr_unit = cunit
+                    break
+
+        # Allows for special MT3D array reader
+        # array_format = None
+        # if hasattr(model, 'array_format'):
+        #     array_format = model.array_format
+
+        cr_dict = Util2d.parse_control_record(f_handle.readline(),
+                                              current_unit=curr_unit,
+                                              dtype=dtype,
+                                              ext_unit_dict=ext_unit_dict,
+                                              array_format=array_format)
+
+        if cr_dict['type'] == 'constant':
+            u2d = Util2d(model, shape, dtype, cr_dict['cnstnt'], name=name,
+                         iprn=cr_dict['iprn'], fmtin="(FREE)",
+                         array_free_format=array_free_format)
+
+        elif cr_dict['type'] == 'open/close':
+            # clean up the filename a little
+            fname = cr_dict['fname']
+            fname = fname.replace("'", "")
+            fname = fname.replace('"', '')
+            fname = fname.replace('\'', '')
+            fname = fname.replace('\"', '')
+            fname = fname.replace('\\', os.path.sep)
+            fname = os.path.join(model.model_ws, fname)
+            # load_txt(shape, file_in, dtype, fmtin):
+            assert os.path.exists(fname), "Util2d.load() error: open/close " + \
+                                          "file " + str(fname) + " not found"
+            if str('binary') not in str(cr_dict['fmtin'].lower()):
+                f = open(fname, 'r')
+                data = Util2d.load_txt(shape=shape,
+                                       file_in=f,
+                                       dtype=dtype, fmtin=cr_dict['fmtin'])
+            else:
+                f = open(fname, 'rb')
+                header_data, data = Util2d.load_bin(shape, f, dtype,
+                                                    bintype='Head')
+            f.close()
+            u2d = Util2d(model, shape, dtype, data, name=name,
+                         iprn=cr_dict['iprn'], fmtin="(FREE)",
+                         cnstnt=cr_dict['cnstnt'],
+                         array_free_format=array_free_format)
+
+        elif cr_dict['type'] == 'internal':
+            data = Util2d.load_txt(shape, f_handle, dtype, cr_dict['fmtin'])
+            u2d = Util2d(model, shape, dtype, data, name=name,
+                         iprn=cr_dict['iprn'], fmtin="(FREE)",
+                         cnstnt=cr_dict['cnstnt'], locat=None,
+                         array_free_format=array_free_format)
+
+        elif cr_dict['type'] == 'external':
+            ext_unit = ext_unit_dict[cr_dict['nunit']]
+            if ext_unit.filehandle is None:
+                raise IOError('cannot read unit 
{0}, filename: {1}' + .format(cr_dict['nunit'], ext_unit.filename)) + elif 'binary' not in str(cr_dict['fmtin'].lower()): + assert cr_dict['nunit'] in list(ext_unit_dict.keys()) + data = Util2d.load_txt(shape, ext_unit.filehandle, + dtype, cr_dict['fmtin']) + else: + if cr_dict['nunit'] not in list(ext_unit_dict.keys()): + cr_dict["nunit"] *= -1 + assert cr_dict['nunit'] in list(ext_unit_dict.keys()) + header_data, data = Util2d.load_bin( + shape, ext_unit.filehandle, dtype, + bintype='Head') + u2d = Util2d(model, shape, dtype, data, name=name, + iprn=cr_dict['iprn'], fmtin="(FREE)", + cnstnt=cr_dict['cnstnt'], + array_free_format=array_free_format) + # track this unit number so we can remove it from the external + # file list later + model.pop_key_list.append(cr_dict['nunit']) + elif cr_dict['type'] == 'block': + data = Util2d.load_block(shape, f_handle, dtype) + u2d = Util2d(model, shape, dtype, data, name=name, + iprn=cr_dict['iprn'], fmtin="(FREE)", + cnstnt=cr_dict['cnstnt'], locat=None, + array_free_format=array_free_format) + + return u2d + + @staticmethod + def parse_control_record(line, current_unit=None, dtype=np.float32, + ext_unit_dict=None, array_format=None): + """ + parses a control record when reading an existing file + rectifies fixed to free format + current_unit (optional) indicates the unit number of the file being parsed + """ + free_fmt = ['open/close', 'internal', 'external', 'constant'] + raw = line.strip().split() + freefmt, cnstnt, fmtin, iprn, nunit = None, None, None, -1, None + fname = None + isfloat = False + if dtype == np.float or dtype == np.float32: + isfloat = True + # if free format keywords + if str(raw[0].lower()) in str(free_fmt): + freefmt = raw[0].lower() + if raw[0].lower() == 'constant': + if isfloat: + cnstnt = np.float(raw[1].lower().replace('d', 'e')) + else: + cnstnt = np.int(raw[1].lower()) + if raw[0].lower() == 'internal': + if isfloat: + cnstnt = np.float(raw[1].lower().replace('d', 'e')) + else: + cnstnt = np.int(raw[1].lower()) + fmtin = raw[2].strip() + iprn = 0 + if len(raw) >= 4: + iprn = int(raw[3]) + elif raw[0].lower() == 'external': + if ext_unit_dict is not None: + try: + # td = ext_unit_dict[int(raw[1])] + fname = ext_unit_dict[int(raw[1])].filename.strip() + except: + print(' could not determine filename ' + + 'for unit {}'.format(raw[1])) + + nunit = int(raw[1]) + if isfloat: + cnstnt = np.float(raw[2].lower().replace('d', 'e')) + else: + cnstnt = np.int(raw[2].lower()) + fmtin = raw[3].strip() + iprn = 0 + if len(raw) >= 5: + iprn = int(raw[4]) + elif raw[0].lower() == 'open/close': + fname = raw[1].strip() + if isfloat: + cnstnt = np.float(raw[2].lower().replace('d', 'e')) + else: + cnstnt = np.int(raw[2].lower()) + fmtin = raw[3].strip() + iprn = 0 + if len(raw) >= 5: + iprn = int(raw[4]) + npl, fmt, width, decimal = None, None, None, None + else: + locat = np.int(line[0:10].strip()) + if isfloat: + if len(line) >= 20: + cnstnt = np.float( + line[10:20].strip().lower().replace('d', 'e')) + else: + cnstnt = 0.0 + else: + if len(line) >= 20: + cnstnt = np.int(line[10:20].strip()) + else: + cnstnt = 0 + # if cnstnt == 0: + # cnstnt = 1 + if locat != 0: + if len(line) >= 40: + fmtin = line[20:40].strip() + else: + fmtin = '' + try: + iprn = np.int(line[40:50].strip()) + except: + iprn = 0 + # locat = int(raw[0]) + # cnstnt = float(raw[1]) + # fmtin = raw[2].strip() + # iprn = int(raw[3]) + if locat == 0: + freefmt = 'constant' + elif locat < 0: + freefmt = 'external' + nunit = np.int(locat) * -1 + fmtin = '(binary)' + elif 
locat > 0:
+                # if the unit number matches the current file, it's internal
+                if locat == current_unit:
+                    freefmt = 'internal'
+                else:
+                    freefmt = 'external'
+                    nunit = np.int(locat)
+
+            # Reset for special MT3D control flags
+            if array_format == 'mt3d':
+                if locat == 100:
+                    freefmt = 'internal'
+                    nunit = current_unit
+                elif locat == 101:
+                    freefmt = 'block'
+                    nunit = current_unit
+                elif locat == 102:
+                    raise NotImplementedError(
+                        'MT3D zonal format not supported...')
+                elif locat == 103:
+                    freefmt = 'internal'
+                    nunit = current_unit
+                    fmtin = '(free)'
+
+        cr_dict = {}
+        cr_dict['type'] = freefmt
+        cr_dict['cnstnt'] = cnstnt
+        cr_dict['nunit'] = nunit
+        cr_dict['iprn'] = iprn
+        cr_dict['fmtin'] = fmtin
+        cr_dict['fname'] = fname
+        return cr_dict
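parse_control_record is the dispatch point for Util2d.load: it normalizes
both free-format keyword records (CONSTANT, INTERNAL, EXTERNAL, OPEN/CLOSE)
and fixed-format LOCAT records into the cr_dict returned above. Two sketches
of the free-format path (the record values are illustrative, not from the
patch):

    import numpy as np
    from flopy.utils.util_array import Util2d

    cr = Util2d.parse_control_record('CONSTANT 5.5\n', dtype=np.float32)
    # cr['type'] == 'constant', cr['cnstnt'] == 5.5

    cr = Util2d.parse_control_record('INTERNAL 1.0 (10G11.4) 3\n',
                                     dtype=np.float32)
    # cr['type'] == 'internal', cr['fmtin'] == '(10G11.4)', cr['iprn'] == 3
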
diff --git a/flopy/utils/util_list.py b/flopy/utils/util_list.py
index a6ca2065b9..33d50a1fe7 100644
--- a/flopy/utils/util_list.py
+++ b/flopy/utils/util_list.py
@@ -1,1181 +1,1181 @@
-"""
-util_list module. Contains the mflist class.
- This classes encapsulates modflow-style list inputs away
- from the individual packages. The end-user should not need to
- instantiate this class directly.
-
-    some more info
-
-"""
-from __future__ import division, print_function
-
-import os
-import warnings
-import numpy as np
-from ..datbase import DataInterface, DataListInterface, DataType
-from ..utils.recarray_utils import create_empty_recarray
-
-try:
-    from numpy.lib import NumpyVersion
-    numpy114 = NumpyVersion(np.__version__) >= '1.14.0'
-except ImportError:
-    numpy114 = False
-
-
-class MfList(DataInterface, DataListInterface):
-    """
-    a generic object for handling transient boundary condition lists
-
-    Parameters
-    ----------
-    package : package object
-        The package object (of type :class:`flopy.pakbase.Package`) to which
-        this MfList will be added.
-    data : varies
-        the data of the transient list (optional). (the default is None)
-
-    Attributes
-    ----------
-    mxact : int
-        the max number of active bc for any stress period
-
-    Methods
-    -------
-    add_record(kper,index,value) : None
-        add a record to stress period kper at index location
-    write_transient(f) : None
-        write the transient sequence to the model input file f
-    check_kij() : None
-        checks for boundaries outside of model domain - issues warnings only
-
-    See Also
-    --------
-
-    Notes
-    -----
-
-    Examples
-    --------
-
-    """
-
-    def __init__(self, package, data=None, dtype=None, model=None,
-                 list_free_format=None, binary=False):
-
-        if isinstance(data, MfList):
-            for attr in data.__dict__.items():
-                setattr(self, attr[0], attr[1])
-            if model is None:
-                self._model = package.parent
-            else:
-                self._model = model
-            self._package = package
-            return
-
-        self._package = package
-        if model is None:
-            self._model = package.parent
-        else:
-            self._model = model
-        if dtype is None:
-            assert isinstance(self.package.dtype, np.dtype)
-            self.__dtype = self.package.dtype
-        else:
-            self.__dtype = dtype
-        self.__binary = binary
-        self.__vtype = {}
-        self.__data = {}
-        if data is not None:
-            self.__cast_data(data)
-        self.__df = None
-        if list_free_format is None:
-            if package.parent.version == "mf2k":
-                list_free_format = False
-        self.list_free_format = list_free_format
-        return
-
-    @property
-    def name(self):
-        return self.package.name
-
-    @property
-    def mg(self):
-        return self._model.modelgrid
-
-    @property
-    def sr(self):
-        return self.mg.sr
-
-    @property
-    def model(self):
-        return self._model
-
-    @property
-    def package(self):
-        return self._package
-
-    @property
-    def data_type(self):
-        return DataType.transientlist
-
-    @property
-    def plotable(self):
-        return True
-
-    def get_empty(self, ncell=0):
-        d = create_empty_recarray(ncell, self.dtype, default_value=-1.0E+10)
-        return d
-
-    def export(self, f, **kwargs):
-        from flopy import export
-        return export.utils.mflist_export(f, self, **kwargs)
-
-    def append(self, other):
-        """ append the recarrays from one MfList to another
-        Parameters
-        ----------
-        other: variable: an item that can be cast in to an MfList
-            that corresponds with self
-        Returns
-        -------
-        dict of {kper:recarray}
-        """
-        if not isinstance(other, MfList):
-            other = MfList(self.package, data=other, dtype=self.dtype,
-                           model=self._model,
-                           list_free_format=self.list_free_format)
-        msg = "MfList.append(): other arg must be " + \
-              "MfList or dict, not {0}".format(type(other))
-        assert isinstance(other, MfList), msg
-
-        other_kpers = list(other.data.keys())
-        other_kpers.sort()
-
-        self_kpers = list(self.data.keys())
-        self_kpers.sort()
-
-        new_dict = {}
-        for kper in range(self._model.nper):
-            other_data = other[kper].copy()
-            self_data = self[kper].copy()
-
-            other_len = other_data.shape[0]
-            self_len = self_data.shape[0]
-
-            if (other_len == 0 and self_len == 0) or \
-                    (kper not in self_kpers and kper not in other_kpers):
-                continue
-            elif self_len == 0:
-                new_dict[kper] = other_data
-            elif other_len == 0:
-                new_dict[kper] = self_data
-            else:
-                new_len = other_data.shape[0] + self_data.shape[0]
-                new_data = np.recarray(new_len, dtype=self.dtype)
-                new_data[:self_len] = self_data
-                new_data[self_len:self_len + other_len] = other_data
-                new_dict[kper] = new_data
-
-
-        return new_dict
-
-    def drop(self, fields):
-        """drop fields from an MfList
-
-        Parameters
-        ----------
-        fields : list or set of field names to drop
-
-        Returns
-        -------
-        dropped : MfList without the dropped fields
-        """
-        if not isinstance(fields, list):
-            fields = 
[fields] - names = [n for n in self.dtype.names if n not in fields] - dtype = np.dtype( - [(k, d) for k, d in self.dtype.descr if k not in fields]) - spd = {} - for k, v in self.data.items(): - # because np 1.9 doesn't support indexing by list of columns - newarr = np.array([self.data[k][n] for n in names]).transpose() - newarr = np.array(list(map(tuple, newarr)), dtype=dtype).view( - np.recarray) - for n in dtype.names: - newarr[n] = self.data[k][n] - spd[k] = newarr - return MfList(self.package, spd, dtype=dtype) - - @property - def data(self): - return self.__data - - @property - def df(self): - if self.__df is None: - self.__df = self.get_dataframe() - return self.__df - - @property - def vtype(self): - return self.__vtype - - @property - def dtype(self): - return self.__dtype - - # Get the itmp for a given kper - def get_itmp(self, kper): - if kper not in list(self.__data.keys()): - return None - if self.__vtype[kper] is None: - return -1 - # If an external file, have to load it - if self.__vtype[kper] == str: - return self.__fromfile(self.__data[kper]).shape[0] - if self.__vtype[kper] == np.recarray: - return self.__data[kper].shape[0] - # If not any of the above, it must be an int - return self.__data[kper] - - @property - def mxact(self): - mxact = 0 - for kper in list(self.__data.keys()): - mxact = max(mxact, self.get_itmp(kper)) - return mxact - - @property - def fmt_string(self): - """Returns a C-style fmt string for numpy savetxt that corresponds to - the dtype""" - if self.list_free_format is not None: - use_free = self.list_free_format - else: - use_free = True - if self.package.parent.has_package('bas6'): - use_free = self.package.parent.bas6.ifrefm - # mt3d list data is fixed format - if 'mt3d' in self.package.parent.version.lower(): - use_free = False - fmts = [] - for field in self.dtype.descr: - vtype = field[1][1].lower() - if vtype in ('i', 'b'): - if use_free: - fmts.append('%9d') - else: - fmts.append('%10d') - elif vtype == 'f': - if use_free: - if numpy114: - # Use numpy's floating-point formatter (Dragon4) - fmts.append('%15s') - else: - fmts.append('%15.7E') - else: - fmts.append('%10G') - elif vtype == 'o': - if use_free: - fmts.append('%9s') - else: - fmts.append('%10s') - elif vtype == 's': - msg = ("MfList.fmt_string error: 'str' type found in dtype. " - "This gives unpredictable results when " - "recarray to file - change to 'object' type") - raise TypeError(msg) - else: - raise TypeError("MfList.fmt_string error: unknown vtype in " - "field: {}".format(field)) - if use_free: - fmt_string = ' ' + ' '.join(fmts) - else: - fmt_string = ''.join(fmts) - return fmt_string - - # Private method to cast the data argument - # Should only be called by the constructor - def __cast_data(self, data): - # If data is a list, then all we can do is try to cast it to - # an ndarray, then cast again to a recarray - if isinstance(data, list): - # warnings.warn("MfList casting list to array") - try: - data = np.array(data) - except Exception as e: - raise Exception("MfList error: casting list to ndarray: " + \ - str(e)) - - # If data is a dict, the we have to assume it is keyed on kper - if isinstance(data, dict): - if not list(data.keys()): - raise Exception("MfList error: data dict is empty") - for kper, d in data.items(): - try: - kper = int(kper) - except Exception as e: - raise Exception("MfList error: data dict key " + \ - "{0:s} not integer: ".format(kper) + \ - str(type(kper)) + "\n" + str(e)) - # Same as before, just try... 
- if isinstance(d, list): - # warnings.warn("MfList: casting list to array at " +\ - # "kper {0:d}".format(kper)) - try: - d = np.array(d) - except Exception as e: - raise Exception("MfList error: casting list " + \ - "to ndarray: " + str(e)) - - # super hack - sick of recarrays already - # if (isinstance(d,np.ndarray) and len(d.dtype.fields) > 1): - # d = d.view(np.recarray) - - if isinstance(d, np.recarray): - self.__cast_recarray(kper, d) - elif isinstance(d, np.ndarray): - self.__cast_ndarray(kper, d) - elif isinstance(d, int): - self.__cast_int(kper, d) - elif isinstance(d, str): - self.__cast_str(kper, d) - elif d is None: - self.__data[kper] = -1 - self.__vtype[kper] = None - else: - raise Exception("MfList error: unsupported data type: " + - str(type(d)) + " at kper " + - "{0:d}".format(kper)) - - # A single recarray - same MfList for all stress periods - elif isinstance(data, np.recarray): - self.__cast_recarray(0, data) - # A single ndarray - elif isinstance(data, np.ndarray): - self.__cast_ndarray(0, data) - # A single filename - elif isinstance(data, str): - self.__cast_str(0, data) - else: - raise Exception("MfList error: unsupported data type: " + \ - str(type(data))) - - def __cast_str(self, kper, d): - # If d is a string, assume it is a filename and check that it exists - assert os.path.exists(d), "MfList error: dict filename (string) \'" + \ - d + "\' value for " + \ - "kper {0:d} not found".format(kper) - self.__data[kper] = d - self.__vtype[kper] = str - - def __cast_int(self, kper, d): - # If d is an integer, then it must be 0 or -1 - if d > 0: - raise Exception("MfList error: dict integer value for " - "kper {0:10d} must be 0 or -1, " - "not {1:10d}".format(kper, d)) - if d == 0: - self.__data[kper] = 0 - self.__vtype[kper] = None - else: - self.__data[kper] = -1 - self.__vtype[kper] = None - - def __cast_recarray(self, kper, d): - assert d.dtype == self.__dtype, "MfList error: recarray dtype: " + \ - str(d.dtype) + " doesn't match " + \ - "self dtype: " + str(self.dtype) - self.__data[kper] = d - self.__vtype[kper] = np.recarray - - def __cast_ndarray(self, kper, d): - d = np.atleast_2d(d) - if d.dtype != self.__dtype: - assert d.shape[1] == len(self.dtype), "MfList error: ndarray " + \ - "shape " + str(d.shape) + \ - " doesn't match dtype " + \ - "len: " + \ - str(len(self.dtype)) - # warnings.warn("MfList: ndarray dtype does not match self " +\ - # "dtype, trying to cast") - try: - self.__data[kper] = np.core.records.fromarrays(d.transpose(), - dtype=self.dtype) - except Exception as e: - raise Exception("MfList error: casting ndarray to recarray: " + \ - str(e)) - self.__vtype[kper] = np.recarray - - def get_dataframe(self, squeeze=True): - """ - Cast recarrays for stress periods into single - dataframe containing all stress periods. - - Parameters - ---------- - squeeze : bool - Reduce number of columns in dataframe to only include - stress periods where a variable changes. - - Returns - ------- - df : dataframe - Dataframe of shape nrow = ncells, ncol = nvar x nper. If - the squeeze option is chosen, nper is the number of - stress periods where at least one cells is different, - otherwise it is equal to the number of keys in MfList.data. - - Notes - ----- - Requires pandas. 
- - """ - try: - import pandas as pd - except Exception as e: - msg = 'MfList.get_dataframe() requires pandas' - raise ImportError(msg) - - # make a dataframe of all data for all stress periods - names = ['k', 'i', 'j'] - if 'MNW2' in self.package.name: - names += ['wellid'] - - # find relevant variable names - # may have to iterate over the first stress period - for per in range(self._model.nper): - if hasattr(self.data[per], 'dtype'): - varnames = list([n for n in self.data[per].dtype.names - if n not in names]) - break - - # create list of dataframes for each stress period - # each with index of k, i, j - dfs = [] - for per in self.data.keys(): - recs = self.data[per] - if recs is None or len(recs) == 0: - # add an empty dataframe if a stress period is - # empty (e.g. no pumping during a predevelopment - # period) - columns = names + list(['{}{}'.format(c, per) - for c in varnames]) - dfi = pd.DataFrame(data=None, columns=columns) - dfi = dfi.set_index(names) - else: - dfi = pd.DataFrame.from_records(recs) - dfg = dfi.groupby(names) - count = dfg[varnames[0]].count().rename('n') - if (count > 1).values.any(): - print("Duplicated list entry locations aggregated " - "for kper {}".format(per)) - for kij in count[count > 1].index.values: - print(" (k,i,j) {}".format(kij)) - dfi = dfg.sum() # aggregate - dfi.columns = list(['{}{}'.format(c, per) for c in varnames]) - dfs.append(dfi) - df = pd.concat(dfs, axis=1) - if squeeze: - keep = [] - for var in varnames: - diffcols = list([n for n in df.columns if var in n]) - diff = df[diffcols].fillna(0).diff(axis=1) - diff['{}0'.format( - var)] = 1 # always return the first stress period - changed = diff.sum(axis=0) != 0 - keep.append(df.loc[:, changed.index[changed]]) - df = pd.concat(keep, axis=1) - df = df.reset_index() - df.insert(len(names), 'node', df.i * self._model.ncol + df.j) - return df - - def add_record(self, kper, index, values): - # Add a record to possible already set list for a given kper - # index is a list of k,i,j or nodes. - # values is a list of floats. 
- # The length of index + values must be equal to the number of names - # in dtype - assert len(index) + len(values) == len(self.dtype), \ - "MfList.add_record() error: length of index arg +" + \ - "length of value arg != length of self dtype" - # If we already have something for this kper, then add to it - if kper in list(self.__data.keys()): - if self.vtype[kper] == int: - # If a 0 or -1, reset - self.__data[kper] = self.get_empty(1) - self.__vtype[kper] = np.recarray - elif self.vtype[kper] == str: - # If filename, load into recarray - d = self.__fromfile(self.data[kper]) - d.resize(d.shape[0], d.shape[1]) - self.__data[kper] = d - self.__vtype[kper] = np.recarray - elif self.vtype[kper] == np.recarray: - # Extend the recarray - self.__data[kper] = np.append( - self.__data[kper], self.get_empty(1)) - else: - self.__data[kper] = self.get_empty(1) - self.__vtype[kper] = np.recarray - rec = list(index) - rec.extend(list(values)) - try: - self.__data[kper][-1] = tuple(rec) - except Exception as e: - raise Exception("MfList.add_record() error: adding record to " + \ - "recarray: " + str(e)) - - def __getitem__(self, kper): - # Get the recarray for a given kper - # If the data entry for kper is a string, - # return the corresponding recarray, - # but don't reset the value in the data dict - # assert kper in list(self.data.keys()), "MfList.__getitem__() kper " + \ - # str(kper) + " not in data.keys()" - try: - kper = int(kper) - except Exception as e: - raise Exception( - "MfList error: _getitem__() passed invalid kper index:" - + str(kper)) - if kper not in list(self.data.keys()): - if kper == 0: - return self.get_empty() - else: - return self.data[self.__find_last_kper(kper)] - if self.vtype[kper] == int: - if self.data[kper] == 0: - return self.get_empty() - else: - return self.data[self.__find_last_kper(kper)] - if self.vtype[kper] == str: - return self.__fromfile(self.data[kper]) - if self.vtype[kper] == np.recarray: - return self.data[kper] - - def __setitem__(self, kper, data): - if kper in list(self.__data.keys()): - if self._model.verbose: - print('removing existing data for kper={}'.format(kper)) - self.data.pop(kper) - # If data is a list, then all we can do is try to cast it to - # an ndarray, then cast again to a recarray - if isinstance(data, list): - # warnings.warn("MfList casting list to array") - try: - data = np.array(data) - except Exception as e: - raise Exception("MfList error: casting list to ndarray: " + \ - str(e)) - # cast data - if isinstance(data, int): - self.__cast_int(kper, data) - elif isinstance(data, np.recarray): - self.__cast_recarray(kper, data) - # A single ndarray - elif isinstance(data, np.ndarray): - self.__cast_ndarray(kper, data) - # A single filename - elif isinstance(data, str): - self.__cast_str(kper, data) - else: - raise Exception("MfList error: unsupported data type: " + \ - str(type(data))) - - # raise NotImplementedError("MfList.__setitem__() not implemented") - - def __fromfile(self, f): - # d = np.fromfile(f,dtype=self.dtype,count=count) - try: - d = np.genfromtxt(f, dtype=self.dtype) - except Exception as e: - raise Exception("MfList.__fromfile() error reading recarray " + - "from file " + str(e)) - return d - - def get_filenames(self): - kpers = list(self.data.keys()) - kpers.sort() - filenames = [] - first = kpers[0] - for kper in list(range(0, max(self._model.nper, max(kpers) + 1))): - # Fill missing early kpers with 0 - if (kper < first): - itmp = 0 - kper_vtype = int - elif (kper in kpers): - kper_vtype = self.__vtype[kper] - - if 
self._model.array_free_format and self._model.external_path is\ - not None: - # py_filepath = '' - # py_filepath = os.path.join(py_filepath, - # self._model.external_path) - filename = self.package.name[0] + "_{0:04d}.dat".format(kper) - filenames.append(filename) - return filenames - - def get_filename(self, kper): - ext = "dat" - if self.binary: - ext = 'bin' - return self.package.name[0] + '_{0:04d}.{1}'.format(kper, ext) - - @property - def binary(self): - return bool(self.__binary) - - def write_transient(self, f, single_per=None, forceInternal=False): - # forceInternal overrides isExternal (set below) for cases where - # external arrays are not supported (oh hello MNW1!) - # write the transient sequence described by the data dict - nr, nc, nl, nper = self._model.get_nrow_ncol_nlay_nper() - assert hasattr(f, "read"), "MfList.write() error: " + \ - "f argument must be a file handle" - kpers = list(self.data.keys()) - kpers.sort() - first = kpers[0] - if single_per is None: - loop_over_kpers = list(range(0, max(nper, max(kpers) + 1))) - else: - if not isinstance(single_per, list): - single_per = [single_per] - loop_over_kpers = single_per - - for kper in loop_over_kpers: - # Fill missing early kpers with 0 - if kper < first: - itmp = 0 - kper_vtype = int - elif kper in kpers: - kper_data = self.__data[kper] - kper_vtype = self.__vtype[kper] - if (kper_vtype == str): - if (not self._model.array_free_format): - kper_data = self.__fromfile(kper_data) - kper_vtype = np.recarray - itmp = self.get_itmp(kper) - if kper_vtype == np.recarray: - itmp = kper_data.shape[0] - elif (kper_vtype == int) or (kper_vtype is None): - itmp = kper_data - # Fill late missing kpers with -1 - else: - itmp = -1 - kper_vtype = int - - f.write(" {0:9d} {1:9d} # stress period {2:d}\n" - .format(itmp, 0, kper + 1)) - - isExternal = False - if self._model.array_free_format and \ - self._model.external_path is not None and \ - forceInternal is False: - isExternal = True - if self.__binary: - isExternal = True - if isExternal: - if kper_vtype == np.recarray: - py_filepath = '' - if self._model.model_ws is not None: - py_filepath = self._model.model_ws - if self._model.external_path is not None: - py_filepath = os.path.join(py_filepath, - self._model.external_path) - filename = self.get_filename(kper) - py_filepath = os.path.join(py_filepath, filename) - model_filepath = filename - if self._model.external_path is not None: - model_filepath = os.path.join( - self._model.external_path, - filename) - self.__tofile(py_filepath, kper_data) - kper_vtype = str - kper_data = model_filepath - - if kper_vtype == np.recarray: - name = f.name - if self.__binary or not numpy114: - f.close() - # switch file append mode to binary - with open(name, 'ab+') as f: - self.__tofile(f, kper_data) - # continue back to non-binary - f = open(name, 'a') - else: - self.__tofile(f, kper_data) - elif kper_vtype == str: - f.write(' open/close ' + kper_data) - if self.__binary: - f.write(' (BINARY)') - f.write('\n') - - def __tofile(self, f, data): - # Write the recarray (data) to the file (or file handle) f - assert isinstance(data, np.recarray), "MfList.__tofile() data arg " + \ - "not a recarray" - - # Add one to the kij indices - lnames = [name.lower() for name in self.dtype.names] - # --make copy of data for multiple calls - d = data.copy() - for idx in ['k', 'i', 'j', 'node']: - if idx in lnames: - d[idx] += 1 - if self.__binary: - dtype2 = [] - for name in self.dtype.names: - dtype2.append((name, np.float32)) - dtype2 = np.dtype(dtype2) - d = 
np.array(d, dtype=dtype2) - d.tofile(f) - else: - np.savetxt(f, d, fmt=self.fmt_string, delimiter='') - - def check_kij(self): - names = self.dtype.names - if ('k' not in names) or ('i' not in names) or ('j' not in names): - warnings.warn("MfList.check_kij(): index fieldnames \'k,i,j\' " + - "not found in self.dtype names: " + str(names)) - return - nr, nc, nl, nper = self._model.get_nrow_ncol_nlay_nper() - if nl == 0: - warnings.warn("MfList.check_kij(): unable to get dis info from " + - "model") - return - for kper in list(self.data.keys()): - out_idx = [] - data = self[kper] - if data is not None: - k = data['k'] - k_idx = np.where(np.logical_or(k < 0, k >= nl)) - if k_idx[0].shape[0] > 0: - out_idx.extend(list(k_idx[0])) - i = data['i'] - i_idx = np.where(np.logical_or(i < 0, i >= nr)) - if i_idx[0].shape[0] > 0: - out_idx.extend(list(i_idx[0])) - j = data['j'] - j_idx = np.where(np.logical_or(j < 0, j >= nc)) - if j_idx[0].shape[0]: - out_idx.extend(list(j_idx[0])) - - if len(out_idx) > 0: - warn_str = "MfList.check_kij(): warning the following " + \ - "indices are out of bounds in kper " + \ - str(kper) + ':\n' - for idx in out_idx: - d = data[idx] - warn_str += " {0:9d} {1:9d} {2:9d}\n".format( - d['k'] + 1, d['i'] + 1, d['j'] + 1) - warnings.warn(warn_str) - - def __find_last_kper(self, kper): - kpers = list(self.data.keys()) - kpers.sort() - last = 0 - for kkper in kpers[::-1]: - # if this entry is valid - if self.vtype[kkper] != int or self.data[kkper] != -1: - last = kkper - if kkper <= kper: - break - return kkper - - def get_indices(self): - """ - a helper function for plotting - get all unique indices - """ - names = self.dtype.names - lnames = [] - [lnames.append(name.lower()) for name in names] - if 'k' not in lnames or 'j' not in lnames: - raise NotImplementedError("MfList.get_indices requires kij") - kpers = list(self.data.keys()) - kpers.sort() - indices = [] - for i, kper in enumerate(kpers): - kper_vtype = self.__vtype[kper] - if (kper_vtype != int) or (kper_vtype is not None): - d = self.data[kper] - if not indices: - indices = list(zip(d['k'], d['i'], d['j'])) - else: - new_indices = list(zip(d['k'], d['i'], d['j'])) - for ni in new_indices: - if ni not in indices: - indices.append(ni) - return indices - - def attribute_by_kper(self, attr, function=np.mean, idx_val=None): - assert attr in self.dtype.names - if idx_val is not None: - assert idx_val[0] in self.dtype.names - kpers = list(self.data.keys()) - kpers.sort() - values = [] - for kper in range(0, max(self._model.nper, max(kpers))): - - if kper < min(kpers): - values.append(0) - elif kper > max(kpers) or kper not in kpers: - values.append(values[-1]) - else: - kper_data = self.__data[kper] - if idx_val is not None: - kper_data = kper_data[ - np.where(kper_data[idx_val[0]] == idx_val[1])] - # kper_vtype = self.__vtype[kper] - v = function(kper_data[attr]) - values.append(v) - return values - - def plot(self, key=None, names=None, kper=0, - filename_base=None, file_extension=None, mflay=None, - **kwargs): - """ - Plot stress period boundary condition (MfList) data for a specified - stress period - - Parameters - ---------- - key : str - MfList dictionary key. (default is None) - names : list - List of names for figure titles. (default is None) - kper : int - MODFLOW zero-based stress period number to return. (default is zero) - filename_base : str - Base file name that will be used to automatically generate file - names for output image files. Plots will be exported as image - files if file_name_base is not None. 
(default is None) - file_extension : str - Valid matplotlib.pyplot file extension for savefig(). Only used - if filename_base is not None. (default is 'png') - mflay : int - MODFLOW zero-based layer number to return. If None, then all - all layers will be included. (default is None) - **kwargs : dict - axes : list of matplotlib.pyplot.axis - List of matplotlib.pyplot.axis that will be used to plot - data for each layer. If axes=None axes will be generated. - (default is None) - pcolor : bool - Boolean used to determine if matplotlib.pyplot.pcolormesh - plot will be plotted. (default is True) - colorbar : bool - Boolean used to determine if a color bar will be added to - the matplotlib.pyplot.pcolormesh. Only used if pcolor=True. - (default is False) - inactive : bool - Boolean used to determine if a black overlay in inactive - cells in a layer will be displayed. (default is True) - contour : bool - Boolean used to determine if matplotlib.pyplot.contour - plot will be plotted. (default is False) - clabel : bool - Boolean used to determine if matplotlib.pyplot.clabel - will be plotted. Only used if contour=True. (default is False) - grid : bool - Boolean used to determine if the model grid will be plotted - on the figure. (default is False) - masked_values : list - List of unique values to be excluded from the plot. - - Returns - ---------- - out : list - Empty list is returned if filename_base is not None. Otherwise - a list of matplotlib.pyplot.axis is returned. - - See Also - -------- - - Notes - ----- - - Examples - -------- - >>> import flopy - >>> ml = flopy.modflow.Modflow.load('test.nam') - >>> ml.wel.stress_period_data.plot(ml.wel, kper=1) - - """ - - from flopy.plot import PlotUtilities - axes = PlotUtilities._plot_mflist_helper(self, key=key, names=names, - kper=kper, - filename_base=filename_base, - file_extension=file_extension, - mflay=mflay, - **kwargs) - - return axes - - def to_shapefile(self, filename, kper=None): - """ - Export stress period boundary condition (MfList) data for a specified - stress period - - Parameters - ---------- - filename : str - Shapefile name to write - kper : int - MODFLOW zero-based stress period number to return. (default is None) - - Returns - ---------- - None - - See Also - -------- - - Notes - ----- - - Examples - -------- - >>> import flopy - >>> ml = flopy.modflow.Modflow.load('test.nam') - >>> ml.wel.to_shapefile('test_hk.shp', kper=1) - """ - import warnings - warnings.warn( - "Deprecation warning: to_shapefile() is deprecated. use .export()") - - # if self.sr is None: - # raise Exception("MfList.to_shapefile: SpatialReference not set") - # import flopy.utils.flopy_io as fio - # if kper is None: - # keys = self.data.keys() - # keys.sort() - # else: - # keys = [kper] - # array_dict = {} - # for kk in keys: - # arrays = self.to_array(kk) - # for name, array in arrays.items(): - # for k in range(array.shape[0]): - # #aname = name+"{0:03d}_{1:02d}".format(kk, k) - # n = fio.shape_attr_name(name, length=4) - # aname = "{}{:03d}{:03d}".format(n, k+1, int(kk)+1) - # array_dict[aname] = array[k] - # fio.write_grid_shapefile(filename, self.sr, array_dict) - self.export(filename, kper=kper) - - def to_array(self, kper=0, mask=False): - """ - Convert stress period boundary condition (MfList) data for a - specified stress period to a 3-D numpy array - - Parameters - ---------- - kper : int - MODFLOW zero-based stress period number to return. 
(default is zero) - mask : boolean - return array with np.NaN instead of zero - Returns - ---------- - out : dict of numpy.ndarrays - Dictionary of 3-D numpy arrays containing the stress period data for - a selected stress period. The dictionary keys are the MfList dtype - names for the stress period data ('cond', 'flux', 'bhead', etc.). - - See Also - -------- - - Notes - ----- - - Examples - -------- - >>> import flopy - >>> ml = flopy.modflow.Modflow.load('test.nam') - >>> v = ml.wel.stress_period_data.to_array(kper=1) - - """ - i0 = 3 - unstructured = False - if 'inode' in self.dtype.names: - raise NotImplementedError() - - if 'node' in self.dtype.names: - if 'i' not in self.dtype.names and\ - "j" not in self.dtype.names: - i0 = 1 - unstructured = True - - arrays = {} - for name in self.dtype.names[i0:]: - if not self.dtype.fields[name][0] == object: - if unstructured: - arr = np.zeros((self._model.nlay * self._model.ncpl,)) - else: - arr = np.zeros((self._model.nlay, self._model.nrow, - self._model.ncol)) - arrays[name] = arr.copy() - - # if this kper is not found - if kper not in self.data.keys(): - kpers = list(self.data.keys()) - kpers.sort() - # if this kper is before the first entry, - # (maybe) mask and return - if kper < kpers[0]: - if mask: - for name, arr in arrays.items(): - arrays[name][:] = np.NaN - return arrays - # find the last kper - else: - kper = self.__find_last_kper(kper) - - sarr = self.data[kper] - - if np.isscalar(sarr): - # if there are no entries for this kper - if sarr == 0: - if mask: - for name, arr in arrays.items(): - arrays[name][:] = np.NaN - return arrays - else: - raise Exception("MfList: something bad happened") - - for name, arr in arrays.items(): - if unstructured: - cnt = np.zeros((self._model.nlay * self._model.ncpl,), - dtype=np.float) - else: - cnt = np.zeros( - (self._model.nlay, self._model.nrow, self._model.ncol), - dtype=np.float) - #print(name,kper) - for rec in sarr: - if unstructured: - arr[rec['node']] += rec[name] - cnt[rec['node']] += 1. - else: - arr[rec['k'], rec['i'], rec['j']] += rec[name] - cnt[rec['k'], rec['i'], rec['j']] += 1. - # average keys that should not be added - if name not in ('cond', 'flux'): - idx = cnt > 0. - arr[idx] /= cnt[idx] - if mask: - arr = np.ma.masked_where(cnt == 0., arr) - arr[cnt == 0.] 
= np.NaN - - arrays[name] = arr.copy() - # elif mask: - # for name, arr in arrays.items(): - # arrays[name][:] = np.NaN - return arrays - - @property - def masked_4D_arrays(self): - # get the first kper - arrays = self.to_array(kper=0, mask=True) - - # initialize these big arrays - m4ds = {} - for name, array in arrays.items(): - m4d = np.zeros((self._model.nper, self._model.nlay, - self._model.nrow, self._model.ncol)) - m4d[0, :, :, :] = array - m4ds[name] = m4d - for kper in range(1, self._model.nper): - arrays = self.to_array(kper=kper, mask=True) - for name, array in arrays.items(): - m4ds[name][kper, :, :, :] = array - return m4ds - - def masked_4D_arrays_itr(self): - # get the first kper - arrays = self.to_array(kper=0, mask=True) - - # initialize these big arrays - for name, array in arrays.items(): - m4d = np.zeros((self._model.nper, self._model.nlay, - self._model.nrow, self._model.ncol)) - m4d[0, :, :, :] = array - for kper in range(1, self._model.nper): - arrays = self.to_array(kper=kper, mask=True) - for tname, array in arrays.items(): - if tname == name: - m4d[kper, :, :, :] = array - yield name, m4d - - @property - def array(self): - return self.masked_4D_arrays - - @classmethod - def from_4d(cls, model, pak_name, m4ds): - """construct an MfList instance from a dict of - (attribute_name,masked 4D ndarray - Parameters - ---------- - model : mbase derived type - pak_name : str package name (e.g GHB) - m4ds : {attribute name:4d masked numpy.ndarray} - Returns - ------- - MfList instance - """ - sp_data = MfList.masked4D_arrays_to_stress_period_data( - model.get_package(pak_name).get_default_dtype(), m4ds) - return cls(model.get_package(pak_name), data=sp_data) - - @staticmethod - def masked4D_arrays_to_stress_period_data(dtype, m4ds): - """ convert a dictionary of 4-dim masked arrays to - a stress_period_data style dict of recarray - Parameters - ---------- - dtype : numpy dtype - - m4ds : dict {name:masked numpy 4-dim ndarray} - Returns - ------- - dict {kper:recarray} - """ - assert isinstance(m4ds, dict) - for name, m4d in m4ds.items(): - assert isinstance(m4d, np.ndarray) - assert name in dtype.names - assert m4d.ndim == 4 - keys = list(m4ds.keys()) - - for i1, key1 in enumerate(keys): - a1 = np.isnan(m4ds[key1]) - for i2, key2 in enumerate(keys[i1:]): - a2 = np.isnan(m4ds[key2]) - if not np.array_equal(a1, a2): - raise Exception("Transient2d error: masking not equal" + \ - " for {0} and {1}".format(key1, key2)) - - sp_data = {} - for kper in range(m4d.shape[0]): - vals = {} - for name, m4d in m4ds.items(): - arr = m4d[kper, :, :, :] - isnan = np.argwhere(~np.isnan(arr)) - v = [] - for k, i, j in isnan: - v.append(arr[k, i, j]) - vals[name] = v - kk = isnan[:, 0] - ii = isnan[:, 1] - jj = isnan[:, 2] - - spd = np.recarray(shape=isnan.shape[0], dtype=dtype) - spd["i"] = ii - spd["k"] = kk - spd["j"] = jj - for n, v in vals.items(): - spd[n] = v - sp_data[kper] = spd - return sp_data +""" +util_list module. Contains the mflist class. + This classes encapsulates modflow-style list inputs away + from the individual packages. The end-user should not need to + instantiate this class directly. 
+ + some more info + +""" +from __future__ import division, print_function + +import os +import warnings +import numpy as np +from ..datbase import DataInterface, DataListInterface, DataType +from ..utils.recarray_utils import create_empty_recarray + +try: + from numpy.lib import NumpyVersion + numpy114 = NumpyVersion(np.__version__) >= '1.14.0' +except ImportError: + numpy114 = False + + +class MfList(DataInterface, DataListInterface): + """ + a generic object for handling transient boundary condition lists + + Parameters + ---------- + package : package object + The package object (of type :class:`flopy.pakbase.Package`) to which + this MfList will be added. + data : varies + the data of the transient list (optional). (the default is None) + + Attributes + ---------- + mxact : int + the max number of active bc for any stress period + + Methods + ------- + add_record(kper,index,value) : None + add a record to stress period kper at index location + write_transient(f) : None + write the transient sequence to the model input file f + check_kij() : None + checks for boundaries outside of model domain - issues warnings only + + See Also + -------- + + Notes + ----- + + Examples + -------- + + """ + + def __init__(self, package, data=None, dtype=None, model=None, + list_free_format=None, binary=False): + + if isinstance(data, MfList): + for attr in data.__dict__.items(): + setattr(self, attr[0], attr[1]) + if model is None: + self._model = package.parent + else: + self._model = model + self._package = package + return + + self._package = package + if model is None: + self._model = package.parent + else: + self._model = model + if dtype is None: + assert isinstance(self.package.dtype, np.dtype) + self.__dtype = self.package.dtype + else: + self.__dtype = dtype + self.__binary = binary + self.__vtype = {} + self.__data = {} + if data is not None: + self.__cast_data(data) + self.__df = None + if list_free_format is None: + if package.parent.version == "mf2k": + list_free_format = False + self.list_free_format = list_free_format + return + + @property + def name(self): + return self.package.name + + @property + def mg(self): + return self._model.modelgrid + + @property + def sr(self): + return self.mg.sr + + @property + def model(self): + return self._model + + @property + def package(self): + return self._package + + @property + def data_type(self): + return DataType.transientlist + + @property + def plotable(self): + return True + + def get_empty(self, ncell=0): + d = create_empty_recarray(ncell, self.dtype, default_value=-1.0E+10) + return d + + def export(self, f, **kwargs): + from flopy import export + return export.utils.mflist_export(f, self, **kwargs) + + def append(self, other): + """ append the recarrays from one MfList to another + Parameters + ---------- + other: variable: an item that can be cast in to an MfList + that corresponds with self + Returns + ------- + dict of {kper:recarray} + """ + if not isinstance(other, MfList): + other = MfList(self.package, data=other, dtype=self.dtype, + model=self._model, + list_free_format=self.list_free_format) + msg = "MfList.append(): other arg must be " + \ + "MfList or dict, not {0}".format(type(other)) + assert isinstance(other, MfList), msg + + other_kpers = list(other.data.keys()) + other_kpers.sort() + + self_kpers = list(self.data.keys()) + self_kpers.sort() + + new_dict = {} + for kper in range(self._model.nper): + other_data = other[kper].copy() + self_data = self[kper].copy() + + other_len = other_data.shape[0] + self_len = 
self_data.shape[0] + + if (other_len == 0 and self_len == 0) or \ + (kper not in self_kpers and kper not in other_kpers): + continue + elif self_len == 0: + new_dict[kper] = other_data + elif other_len == 0: + new_dict[kper] = self_data + else: + new_len = other_data.shape[0] + self_data.shape[0] + new_data = np.recarray(new_len, dtype=self.dtype) + new_data[:self_len] = self_data + new_data[self_len:self_len + other_len] = other_data + new_dict[kper] = new_data + + + return new_dict + + def drop(self, fields): + """drop fields from an MfList + + Parameters + ---------- + fields : list or set of field names to drop + + Returns + ------- + dropped : MfList without the dropped fields + """ + if not isinstance(fields, list): + fields = [fields] + names = [n for n in self.dtype.names if n not in fields] + dtype = np.dtype( + [(k, d) for k, d in self.dtype.descr if k not in fields]) + spd = {} + for k, v in self.data.items(): + # because np 1.9 doesn't support indexing by list of columns + newarr = np.array([self.data[k][n] for n in names]).transpose() + newarr = np.array(list(map(tuple, newarr)), dtype=dtype).view( + np.recarray) + for n in dtype.names: + newarr[n] = self.data[k][n] + spd[k] = newarr + return MfList(self.package, spd, dtype=dtype) + + @property + def data(self): + return self.__data + + @property + def df(self): + if self.__df is None: + self.__df = self.get_dataframe() + return self.__df + + @property + def vtype(self): + return self.__vtype + + @property + def dtype(self): + return self.__dtype + + # Get the itmp for a given kper + def get_itmp(self, kper): + if kper not in list(self.__data.keys()): + return None + if self.__vtype[kper] is None: + return -1 + # If an external file, have to load it + if self.__vtype[kper] == str: + return self.__fromfile(self.__data[kper]).shape[0] + if self.__vtype[kper] == np.recarray: + return self.__data[kper].shape[0] + # If not any of the above, it must be an int + return self.__data[kper] + + @property + def mxact(self): + mxact = 0 + for kper in list(self.__data.keys()): + mxact = max(mxact, self.get_itmp(kper)) + return mxact + + @property + def fmt_string(self): + """Returns a C-style fmt string for numpy savetxt that corresponds to + the dtype""" + if self.list_free_format is not None: + use_free = self.list_free_format + else: + use_free = True + if self.package.parent.has_package('bas6'): + use_free = self.package.parent.bas6.ifrefm + # mt3d list data is fixed format + if 'mt3d' in self.package.parent.version.lower(): + use_free = False + fmts = [] + for field in self.dtype.descr: + vtype = field[1][1].lower() + if vtype in ('i', 'b'): + if use_free: + fmts.append('%9d') + else: + fmts.append('%10d') + elif vtype == 'f': + if use_free: + if numpy114: + # Use numpy's floating-point formatter (Dragon4) + fmts.append('%15s') + else: + fmts.append('%15.7E') + else: + fmts.append('%10G') + elif vtype == 'o': + if use_free: + fmts.append('%9s') + else: + fmts.append('%10s') + elif vtype == 's': + msg = ("MfList.fmt_string error: 'str' type found in dtype. 
" + "This gives unpredictable results when " + "recarray to file - change to 'object' type") + raise TypeError(msg) + else: + raise TypeError("MfList.fmt_string error: unknown vtype in " + "field: {}".format(field)) + if use_free: + fmt_string = ' ' + ' '.join(fmts) + else: + fmt_string = ''.join(fmts) + return fmt_string + + # Private method to cast the data argument + # Should only be called by the constructor + def __cast_data(self, data): + # If data is a list, then all we can do is try to cast it to + # an ndarray, then cast again to a recarray + if isinstance(data, list): + # warnings.warn("MfList casting list to array") + try: + data = np.array(data) + except Exception as e: + raise Exception("MfList error: casting list to ndarray: " + \ + str(e)) + + # If data is a dict, the we have to assume it is keyed on kper + if isinstance(data, dict): + if not list(data.keys()): + raise Exception("MfList error: data dict is empty") + for kper, d in data.items(): + try: + kper = int(kper) + except Exception as e: + raise Exception("MfList error: data dict key " + \ + "{0:s} not integer: ".format(kper) + \ + str(type(kper)) + "\n" + str(e)) + # Same as before, just try... + if isinstance(d, list): + # warnings.warn("MfList: casting list to array at " +\ + # "kper {0:d}".format(kper)) + try: + d = np.array(d) + except Exception as e: + raise Exception("MfList error: casting list " + \ + "to ndarray: " + str(e)) + + # super hack - sick of recarrays already + # if (isinstance(d,np.ndarray) and len(d.dtype.fields) > 1): + # d = d.view(np.recarray) + + if isinstance(d, np.recarray): + self.__cast_recarray(kper, d) + elif isinstance(d, np.ndarray): + self.__cast_ndarray(kper, d) + elif isinstance(d, int): + self.__cast_int(kper, d) + elif isinstance(d, str): + self.__cast_str(kper, d) + elif d is None: + self.__data[kper] = -1 + self.__vtype[kper] = None + else: + raise Exception("MfList error: unsupported data type: " + + str(type(d)) + " at kper " + + "{0:d}".format(kper)) + + # A single recarray - same MfList for all stress periods + elif isinstance(data, np.recarray): + self.__cast_recarray(0, data) + # A single ndarray + elif isinstance(data, np.ndarray): + self.__cast_ndarray(0, data) + # A single filename + elif isinstance(data, str): + self.__cast_str(0, data) + else: + raise Exception("MfList error: unsupported data type: " + \ + str(type(data))) + + def __cast_str(self, kper, d): + # If d is a string, assume it is a filename and check that it exists + assert os.path.exists(d), "MfList error: dict filename (string) \'" + \ + d + "\' value for " + \ + "kper {0:d} not found".format(kper) + self.__data[kper] = d + self.__vtype[kper] = str + + def __cast_int(self, kper, d): + # If d is an integer, then it must be 0 or -1 + if d > 0: + raise Exception("MfList error: dict integer value for " + "kper {0:10d} must be 0 or -1, " + "not {1:10d}".format(kper, d)) + if d == 0: + self.__data[kper] = 0 + self.__vtype[kper] = None + else: + self.__data[kper] = -1 + self.__vtype[kper] = None + + def __cast_recarray(self, kper, d): + assert d.dtype == self.__dtype, "MfList error: recarray dtype: " + \ + str(d.dtype) + " doesn't match " + \ + "self dtype: " + str(self.dtype) + self.__data[kper] = d + self.__vtype[kper] = np.recarray + + def __cast_ndarray(self, kper, d): + d = np.atleast_2d(d) + if d.dtype != self.__dtype: + assert d.shape[1] == len(self.dtype), "MfList error: ndarray " + \ + "shape " + str(d.shape) + \ + " doesn't match dtype " + \ + "len: " + \ + str(len(self.dtype)) + # 
warnings.warn("MfList: ndarray dtype does not match self " +\ + # "dtype, trying to cast") + try: + self.__data[kper] = np.core.records.fromarrays(d.transpose(), + dtype=self.dtype) + except Exception as e: + raise Exception("MfList error: casting ndarray to recarray: " + \ + str(e)) + self.__vtype[kper] = np.recarray + + def get_dataframe(self, squeeze=True): + """ + Cast recarrays for stress periods into single + dataframe containing all stress periods. + + Parameters + ---------- + squeeze : bool + Reduce number of columns in dataframe to only include + stress periods where a variable changes. + + Returns + ------- + df : dataframe + Dataframe of shape nrow = ncells, ncol = nvar x nper. If + the squeeze option is chosen, nper is the number of + stress periods where at least one cells is different, + otherwise it is equal to the number of keys in MfList.data. + + Notes + ----- + Requires pandas. + + """ + try: + import pandas as pd + except Exception as e: + msg = 'MfList.get_dataframe() requires pandas' + raise ImportError(msg) + + # make a dataframe of all data for all stress periods + names = ['k', 'i', 'j'] + if 'MNW2' in self.package.name: + names += ['wellid'] + + # find relevant variable names + # may have to iterate over the first stress period + for per in range(self._model.nper): + if hasattr(self.data[per], 'dtype'): + varnames = list([n for n in self.data[per].dtype.names + if n not in names]) + break + + # create list of dataframes for each stress period + # each with index of k, i, j + dfs = [] + for per in self.data.keys(): + recs = self.data[per] + if recs is None or len(recs) == 0: + # add an empty dataframe if a stress period is + # empty (e.g. no pumping during a predevelopment + # period) + columns = names + list(['{}{}'.format(c, per) + for c in varnames]) + dfi = pd.DataFrame(data=None, columns=columns) + dfi = dfi.set_index(names) + else: + dfi = pd.DataFrame.from_records(recs) + dfg = dfi.groupby(names) + count = dfg[varnames[0]].count().rename('n') + if (count > 1).values.any(): + print("Duplicated list entry locations aggregated " + "for kper {}".format(per)) + for kij in count[count > 1].index.values: + print(" (k,i,j) {}".format(kij)) + dfi = dfg.sum() # aggregate + dfi.columns = list(['{}{}'.format(c, per) for c in varnames]) + dfs.append(dfi) + df = pd.concat(dfs, axis=1) + if squeeze: + keep = [] + for var in varnames: + diffcols = list([n for n in df.columns if var in n]) + diff = df[diffcols].fillna(0).diff(axis=1) + diff['{}0'.format( + var)] = 1 # always return the first stress period + changed = diff.sum(axis=0) != 0 + keep.append(df.loc[:, changed.index[changed]]) + df = pd.concat(keep, axis=1) + df = df.reset_index() + df.insert(len(names), 'node', df.i * self._model.ncol + df.j) + return df + + def add_record(self, kper, index, values): + # Add a record to possible already set list for a given kper + # index is a list of k,i,j or nodes. + # values is a list of floats. 
+        # The length of index + values must be equal to the number of names
+        # in dtype.
+        assert len(index) + len(values) == len(self.dtype), \
+            "MfList.add_record() error: length of index arg + " + \
+            "length of value arg != length of self dtype"
+        # If we already have something for this kper, then add to it
+        if kper in list(self.__data.keys()):
+            if self.vtype[kper] == int:
+                # If a 0 or -1, reset
+                self.__data[kper] = self.get_empty(1)
+                self.__vtype[kper] = np.recarray
+            elif self.vtype[kper] == str:
+                # If filename, load into recarray
+                d = self.__fromfile(self.data[kper])
+                d.resize(d.shape[0], d.shape[1])
+                self.__data[kper] = d
+                self.__vtype[kper] = np.recarray
+            elif self.vtype[kper] == np.recarray:
+                # Extend the recarray
+                self.__data[kper] = np.append(
+                    self.__data[kper], self.get_empty(1))
+        else:
+            self.__data[kper] = self.get_empty(1)
+            self.__vtype[kper] = np.recarray
+        rec = list(index)
+        rec.extend(list(values))
+        try:
+            self.__data[kper][-1] = tuple(rec)
+        except Exception as e:
+            raise Exception("MfList.add_record() error: adding record to " +
+                            "recarray: " + str(e))
+
+    def __getitem__(self, kper):
+        # Get the recarray for a given kper
+        # If the data entry for kper is a string,
+        # return the corresponding recarray,
+        # but don't reset the value in the data dict
+        # assert kper in list(self.data.keys()), "MfList.__getitem__() kper " + \
+        #                                        str(kper) + " not in data.keys()"
+        try:
+            kper = int(kper)
+        except Exception as e:
+            raise Exception(
+                "MfList error: __getitem__() passed invalid kper index: " +
+                str(kper))
+        if kper not in list(self.data.keys()):
+            if kper == 0:
+                return self.get_empty()
+            else:
+                return self.data[self.__find_last_kper(kper)]
+        if self.vtype[kper] == int:
+            if self.data[kper] == 0:
+                return self.get_empty()
+            else:
+                return self.data[self.__find_last_kper(kper)]
+        if self.vtype[kper] == str:
+            return self.__fromfile(self.data[kper])
+        if self.vtype[kper] == np.recarray:
+            return self.data[kper]
+
+    def __setitem__(self, kper, data):
+        if kper in list(self.__data.keys()):
+            if self._model.verbose:
+                print('removing existing data for kper={}'.format(kper))
+            self.data.pop(kper)
+        # If data is a list, then all we can do is try to cast it to
+        # an ndarray, then cast again to a recarray
+        if isinstance(data, list):
+            # warnings.warn("MfList casting list to array")
+            try:
+                data = np.array(data)
+            except Exception as e:
+                raise Exception("MfList error: casting list to ndarray: " +
+                                str(e))
+        # cast data
+        if isinstance(data, int):
+            self.__cast_int(kper, data)
+        elif isinstance(data, np.recarray):
+            self.__cast_recarray(kper, data)
+        # A single ndarray
+        elif isinstance(data, np.ndarray):
+            self.__cast_ndarray(kper, data)
+        # A single filename
+        elif isinstance(data, str):
+            self.__cast_str(kper, data)
+        else:
+            raise Exception("MfList error: unsupported data type: " +
+                            str(type(data)))
+
+        # raise NotImplementedError("MfList.__setitem__() not implemented")
+
+    def __fromfile(self, f):
+        # d = np.fromfile(f,dtype=self.dtype,count=count)
+        try:
+            d = np.genfromtxt(f, dtype=self.dtype)
+        except Exception as e:
+            raise Exception("MfList.__fromfile() error reading recarray " +
+                            "from file: " + str(e))
+        return d
+
+    def get_filenames(self):
+        kpers = list(self.data.keys())
+        kpers.sort()
+        filenames = []
+        first = kpers[0]
+        for kper in list(range(0, max(self._model.nper, max(kpers) + 1))):
+            # Fill missing early kpers with 0
+            if (kper < first):
+                itmp = 0
+                kper_vtype = int
+            elif (kper in kpers):
+                kper_vtype = self.__vtype[kper]
+
+            if 
self._model.array_free_format and self._model.external_path is\ + not None: + # py_filepath = '' + # py_filepath = os.path.join(py_filepath, + # self._model.external_path) + filename = self.package.name[0] + "_{0:04d}.dat".format(kper) + filenames.append(filename) + return filenames + + def get_filename(self, kper): + ext = "dat" + if self.binary: + ext = 'bin' + return self.package.name[0] + '_{0:04d}.{1}'.format(kper, ext) + + @property + def binary(self): + return bool(self.__binary) + + def write_transient(self, f, single_per=None, forceInternal=False): + # forceInternal overrides isExternal (set below) for cases where + # external arrays are not supported (oh hello MNW1!) + # write the transient sequence described by the data dict + nr, nc, nl, nper = self._model.get_nrow_ncol_nlay_nper() + assert hasattr(f, "read"), "MfList.write() error: " + \ + "f argument must be a file handle" + kpers = list(self.data.keys()) + kpers.sort() + first = kpers[0] + if single_per is None: + loop_over_kpers = list(range(0, max(nper, max(kpers) + 1))) + else: + if not isinstance(single_per, list): + single_per = [single_per] + loop_over_kpers = single_per + + for kper in loop_over_kpers: + # Fill missing early kpers with 0 + if kper < first: + itmp = 0 + kper_vtype = int + elif kper in kpers: + kper_data = self.__data[kper] + kper_vtype = self.__vtype[kper] + if (kper_vtype == str): + if (not self._model.array_free_format): + kper_data = self.__fromfile(kper_data) + kper_vtype = np.recarray + itmp = self.get_itmp(kper) + if kper_vtype == np.recarray: + itmp = kper_data.shape[0] + elif (kper_vtype == int) or (kper_vtype is None): + itmp = kper_data + # Fill late missing kpers with -1 + else: + itmp = -1 + kper_vtype = int + + f.write(" {0:9d} {1:9d} # stress period {2:d}\n" + .format(itmp, 0, kper + 1)) + + isExternal = False + if self._model.array_free_format and \ + self._model.external_path is not None and \ + forceInternal is False: + isExternal = True + if self.__binary: + isExternal = True + if isExternal: + if kper_vtype == np.recarray: + py_filepath = '' + if self._model.model_ws is not None: + py_filepath = self._model.model_ws + if self._model.external_path is not None: + py_filepath = os.path.join(py_filepath, + self._model.external_path) + filename = self.get_filename(kper) + py_filepath = os.path.join(py_filepath, filename) + model_filepath = filename + if self._model.external_path is not None: + model_filepath = os.path.join( + self._model.external_path, + filename) + self.__tofile(py_filepath, kper_data) + kper_vtype = str + kper_data = model_filepath + + if kper_vtype == np.recarray: + name = f.name + if self.__binary or not numpy114: + f.close() + # switch file append mode to binary + with open(name, 'ab+') as f: + self.__tofile(f, kper_data) + # continue back to non-binary + f = open(name, 'a') + else: + self.__tofile(f, kper_data) + elif kper_vtype == str: + f.write(' open/close ' + kper_data) + if self.__binary: + f.write(' (BINARY)') + f.write('\n') + + def __tofile(self, f, data): + # Write the recarray (data) to the file (or file handle) f + assert isinstance(data, np.recarray), "MfList.__tofile() data arg " + \ + "not a recarray" + + # Add one to the kij indices + lnames = [name.lower() for name in self.dtype.names] + # --make copy of data for multiple calls + d = data.copy() + for idx in ['k', 'i', 'j', 'node']: + if idx in lnames: + d[idx] += 1 + if self.__binary: + dtype2 = [] + for name in self.dtype.names: + dtype2.append((name, np.float32)) + dtype2 = np.dtype(dtype2) + d = 
np.array(d, dtype=dtype2)
+            d.tofile(f)
+        else:
+            np.savetxt(f, d, fmt=self.fmt_string, delimiter='')
+
+    def check_kij(self):
+        names = self.dtype.names
+        if ('k' not in names) or ('i' not in names) or ('j' not in names):
+            warnings.warn("MfList.check_kij(): index fieldnames 'k,i,j' " +
+                          "not found in self.dtype names: " + str(names))
+            return
+        nr, nc, nl, nper = self._model.get_nrow_ncol_nlay_nper()
+        if nl == 0:
+            warnings.warn("MfList.check_kij(): unable to get dis info from " +
+                          "model")
+            return
+        for kper in list(self.data.keys()):
+            out_idx = []
+            data = self[kper]
+            if data is not None:
+                k = data['k']
+                k_idx = np.where(np.logical_or(k < 0, k >= nl))
+                if k_idx[0].shape[0] > 0:
+                    out_idx.extend(list(k_idx[0]))
+                i = data['i']
+                i_idx = np.where(np.logical_or(i < 0, i >= nr))
+                if i_idx[0].shape[0] > 0:
+                    out_idx.extend(list(i_idx[0]))
+                j = data['j']
+                j_idx = np.where(np.logical_or(j < 0, j >= nc))
+                if j_idx[0].shape[0] > 0:
+                    out_idx.extend(list(j_idx[0]))
+
+                if len(out_idx) > 0:
+                    warn_str = "MfList.check_kij(): warning the following " + \
+                               "indices are out of bounds in kper " + \
+                               str(kper) + ':\n'
+                    for idx in out_idx:
+                        d = data[idx]
+                        warn_str += " {0:9d} {1:9d} {2:9d}\n".format(
+                            d['k'] + 1, d['i'] + 1, d['j'] + 1)
+                    warnings.warn(warn_str)
+
+    def __find_last_kper(self, kper):
+        kpers = list(self.data.keys())
+        kpers.sort()
+        last = 0
+        for kkper in kpers[::-1]:
+            # if this entry is valid
+            if self.vtype[kkper] != int or self.data[kkper] != -1:
+                last = kkper
+                if kkper <= kper:
+                    break
+        return kkper
+
+    def get_indices(self):
+        """
+        A helper function for plotting - get all unique indices.
+        """
+        names = self.dtype.names
+        lnames = [name.lower() for name in names]
+        if 'k' not in lnames or 'j' not in lnames:
+            raise NotImplementedError("MfList.get_indices requires kij")
+        kpers = list(self.data.keys())
+        kpers.sort()
+        indices = []
+        for i, kper in enumerate(kpers):
+            kper_vtype = self.__vtype[kper]
+            # only entries that actually carry data (not itmp flags)
+            # have indices to collect
+            if (kper_vtype != int) and (kper_vtype is not None):
+                d = self.data[kper]
+                if not indices:
+                    indices = list(zip(d['k'], d['i'], d['j']))
+                else:
+                    new_indices = list(zip(d['k'], d['i'], d['j']))
+                    for ni in new_indices:
+                        if ni not in indices:
+                            indices.append(ni)
+        return indices
+
+    def attribute_by_kper(self, attr, function=np.mean, idx_val=None):
+        assert attr in self.dtype.names
+        if idx_val is not None:
+            assert idx_val[0] in self.dtype.names
+        kpers = list(self.data.keys())
+        kpers.sort()
+        values = []
+        for kper in range(0, max(self._model.nper, max(kpers))):
+            if kper < min(kpers):
+                values.append(0)
+            elif kper > max(kpers) or kper not in kpers:
+                values.append(values[-1])
+            else:
+                kper_data = self.__data[kper]
+                if idx_val is not None:
+                    kper_data = kper_data[
+                        np.where(kper_data[idx_val[0]] == idx_val[1])]
+                # kper_vtype = self.__vtype[kper]
+                v = function(kper_data[attr])
+                values.append(v)
+        return values
+
+    def plot(self, key=None, names=None, kper=0,
+             filename_base=None, file_extension=None, mflay=None,
+             **kwargs):
+        """
+        Plot stress period boundary condition (MfList) data for a specified
+        stress period.
+
+        Parameters
+        ----------
+        key : str
+            MfList dictionary key. (default is None)
+        names : list
+            List of names for figure titles. (default is None)
+        kper : int
+            MODFLOW zero-based stress period number to return. (default is zero)
+        filename_base : str
+            Base file name that will be used to automatically generate file
+            names for output image files. Plots will be exported as image
+            files if filename_base is not None. (default is None)
+        file_extension : str
+            Valid matplotlib.pyplot file extension for savefig(). Only used
+            if filename_base is not None. (default is 'png')
+        mflay : int
+            MODFLOW zero-based layer number to return. If None, then all
+            layers will be included. (default is None)
+        **kwargs : dict
+            axes : list of matplotlib.pyplot.axis
+                List of matplotlib.pyplot.axis that will be used to plot
+                data for each layer. If axes=None axes will be generated.
+                (default is None)
+            pcolor : bool
+                Boolean used to determine if matplotlib.pyplot.pcolormesh
+                plot will be plotted. (default is True)
+            colorbar : bool
+                Boolean used to determine if a color bar will be added to
+                the matplotlib.pyplot.pcolormesh. Only used if pcolor=True.
+                (default is False)
+            inactive : bool
+                Boolean used to determine if a black overlay of the inactive
+                cells in a layer will be displayed. (default is True)
+            contour : bool
+                Boolean used to determine if matplotlib.pyplot.contour
+                plot will be plotted. (default is False)
+            clabel : bool
+                Boolean used to determine if matplotlib.pyplot.clabel
+                will be plotted. Only used if contour=True. (default is False)
+            grid : bool
+                Boolean used to determine if the model grid will be plotted
+                on the figure. (default is False)
+            masked_values : list
+                List of unique values to be excluded from the plot.
+
+        Returns
+        -------
+        out : list
+            Empty list is returned if filename_base is not None. Otherwise
+            a list of matplotlib.pyplot.axis is returned.
+
+        See Also
+        --------
+
+        Notes
+        -----
+
+        Examples
+        --------
+        >>> import flopy
+        >>> ml = flopy.modflow.Modflow.load('test.nam')
+        >>> ml.wel.stress_period_data.plot(ml.wel, kper=1)
+
+        """
+
+        from flopy.plot import PlotUtilities
+        axes = PlotUtilities._plot_mflist_helper(self, key=key, names=names,
+                                                 kper=kper,
+                                                 filename_base=filename_base,
+                                                 file_extension=file_extension,
+                                                 mflay=mflay,
+                                                 **kwargs)
+
+        return axes
+
+    def to_shapefile(self, filename, kper=None):
+        """
+        Export stress period boundary condition (MfList) data for a specified
+        stress period.
+
+        Parameters
+        ----------
+        filename : str
+            Shapefile name to write
+        kper : int
+            MODFLOW zero-based stress period number to return. (default is None)
+
+        Returns
+        -------
+        None
+
+        See Also
+        --------
+
+        Notes
+        -----
+
+        Examples
+        --------
+        >>> import flopy
+        >>> ml = flopy.modflow.Modflow.load('test.nam')
+        >>> ml.wel.to_shapefile('test_hk.shp', kper=1)
+        """
+        warnings.warn(
+            "Deprecation warning: to_shapefile() is deprecated. "
+            "Use .export() instead.")
+
+        # if self.sr is None:
+        #     raise Exception("MfList.to_shapefile: SpatialReference not set")
+        # import flopy.utils.flopy_io as fio
+        # if kper is None:
+        #     keys = self.data.keys()
+        #     keys.sort()
+        # else:
+        #     keys = [kper]
+        # array_dict = {}
+        # for kk in keys:
+        #     arrays = self.to_array(kk)
+        #     for name, array in arrays.items():
+        #         for k in range(array.shape[0]):
+        #             # aname = name+"{0:03d}_{1:02d}".format(kk, k)
+        #             n = fio.shape_attr_name(name, length=4)
+        #             aname = "{}{:03d}{:03d}".format(n, k+1, int(kk)+1)
+        #             array_dict[aname] = array[k]
+        #     fio.write_grid_shapefile(filename, self.sr, array_dict)
+        self.export(filename, kper=kper)
+
+    def to_array(self, kper=0, mask=False):
+        """
+        Convert stress period boundary condition (MfList) data for a
+        specified stress period to a 3-D numpy array.
+
+        Parameters
+        ----------
+        kper : int
+            MODFLOW zero-based stress period number to return.
(default is zero) + mask : boolean + return array with np.NaN instead of zero + Returns + ---------- + out : dict of numpy.ndarrays + Dictionary of 3-D numpy arrays containing the stress period data for + a selected stress period. The dictionary keys are the MfList dtype + names for the stress period data ('cond', 'flux', 'bhead', etc.). + + See Also + -------- + + Notes + ----- + + Examples + -------- + >>> import flopy + >>> ml = flopy.modflow.Modflow.load('test.nam') + >>> v = ml.wel.stress_period_data.to_array(kper=1) + + """ + i0 = 3 + unstructured = False + if 'inode' in self.dtype.names: + raise NotImplementedError() + + if 'node' in self.dtype.names: + if 'i' not in self.dtype.names and\ + "j" not in self.dtype.names: + i0 = 1 + unstructured = True + + arrays = {} + for name in self.dtype.names[i0:]: + if not self.dtype.fields[name][0] == object: + if unstructured: + arr = np.zeros((self._model.nlay * self._model.ncpl,)) + else: + arr = np.zeros((self._model.nlay, self._model.nrow, + self._model.ncol)) + arrays[name] = arr.copy() + + # if this kper is not found + if kper not in self.data.keys(): + kpers = list(self.data.keys()) + kpers.sort() + # if this kper is before the first entry, + # (maybe) mask and return + if kper < kpers[0]: + if mask: + for name, arr in arrays.items(): + arrays[name][:] = np.NaN + return arrays + # find the last kper + else: + kper = self.__find_last_kper(kper) + + sarr = self.data[kper] + + if np.isscalar(sarr): + # if there are no entries for this kper + if sarr == 0: + if mask: + for name, arr in arrays.items(): + arrays[name][:] = np.NaN + return arrays + else: + raise Exception("MfList: something bad happened") + + for name, arr in arrays.items(): + if unstructured: + cnt = np.zeros((self._model.nlay * self._model.ncpl,), + dtype=np.float) + else: + cnt = np.zeros( + (self._model.nlay, self._model.nrow, self._model.ncol), + dtype=np.float) + #print(name,kper) + for rec in sarr: + if unstructured: + arr[rec['node']] += rec[name] + cnt[rec['node']] += 1. + else: + arr[rec['k'], rec['i'], rec['j']] += rec[name] + cnt[rec['k'], rec['i'], rec['j']] += 1. + # average keys that should not be added + if name not in ('cond', 'flux'): + idx = cnt > 0. + arr[idx] /= cnt[idx] + if mask: + arr = np.ma.masked_where(cnt == 0., arr) + arr[cnt == 0.] 
= np.NaN
+
+            arrays[name] = arr.copy()
+        # elif mask:
+        #     for name, arr in arrays.items():
+        #         arrays[name][:] = np.NaN
+        return arrays
+
+    @property
+    def masked_4D_arrays(self):
+        # get the first kper
+        arrays = self.to_array(kper=0, mask=True)
+
+        # initialize these big arrays
+        m4ds = {}
+        for name, array in arrays.items():
+            m4d = np.zeros((self._model.nper, self._model.nlay,
+                            self._model.nrow, self._model.ncol))
+            m4d[0, :, :, :] = array
+            m4ds[name] = m4d
+        for kper in range(1, self._model.nper):
+            arrays = self.to_array(kper=kper, mask=True)
+            for name, array in arrays.items():
+                m4ds[name][kper, :, :, :] = array
+        return m4ds
+
+    def masked_4D_arrays_itr(self):
+        # get the first kper
+        arrays = self.to_array(kper=0, mask=True)
+
+        # initialize these big arrays
+        for name, array in arrays.items():
+            m4d = np.zeros((self._model.nper, self._model.nlay,
+                            self._model.nrow, self._model.ncol))
+            m4d[0, :, :, :] = array
+            for kper in range(1, self._model.nper):
+                arrays = self.to_array(kper=kper, mask=True)
+                for tname, array in arrays.items():
+                    if tname == name:
+                        m4d[kper, :, :, :] = array
+            yield name, m4d
+
+    @property
+    def array(self):
+        return self.masked_4D_arrays
+
+    @classmethod
+    def from_4d(cls, model, pak_name, m4ds):
+        """
+        Construct an MfList instance from a dict of
+        (attribute_name, masked 4D ndarray).
+
+        Parameters
+        ----------
+        model : mbase derived type
+        pak_name : str
+            package name (e.g. GHB)
+        m4ds : dict
+            {attribute name: 4d masked numpy.ndarray}
+
+        Returns
+        -------
+        MfList instance
+        """
+        sp_data = MfList.masked4D_arrays_to_stress_period_data(
+            model.get_package(pak_name).get_default_dtype(), m4ds)
+        return cls(model.get_package(pak_name), data=sp_data)
+
+    @staticmethod
+    def masked4D_arrays_to_stress_period_data(dtype, m4ds):
+        """
+        Convert a dictionary of 4-dim masked arrays to
+        a stress_period_data style dict of recarray.
+
+        Parameters
+        ----------
+        dtype : numpy dtype
+        m4ds : dict
+            {name: masked numpy 4-dim ndarray}
+
+        Returns
+        -------
+        dict {kper: recarray}
+        """
+        assert isinstance(m4ds, dict)
+        for name, m4d in m4ds.items():
+            assert isinstance(m4d, np.ndarray)
+            assert name in dtype.names
+            assert m4d.ndim == 4
+        keys = list(m4ds.keys())
+
+        for i1, key1 in enumerate(keys):
+            a1 = np.isnan(m4ds[key1])
+            for i2, key2 in enumerate(keys[i1:]):
+                a2 = np.isnan(m4ds[key2])
+                if not np.array_equal(a1, a2):
+                    raise Exception("MfList error: masking not equal" +
+                                    " for {0} and {1}".format(key1, key2))
+
+        sp_data = {}
+        for kper in range(m4d.shape[0]):
+            vals = {}
+            for name, m4d in m4ds.items():
+                arr = m4d[kper, :, :, :]
+                isnan = np.argwhere(~np.isnan(arr))
+                v = []
+                for k, i, j in isnan:
+                    v.append(arr[k, i, j])
+                vals[name] = v
+            kk = isnan[:, 0]
+            ii = isnan[:, 1]
+            jj = isnan[:, 2]
+
+            spd = np.recarray(shape=isnan.shape[0], dtype=dtype)
+            spd["i"] = ii
+            spd["k"] = kk
+            spd["j"] = jj
+            for n, v in vals.items():
+                spd[n] = v
+            sp_data[kper] = spd
+        return sp_data
diff --git a/release/make-release.py b/release/make-release.py
index 24d523068a..3fe3a99e52 100644
--- a/release/make-release.py
+++ b/release/make-release.py
@@ -1,445 +1,445 @@
-#!/usr/bin/python
-
-from __future__ import print_function
-import subprocess
-import os
-import sys
-import datetime
-import json
-from collections import OrderedDict
-
-# update files and paths so that there are the same number of
-# path and file entries in the paths and files list. Enter '.'
-# as the path if the file is in the root repository directory -paths = ['../flopy', '../', - '../docs', '../docs', - '../', '../', '../docs'] -files = ['version.py', 'README.md', - 'USGS_release.md', 'PyPi_release.md', - 'code.json', 'DISCLAIMER.md', 'notebook_examples.md'] - -# check that there are the same number of entries in files and paths -if len(paths) != len(files): - msg = 'The number of entries in paths ' + \ - '({}) must equal '.format(len(paths)) + \ - 'the number of entries in files ({})'.format(len(files)) - assert False, msg - -pak = 'flopy' - -# authors list for Software/Code citation for FloPy -# author should be defined LastName FirstName MiddleInitial -# MiddleInitial can be absent. Use spaces instead of commas to separate -# LastName, FirstName, and MiddleInitial. -authors = ['Bakker Mark', 'Post Vincent', 'Langevin Christian D', - 'Hughes Joseph D', 'White Jeremy T', 'Leaf Andrew T', - 'Paulinski Scott R', 'Larsen Joshua D', 'Toews Michael W', - 'Morway Eric D', 'Bellino Jason C', 'Starn Jeffrey J', - 'Fienen Michael N'] - -approved = '''Disclaimer ----------- - -This software has been approved for release by the U.S. Geological Survey -(USGS). Although the software has been subjected to rigorous review, the USGS -reserves the right to update the software as needed pursuant to further analysis -and review. No warranty, expressed or implied, is made by the USGS or the U.S. -Government as to the functionality of the software and related material nor -shall the fact of release constitute any such warranty. Furthermore, the -software is released on condition that neither the USGS nor the U.S. Government -shall be held liable for any damages resulting from its authorized or -unauthorized use. -''' - -preliminary = '''Disclaimer ----------- - -This software is preliminary or provisional and is subject to revision. It is -being provided to meet the need for timely best science. The software has not -received final approval by the U.S. Geological Survey (USGS). No warranty, -expressed or implied, is made by the USGS or the U.S. Government as to the -functionality of the software and related material nor shall the fact of release -constitute any such warranty. The software is provided on the condition that -neither the USGS nor the U.S. Government shall be held liable for any damages -resulting from the authorized or unauthorized use of the software. -''' - - -def get_disclaimer(): - # get current branch - branch = get_branch() - - if 'release' in branch.lower() or 'master' in branch.lower(): - disclaimer = approved - is_approved = True - else: - disclaimer = preliminary - is_approved = False - - return is_approved, disclaimer - - -def get_branch(): - branch = None - - # determine if branch defined on command line - for argv in sys.argv: - if 'master' in argv: - branch = 'master' - elif 'develop' in argv.lower(): - branch = 'develop' - - if branch is None: - try: - # determine current branch - b = subprocess.Popen(("git", "status"), - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT).communicate()[0] - if isinstance(b, bytes): - b = b.decode('utf-8') - - for line in b.splitlines(): - if 'On branch' in line: - branch = line.replace('On branch ', '').rstrip() - - except: - msg = 'Could not determine current branch. Is git installed?' 
- raise ValueError(msg) - - return branch - - -def get_version_str(v0, v1, v2): - version_type = ('{}'.format(v0), - '{}'.format(v1), - '{}'.format(v2)) - version = '.'.join(version_type) - return version - - -def get_tag(v0, v1, v2): - tag_type = ('{}'.format(v0), - '{}'.format(v1), - '{}'.format(v2)) - tag = '.'.join(tag_type) - return tag - - -def get_software_citation(version, is_approved): - now = datetime.datetime.now() - sb = '' - if not is_approved: - sb = ' — release candidate' - line = '[' - for ipos, author in enumerate(authors): - if ipos > 0: - line += ', ' - if ipos == len(authors) - 1: - line += 'and ' - sv = author.split() - tauthor = '{}, {}.'.format(sv[0], sv[1][0]) - if len(sv) > 2: - tauthor += ' {}.'.format(sv[2][0]) - line += tauthor - line += ', {}, '.format(now.year) + \ - 'FloPy v{}{}: '.format(version, sb) + \ - 'U.S. Geological Survey Software Release, ' + \ - '{}, '.format(now.strftime('%d %B %Y')) + \ - 'http://dx.doi.org/10.5066/F7BK19FH]' + \ - '(http://dx.doi.org/10.5066/F7BK19FH)' - - return line - - -def update_version(): - name_pos = None - try: - fpth = os.path.join(paths[0], files[0]) - - vmajor = 0 - vminor = 0 - vmicro = 0 - lines = [line.rstrip('\n') for line in open(fpth, 'r')] - for idx, line in enumerate(lines): - t = line.split() - if 'major =' in line: - vmajor = int(t[2]) - elif 'minor =' in line: - vminor = int(t[2]) - elif 'micro =' in line: - vmicro = int(t[2]) - elif '__version__' in line: - name_pos = idx + 1 - - except: - msg = 'There was a problem updating the version file' - raise IOError(msg) - - try: - # write new version file - f = open(fpth, 'w') - f.write('# {} version file automatically '.format(pak) + - 'created using...{0}\n'.format(os.path.basename(__file__))) - f.write('# created on...' 
+ - '{0}\n'.format( - datetime.datetime.now().strftime("%B %d, %Y %H:%M:%S"))) - f.write('\n') - f.write('major = {}\n'.format(vmajor)) - f.write('minor = {}\n'.format(vminor)) - f.write('micro = {}\n'.format(vmicro)) - f.write("__version__ = '{:d}.{:d}.{:d}'.format(major, minor, micro)\n") - - # write the remainder of the version file - if name_pos is not None: - for line in lines[name_pos:]: - f.write('{}\n'.format(line)) - f.close() - print('Successfully updated version.py') - except: - msg = 'There was a problem updating the version file' - raise IOError(msg) - - # update README.md with new version information - update_readme_markdown(vmajor, vminor, vmicro) - - # update notebook_examples.md - update_notebook_examples_markdown() - - # update code.json - update_codejson(vmajor, vminor, vmicro) - - # update docs/USGS_release.md with new version information - update_USGSmarkdown(vmajor, vminor, vmicro) - - -def update_codejson(vmajor, vminor, vmicro): - # define json filename - json_fname = os.path.join(paths[4], files[4]) - - # get branch - branch = get_branch() - - # create version - version = get_tag(vmajor, vminor, vmicro) - - # load and modify json file - with open(json_fname, 'r') as f: - data = json.load(f, object_pairs_hook=OrderedDict) - - # modify the json file data - now = datetime.datetime.now() - sdate = now.strftime('%Y-%m-%d') - data[0]['date']['metadataLastUpdated'] = sdate - if 'release' in branch.lower() or 'master' in branch.lower(): - data[0]['version'] = version - data[0]['status'] = 'Production' - else: - data[0]['version'] = version - data[0]['status'] = 'Release Candidate' - - # rewrite the json file - with open(json_fname, 'w') as f: - json.dump(data, f, indent=4) - f.write('\n') - - return - - -def update_readme_markdown(vmajor, vminor, vmicro): - # create disclaimer text - is_approved, disclaimer = get_disclaimer() - - # define branch - if is_approved: - branch = 'master' - else: - branch = 'develop' - - # create version - version = get_tag(vmajor, vminor, vmicro) - - # read README.md into memory - fpth = os.path.join(paths[1], files[1]) - with open(fpth, 'r') as file: - lines = [line.rstrip() for line in file] - - # rewrite README.md - terminate = False - f = open(fpth, 'w') - for line in lines: - if '### Version ' in line: - line = '### Version {}'.format(version) - if not is_approved: - line += ' — release candidate' - elif '[Build Status]' in line: - line = '[![Build Status](https://travis-ci.org/modflowpy/' + \ - 'flopy.svg?branch={})]'.format(branch) + \ - '(https://travis-ci.org/modflowpy/flopy)' - elif '[Coverage Status]' in line: - line = '[![Coverage Status](https://coveralls.io/repos/github/' + \ - 'modflowpy/flopy/badge.svg?branch={})]'.format(branch) + \ - '(https://coveralls.io/github/modflowpy/' + \ - 'flopy?branch={})'.format(branch) - elif '[Binder]' in line: - # [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/modflowpy/flopy.git/develop) - line = '[![Binder](https://mybinder.org/badge_logo.svg)]' + \ - '(https://mybinder.org/v2/gh/modflowpy/flopy.git/' + \ - '{}'.format(branch) + ')' - elif 'http://dx.doi.org/10.5066/F7BK19FH' in line: - line = get_software_citation(version, is_approved) - elif 'Disclaimer' in line: - line = disclaimer - terminate = True - f.write('{}\n'.format(line)) - if terminate: - break - - f.close() - - # write disclaimer markdown file - fpth = os.path.join(paths[0], 'DISCLAIMER.md') - f = open(fpth, 'w') - f.write(disclaimer) - f.close() - - return - - -def update_notebook_examples_markdown(): 
- # create disclaimer text - is_approved, disclaimer = get_disclaimer() - - # define branch - if is_approved: - branch = 'master' - else: - branch = 'develop' - - # read notebook_examples.md into memory - fpth = os.path.join(paths[6], files[6]) - with open(fpth, 'r') as file: - lines = [line.rstrip() for line in file] - - # rewrite notebook_examples.md - terminate = False - f = open(fpth, 'w') - for line in lines: - if '[Binder]' in line: - # [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/modflowpy/flopy.git/develop) - line = '[![Binder](https://mybinder.org/badge_logo.svg)]' + \ - '(https://mybinder.org/v2/gh/modflowpy/flopy.git/' + \ - '{}'.format(branch) + ')' - f.write('{}\n'.format(line)) - f.close() - - -def update_USGSmarkdown(vmajor, vminor, vmicro): - # get branch - branch = get_branch() - - # create disclaimer text - is_approved, disclaimer = get_disclaimer() - - # create version - version = get_tag(vmajor, vminor, vmicro) - - # read README.md into memory - fpth = os.path.join(paths[1], files[1]) - with open(fpth, 'r') as file: - lines = [line.rstrip() for line in file] - - # write USGS_release.md - fpth = os.path.join(paths[2], files[2]) - f = open(fpth, 'w') - - # write PyPi_release.md - fpth = os.path.join(paths[3], files[3]) - f2 = open(fpth, 'w') - - # date and branch information - now = datetime.datetime.now() - sdate = now.strftime("%m/%d/%Y") - - # write header information - f.write('---\n') - f.write('title: FloPy Release Notes\n') - f.write('author:\n') - for author in authors: - sv = author.split() - tauthor = '{}'.format(sv[1]) - if len(sv) > 2: - tauthor += ' {}.'.format(sv[2][0]) - tauthor += ' {}'.format(sv[0]) - f.write(' - {}\n'.format(tauthor)) - f.write('header-includes:\n') - f.write(' - \\usepackage{fancyhdr}\n') - f.write(' - \\usepackage{lastpage}\n') - f.write(' - \\pagestyle{fancy}\n') - f.write(' - \\fancyhf{{}}\n') - f.write(' - \\fancyhead[LE, LO, RE, RO]{}\n') - f.write(' - \\fancyhead[CE, CO]{FloPy Release Notes}\n') - f.write(' - \\fancyfoot[LE, RO]{{FloPy version {}}}\n'.format(version)) - f.write(' - \\fancyfoot[CO, CE]{\\thepage\\ of \\pageref{LastPage}}\n') - f.write(' - \\fancyfoot[RE, LO]{{{}}}\n'.format(sdate)) - f.write('geometry: margin=0.75in\n') - f.write('---\n\n') - - # write select information from README.md - writeline = False - for line in lines: - if line == 'Introduction': - writeline = True - elif line == 'Getting Started': - writeline = False - elif line == 'How to Cite': - writeline = True - elif 'http://dx.doi.org/10.5066/F7BK19FH' in line: - writeline = True - line = get_software_citation(version, is_approved) - elif line == 'MODFLOW Resources': - writeline = False - elif line == 'Disclaimer': - writeline = True - elif '[MODFLOW 6](docs/mf6.md)' in line: - line = line.replace('[MODFLOW 6](docs/mf6.md)', 'MODFLOW 6') - if writeline: - f.write('{}\n'.format(line)) - line = line.replace('***', '*') - line = line.replace('##### ', '') - f2.write('{}\n'.format(line)) - - # write installation information - cweb = 'https://water.usgs.gov/ogw/flopy/flopy-{}.zip'.format(version) - line = '' - line += 'Installation\n' - line += '-----------------------------------------------\n' - line += 'To install FloPy version {} '.format(version) - line += 'from the USGS FloPy website:\n' - line += '```\n' - line += 'pip install {}\n'.format(cweb) - line += '```\n\n' - line += 'To update to FloPy version {} '.format(version) - line += 'from the USGS FloPy website:\n' - line += '```\n' - line += 'pip install {} 
--upgrade\n'.format(cweb) - line += '```\n' - - # - f.write(line) - - # close the USGS_release.md file - f.close() - - line = line.replace(cweb, 'flopy') - line = line.replace(' from the USGS FloPy website', '') - - f2.write(line) - - # close the PyPi_release.md file - f2.close() - - return - - -if __name__ == "__main__": - update_version() +#!/usr/bin/python + +from __future__ import print_function +import subprocess +import os +import sys +import datetime +import json +from collections import OrderedDict + +# update files and paths so that there are the same number of +# path and file entries in the paths and files list. Enter '.' +# as the path if the file is in the root repository directory +paths = ['../flopy', '../', + '../docs', '../docs', + '../', '../', '../docs'] +files = ['version.py', 'README.md', + 'USGS_release.md', 'PyPi_release.md', + 'code.json', 'DISCLAIMER.md', 'notebook_examples.md'] + +# check that there are the same number of entries in files and paths +if len(paths) != len(files): + msg = 'The number of entries in paths ' + \ + '({}) must equal '.format(len(paths)) + \ + 'the number of entries in files ({})'.format(len(files)) + assert False, msg + +pak = 'flopy' + +# authors list for Software/Code citation for FloPy +# author should be defined LastName FirstName MiddleInitial +# MiddleInitial can be absent. Use spaces instead of commas to separate +# LastName, FirstName, and MiddleInitial. +authors = ['Bakker Mark', 'Post Vincent', 'Langevin Christian D', + 'Hughes Joseph D', 'White Jeremy T', 'Leaf Andrew T', + 'Paulinski Scott R', 'Larsen Joshua D', 'Toews Michael W', + 'Morway Eric D', 'Bellino Jason C', 'Starn Jeffrey J', + 'Fienen Michael N'] + +approved = '''Disclaimer +---------- + +This software has been approved for release by the U.S. Geological Survey +(USGS). Although the software has been subjected to rigorous review, the USGS +reserves the right to update the software as needed pursuant to further analysis +and review. No warranty, expressed or implied, is made by the USGS or the U.S. +Government as to the functionality of the software and related material nor +shall the fact of release constitute any such warranty. Furthermore, the +software is released on condition that neither the USGS nor the U.S. Government +shall be held liable for any damages resulting from its authorized or +unauthorized use. +''' + +preliminary = '''Disclaimer +---------- + +This software is preliminary or provisional and is subject to revision. It is +being provided to meet the need for timely best science. The software has not +received final approval by the U.S. Geological Survey (USGS). No warranty, +expressed or implied, is made by the USGS or the U.S. Government as to the +functionality of the software and related material nor shall the fact of release +constitute any such warranty. The software is provided on the condition that +neither the USGS nor the U.S. Government shall be held liable for any damages +resulting from the authorized or unauthorized use of the software. 
+''' + + +def get_disclaimer(): + # get current branch + branch = get_branch() + + if 'release' in branch.lower() or 'master' in branch.lower(): + disclaimer = approved + is_approved = True + else: + disclaimer = preliminary + is_approved = False + + return is_approved, disclaimer + + +def get_branch(): + branch = None + + # determine if branch defined on command line + for argv in sys.argv: + if 'master' in argv: + branch = 'master' + elif 'develop' in argv.lower(): + branch = 'develop' + + if branch is None: + try: + # determine current branch + b = subprocess.Popen(("git", "status"), + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT).communicate()[0] + if isinstance(b, bytes): + b = b.decode('utf-8') + + for line in b.splitlines(): + if 'On branch' in line: + branch = line.replace('On branch ', '').rstrip() + + except: + msg = 'Could not determine current branch. Is git installed?' + raise ValueError(msg) + + return branch + + +def get_version_str(v0, v1, v2): + version_type = ('{}'.format(v0), + '{}'.format(v1), + '{}'.format(v2)) + version = '.'.join(version_type) + return version + + +def get_tag(v0, v1, v2): + tag_type = ('{}'.format(v0), + '{}'.format(v1), + '{}'.format(v2)) + tag = '.'.join(tag_type) + return tag + + +def get_software_citation(version, is_approved): + now = datetime.datetime.now() + sb = '' + if not is_approved: + sb = ' — release candidate' + line = '[' + for ipos, author in enumerate(authors): + if ipos > 0: + line += ', ' + if ipos == len(authors) - 1: + line += 'and ' + sv = author.split() + tauthor = '{}, {}.'.format(sv[0], sv[1][0]) + if len(sv) > 2: + tauthor += ' {}.'.format(sv[2][0]) + line += tauthor + line += ', {}, '.format(now.year) + \ + 'FloPy v{}{}: '.format(version, sb) + \ + 'U.S. Geological Survey Software Release, ' + \ + '{}, '.format(now.strftime('%d %B %Y')) + \ + 'http://dx.doi.org/10.5066/F7BK19FH]' + \ + '(http://dx.doi.org/10.5066/F7BK19FH)' + + return line + + +def update_version(): + name_pos = None + try: + fpth = os.path.join(paths[0], files[0]) + + vmajor = 0 + vminor = 0 + vmicro = 0 + lines = [line.rstrip('\n') for line in open(fpth, 'r')] + for idx, line in enumerate(lines): + t = line.split() + if 'major =' in line: + vmajor = int(t[2]) + elif 'minor =' in line: + vminor = int(t[2]) + elif 'micro =' in line: + vmicro = int(t[2]) + elif '__version__' in line: + name_pos = idx + 1 + + except: + msg = 'There was a problem updating the version file' + raise IOError(msg) + + try: + # write new version file + f = open(fpth, 'w') + f.write('# {} version file automatically '.format(pak) + + 'created using...{0}\n'.format(os.path.basename(__file__))) + f.write('# created on...' 
+ + '{0}\n'.format( + datetime.datetime.now().strftime("%B %d, %Y %H:%M:%S"))) + f.write('\n') + f.write('major = {}\n'.format(vmajor)) + f.write('minor = {}\n'.format(vminor)) + f.write('micro = {}\n'.format(vmicro)) + f.write("__version__ = '{:d}.{:d}.{:d}'.format(major, minor, micro)\n") + + # write the remainder of the version file + if name_pos is not None: + for line in lines[name_pos:]: + f.write('{}\n'.format(line)) + f.close() + print('Successfully updated version.py') + except: + msg = 'There was a problem updating the version file' + raise IOError(msg) + + # update README.md with new version information + update_readme_markdown(vmajor, vminor, vmicro) + + # update notebook_examples.md + update_notebook_examples_markdown() + + # update code.json + update_codejson(vmajor, vminor, vmicro) + + # update docs/USGS_release.md with new version information + update_USGSmarkdown(vmajor, vminor, vmicro) + + +def update_codejson(vmajor, vminor, vmicro): + # define json filename + json_fname = os.path.join(paths[4], files[4]) + + # get branch + branch = get_branch() + + # create version + version = get_tag(vmajor, vminor, vmicro) + + # load and modify json file + with open(json_fname, 'r') as f: + data = json.load(f, object_pairs_hook=OrderedDict) + + # modify the json file data + now = datetime.datetime.now() + sdate = now.strftime('%Y-%m-%d') + data[0]['date']['metadataLastUpdated'] = sdate + if 'release' in branch.lower() or 'master' in branch.lower(): + data[0]['version'] = version + data[0]['status'] = 'Production' + else: + data[0]['version'] = version + data[0]['status'] = 'Release Candidate' + + # rewrite the json file + with open(json_fname, 'w') as f: + json.dump(data, f, indent=4) + f.write('\n') + + return + + +def update_readme_markdown(vmajor, vminor, vmicro): + # create disclaimer text + is_approved, disclaimer = get_disclaimer() + + # define branch + if is_approved: + branch = 'master' + else: + branch = 'develop' + + # create version + version = get_tag(vmajor, vminor, vmicro) + + # read README.md into memory + fpth = os.path.join(paths[1], files[1]) + with open(fpth, 'r') as file: + lines = [line.rstrip() for line in file] + + # rewrite README.md + terminate = False + f = open(fpth, 'w') + for line in lines: + if '### Version ' in line: + line = '### Version {}'.format(version) + if not is_approved: + line += ' — release candidate' + elif '[Build Status]' in line: + line = '[![Build Status](https://travis-ci.org/modflowpy/' + \ + 'flopy.svg?branch={})]'.format(branch) + \ + '(https://travis-ci.org/modflowpy/flopy)' + elif '[Coverage Status]' in line: + line = '[![Coverage Status](https://coveralls.io/repos/github/' + \ + 'modflowpy/flopy/badge.svg?branch={})]'.format(branch) + \ + '(https://coveralls.io/github/modflowpy/' + \ + 'flopy?branch={})'.format(branch) + elif '[Binder]' in line: + # [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/modflowpy/flopy.git/develop) + line = '[![Binder](https://mybinder.org/badge_logo.svg)]' + \ + '(https://mybinder.org/v2/gh/modflowpy/flopy.git/' + \ + '{}'.format(branch) + ')' + elif 'http://dx.doi.org/10.5066/F7BK19FH' in line: + line = get_software_citation(version, is_approved) + elif 'Disclaimer' in line: + line = disclaimer + terminate = True + f.write('{}\n'.format(line)) + if terminate: + break + + f.close() + + # write disclaimer markdown file + fpth = os.path.join(paths[0], 'DISCLAIMER.md') + f = open(fpth, 'w') + f.write(disclaimer) + f.close() + + return + + +def update_notebook_examples_markdown(): 
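+    # Rewrite the Binder badge in docs/notebook_examples.md so that it
+    # points at the branch matching the release state determined by
+    # get_disclaimer() (master when approved, develop otherwise).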
+ # create disclaimer text + is_approved, disclaimer = get_disclaimer() + + # define branch + if is_approved: + branch = 'master' + else: + branch = 'develop' + + # read notebook_examples.md into memory + fpth = os.path.join(paths[6], files[6]) + with open(fpth, 'r') as file: + lines = [line.rstrip() for line in file] + + # rewrite notebook_examples.md + terminate = False + f = open(fpth, 'w') + for line in lines: + if '[Binder]' in line: + # [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/modflowpy/flopy.git/develop) + line = '[![Binder](https://mybinder.org/badge_logo.svg)]' + \ + '(https://mybinder.org/v2/gh/modflowpy/flopy.git/' + \ + '{}'.format(branch) + ')' + f.write('{}\n'.format(line)) + f.close() + + +def update_USGSmarkdown(vmajor, vminor, vmicro): + # get branch + branch = get_branch() + + # create disclaimer text + is_approved, disclaimer = get_disclaimer() + + # create version + version = get_tag(vmajor, vminor, vmicro) + + # read README.md into memory + fpth = os.path.join(paths[1], files[1]) + with open(fpth, 'r') as file: + lines = [line.rstrip() for line in file] + + # write USGS_release.md + fpth = os.path.join(paths[2], files[2]) + f = open(fpth, 'w') + + # write PyPi_release.md + fpth = os.path.join(paths[3], files[3]) + f2 = open(fpth, 'w') + + # date and branch information + now = datetime.datetime.now() + sdate = now.strftime("%m/%d/%Y") + + # write header information + f.write('---\n') + f.write('title: FloPy Release Notes\n') + f.write('author:\n') + for author in authors: + sv = author.split() + tauthor = '{}'.format(sv[1]) + if len(sv) > 2: + tauthor += ' {}.'.format(sv[2][0]) + tauthor += ' {}'.format(sv[0]) + f.write(' - {}\n'.format(tauthor)) + f.write('header-includes:\n') + f.write(' - \\usepackage{fancyhdr}\n') + f.write(' - \\usepackage{lastpage}\n') + f.write(' - \\pagestyle{fancy}\n') + f.write(' - \\fancyhf{{}}\n') + f.write(' - \\fancyhead[LE, LO, RE, RO]{}\n') + f.write(' - \\fancyhead[CE, CO]{FloPy Release Notes}\n') + f.write(' - \\fancyfoot[LE, RO]{{FloPy version {}}}\n'.format(version)) + f.write(' - \\fancyfoot[CO, CE]{\\thepage\\ of \\pageref{LastPage}}\n') + f.write(' - \\fancyfoot[RE, LO]{{{}}}\n'.format(sdate)) + f.write('geometry: margin=0.75in\n') + f.write('---\n\n') + + # write select information from README.md + writeline = False + for line in lines: + if line == 'Introduction': + writeline = True + elif line == 'Getting Started': + writeline = False + elif line == 'How to Cite': + writeline = True + elif 'http://dx.doi.org/10.5066/F7BK19FH' in line: + writeline = True + line = get_software_citation(version, is_approved) + elif line == 'MODFLOW Resources': + writeline = False + elif line == 'Disclaimer': + writeline = True + elif '[MODFLOW 6](docs/mf6.md)' in line: + line = line.replace('[MODFLOW 6](docs/mf6.md)', 'MODFLOW 6') + if writeline: + f.write('{}\n'.format(line)) + line = line.replace('***', '*') + line = line.replace('##### ', '') + f2.write('{}\n'.format(line)) + + # write installation information + cweb = 'https://water.usgs.gov/ogw/flopy/flopy-{}.zip'.format(version) + line = '' + line += 'Installation\n' + line += '-----------------------------------------------\n' + line += 'To install FloPy version {} '.format(version) + line += 'from the USGS FloPy website:\n' + line += '```\n' + line += 'pip install {}\n'.format(cweb) + line += '```\n\n' + line += 'To update to FloPy version {} '.format(version) + line += 'from the USGS FloPy website:\n' + line += '```\n' + line += 'pip install {} 
--upgrade\n'.format(cweb) + line += '```\n' + + # + f.write(line) + + # close the USGS_release.md file + f.close() + + line = line.replace(cweb, 'flopy') + line = line.replace(' from the USGS FloPy website', '') + + f2.write(line) + + # close the PyPi_release.md file + f2.close() + + return + + +if __name__ == "__main__": + update_version() diff --git a/setup.py b/setup.py index fa7ae25ce3..efe3b8a9f3 100644 --- a/setup.py +++ b/setup.py @@ -1,39 +1,39 @@ -import os -import sys -from setuptools import setup - -# ensure minimum version of Python is running -if sys.version_info[0:2] < (3, 5): - raise RuntimeError('Flopy requires Python >= 3.5') - -# local import of package variables in flopy/version.py -# imports __version__, __pakname__, __author__, __author_email__ -exec(open(os.path.join("flopy", "version.py")).read()) - -try: - import pypandoc - - fpth = os.path.join('docs', 'PyPi_release.md') - long_description = pypandoc.convert(fpth, 'rst') -except ImportError: - long_description = '' - -setup(name=__pakname__, - description='FloPy is a Python package to create, run, and ' + - 'post-process MODFLOW-based models.', - long_description=long_description, - author=__author__, - author_email=__author_email__, - url='https://github.com/modflowpy/flopy/', - license='CC0', - platforms='Windows, Mac OS-X, Linux', - install_requires=['numpy'], - packages=['flopy', 'flopy.modflow', 'flopy.modflowlgr', 'flopy.modpath', - 'flopy.mt3d', 'flopy.seawat', 'flopy.utils', 'flopy.plot', - 'flopy.pest', 'flopy.export', 'flopy.discretization', - 'flopy.mf6', 'flopy.mf6.coordinates', 'flopy.mf6.data', - 'flopy.mf6.modflow', 'flopy.mf6.utils'], - include_package_data=True, # includes files listed in MANIFEST.in - # use this version ID if .svn data cannot be found - version=__version__, - classifiers=['Topic :: Scientific/Engineering :: Hydrology']) +import os +import sys +from setuptools import setup + +# ensure minimum version of Python is running +if sys.version_info[0:2] < (3, 5): + raise RuntimeError('Flopy requires Python >= 3.5') + +# local import of package variables in flopy/version.py +# imports __version__, __pakname__, __author__, __author_email__ +exec(open(os.path.join("flopy", "version.py")).read()) + +try: + import pypandoc + + fpth = os.path.join('docs', 'PyPi_release.md') + long_description = pypandoc.convert(fpth, 'rst') +except ImportError: + long_description = '' + +setup(name=__pakname__, + description='FloPy is a Python package to create, run, and ' + + 'post-process MODFLOW-based models.', + long_description=long_description, + author=__author__, + author_email=__author_email__, + url='https://github.com/modflowpy/flopy/', + license='CC0', + platforms='Windows, Mac OS-X, Linux', + install_requires=['numpy'], + packages=['flopy', 'flopy.modflow', 'flopy.modflowlgr', 'flopy.modpath', + 'flopy.mt3d', 'flopy.seawat', 'flopy.utils', 'flopy.plot', + 'flopy.pest', 'flopy.export', 'flopy.discretization', + 'flopy.mf6', 'flopy.mf6.coordinates', 'flopy.mf6.data', + 'flopy.mf6.modflow', 'flopy.mf6.utils'], + include_package_data=True, # includes files listed in MANIFEST.in + # use this version ID if .svn data cannot be found + version=__version__, + classifiers=['Topic :: Scientific/Engineering :: Hydrology'])
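
# A quick sanity check (hypothetical, not part of the patch): after running
# `pip install .`, the version string assembled by flopy/version.py and
# exec-imported above should be visible as
#     python -c "import flopy; print(flopy.__version__)"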