refactor: fixes for numpy-2.0 deprecation warnings, require numpy>=1.20.3 (#2237)

This PR resolves several deprecation warnings from NumPy 2.0 (xref #2153):

* np.in1d() → np.isin()
* "a16" → "S16" to describe 16-length string of bytes
* The numpy.core.records module is private, so use numpy.rec instead, which seems to work with older versions

This also bumps the minimum version of numpy from 1.15.0 to 1.20.3, which is the minimum required by pandas 2.0.0; the previous pins declared mutually incompatible minimum versions. A minimal sketch of the migration patterns is shown below.
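
For reference, a minimal sketch of the migration patterns applied throughout the diff; the sample values here are illustrative only, not taken from FloPy's data:

```python
import numpy as np

# np.in1d() is deprecated in NumPy 2.0; np.isin() is the drop-in replacement
names = np.array([b"FROM_RECHARGE", b"TO_WELLS"])
mask = np.isin(names, [b"FROM_RECHARGE"])            # was: np.in1d(names, [...])

# the "a16" dtype code is deprecated; "S16" describes the same 16-byte string field
header_dtype = np.dtype([("kstp", "i4"), ("text", "S16")])  # was: ("text", "a16")

# numpy.core.records is private; numpy.rec is the public equivalent
ra = np.rec.fromrecords(                             # was: np.core.records.fromrecords(...)
    [(1, b"HEAD"), (2, b"DRAWDOWN")], dtype=header_dtype
)

# numpy.core.fromnumeric.prod is private as well; plain np.prod does the same job
nval = int(np.prod((3, 4)))                          # 12
```

The same `np.rec.fromarrays` call replaces `np.core.records.fromarrays` wherever the diff converts plain arrays to recarrays.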
mwtoews authored Jun 17, 2024
1 parent d9ebd81 commit 1e44b3f
Showing 18 changed files with 55 additions and 61 deletions.
2 changes: 1 addition & 1 deletion .docs/Notebooks/zonebudget_example.py
@@ -111,7 +111,7 @@
inyrbud = inyr.get_budget()

names = ["FROM_RECHARGE"]
rowidx = np.in1d(cmdbud["name"], names)
rowidx = np.isin(cmdbud["name"], names)
colidx = "ZONE_1"

print(f"{cmdbud[rowidx][colidx][0]:,.1f} cubic meters/day")
2 changes: 1 addition & 1 deletion README.md
@@ -35,7 +35,7 @@ Installation
FloPy requires **Python** 3.8+ with:

```
numpy >=1.15.0,<2.0.0
numpy >=1.20.3,<2.0.0
matplotlib >=1.4.0
pandas >=2.0.0
```
2 changes: 1 addition & 1 deletion autotest/test_mf6.py
@@ -102,7 +102,7 @@ def write_head(
("kper", "i4"),
("pertim", "f8"),
("totim", "f8"),
("text", "a16"),
("text", "S16"),
("ncol", "i4"),
("nrow", "i4"),
("ilay", "i4"),
2 changes: 1 addition & 1 deletion autotest/test_mp6.py
@@ -175,7 +175,7 @@ def test_get_destination_data(function_tmpdir, mp6_test_path):
np.array(well_pthld)[["k", "i", "j"]].tolist(),
dtype=starting_locs.dtype,
)
assert np.all(np.in1d(starting_locs, pathline_locs))
assert np.all(np.isin(starting_locs, pathline_locs))

# test writing a shapefile of endpoints
epd.write_shapefile(
14 changes: 7 additions & 7 deletions autotest/test_particledata.py
@@ -83,7 +83,7 @@ def test_particledata_structured_ctor_with_partlocs_as_list_of_tuples():
assert isinstance(data.particledata, pd.DataFrame)
assert np.array_equal(
data.particledata.to_records(index=False),
np.core.records.fromrecords(
np.rec.fromrecords(
[
(0, 1, 1, 0.5, 0.5, 0.5, 0.0, 0),
(0, 1, 2, 0.5, 0.5, 0.5, 0.0, 0),
@@ -102,7 +102,7 @@ def test_particledata_structured_ctor_with_partlocs_as_ndarray():
assert isinstance(data.particledata, pd.DataFrame)
assert np.array_equal(
data.particledata.to_records(index=False),
np.core.records.fromrecords(
np.rec.fromrecords(
[
(0, 1, 1, 0.5, 0.5, 0.5, 0.0, 0),
(0, 1, 2, 0.5, 0.5, 0.5, 0.0, 0),
@@ -121,7 +121,7 @@ def test_particledata_unstructured_ctor_with_partlocs_as_ndarray():
assert isinstance(data.particledata, pd.DataFrame)
assert np.array_equal(
data.particledata.to_records(index=False),
np.core.records.fromrecords(
np.rec.fromrecords(
[
(0, 0.5, 0.5, 0.5, 0.0, 0),
(1, 0.5, 0.5, 0.5, 0.0, 0),
@@ -141,7 +141,7 @@ def test_particledata_unstructured_ctor_with_partlocs_as_list():
assert isinstance(data.particledata, pd.DataFrame)
assert np.array_equal(
data.particledata.to_records(index=False),
np.core.records.fromrecords(
np.rec.fromrecords(
[
(0, 0.5, 0.5, 0.5, 0.0, 0),
(1, 0.5, 0.5, 0.5, 0.0, 0),
@@ -161,7 +161,7 @@ def test_particledata_unstructured_ctor_with_partlocs_as_ndarray():
assert isinstance(data.particledata, pd.DataFrame)
assert np.array_equal(
data.particledata.to_records(index=False),
np.core.records.fromrecords(
np.rec.fromrecords(
[
(0, 0.5, 0.5, 0.5, 0.0, 0),
(1, 0.5, 0.5, 0.5, 0.0, 0),
@@ -181,7 +181,7 @@ def test_particledata_structured_ctor_with_partlocs_as_list_of_lists():
assert isinstance(data.particledata, pd.DataFrame)
assert np.array_equal(
data.particledata.to_records(index=False),
np.core.records.fromrecords(
np.rec.fromrecords(
[
(0, 1, 1, 0.5, 0.5, 0.5, 0.0, 0),
(0, 1, 2, 0.5, 0.5, 0.5, 0.0, 0),
@@ -212,7 +212,7 @@ def test_particledata_to_prp_dis_1():
) # each coord should be a tuple (irpt, k, i, j, x, y, z)

# expected
exp = np.core.records.fromrecords(
exp = np.rec.fromrecords(
[
(0, 1, 1, 0.5, 0.5, 0.5, 0.0, 0),
(0, 1, 2, 0.5, 0.5, 0.5, 0.0, 0),
4 changes: 2 additions & 2 deletions etc/environment.yml
@@ -6,8 +6,9 @@ dependencies:

# required
- python>=3.8
- numpy>=1.15.0,<2.0.0
- numpy>=1.20.3,<2.0.0
- matplotlib>=1.4.0
- pandas>=2.0.0

# lint
- cffconvert
@@ -33,7 +34,6 @@ dependencies:
# optional
- affine
- scipy
- pandas
- netcdf4
- pyshp
- rasterio
4 changes: 2 additions & 2 deletions flopy/modflow/mffhb.py
@@ -210,7 +210,7 @@ def __init__(
ds5 = ds5.to_records(index=False)
# convert numpy array to a recarray
if ds5.dtype != dtype:
ds5 = np.core.records.fromarrays(ds5.transpose(), dtype=dtype)
ds5 = np.rec.fromarrays(ds5.transpose(), dtype=dtype)

# assign dataset 5
self.ds5 = ds5
@@ -229,7 +229,7 @@ def __init__(
ds7 = ds7.to_records(index=False)
# convert numpy array to a recarray
if ds7.dtype != dtype:
ds7 = np.core.records.fromarrays(ds7.transpose(), dtype=dtype)
ds7 = np.rec.fromarrays(ds7.transpose(), dtype=dtype)

# assign dataset 7
self.ds7 = ds7
2 changes: 1 addition & 1 deletion flopy/modflow/mfgage.py
@@ -131,7 +131,7 @@ def __init__(
# convert gage_data to a recarray, if necessary
if isinstance(gage_data, np.ndarray):
if not gage_data.dtype == dtype:
gage_data = np.core.records.fromarrays(
gage_data = np.rec.fromarrays(
gage_data.transpose(), dtype=dtype
)
elif isinstance(gage_data, pd.DataFrame):
6 changes: 2 additions & 4 deletions flopy/modflow/mfsfr2.py
@@ -1372,9 +1372,7 @@ def get_variable_by_stress_period(self, varname):
all_data[inds, per] = self.segment_data[per][varname]
dtype.append((f"{varname}{per}", float))
isvar = all_data.sum(axis=1) != 0
ra = np.core.records.fromarrays(
all_data[isvar].transpose().copy(), dtype=dtype
)
ra = np.rec.fromarrays(all_data[isvar].transpose().copy(), dtype=dtype)
segs = self.segment_data[0].nseg[isvar]
isseg = np.array(
[True if s in segs else False for s in self.reach_data.iseg]
@@ -1387,7 +1385,7 @@
return ra.view(np.recarray)

def repair_outsegs(self):
isasegment = np.in1d(
isasegment = np.isin(
self.segment_data[0].outseg, self.segment_data[0].nseg
)
isasegment = isasegment | (self.segment_data[0].outseg < 0)
8 changes: 2 additions & 6 deletions flopy/modflow/mfstr.py
@@ -371,9 +371,7 @@ def __init__(
)
assert d.dtype == self.dtype, e
elif isinstance(d, np.ndarray):
d = np.core.records.fromarrays(
d.transpose(), dtype=self.dtype
)
d = np.rec.fromarrays(d.transpose(), dtype=self.dtype)
elif isinstance(d, int):
if model.verbose:
if d < 0:
@@ -404,9 +402,7 @@ def __init__(
)
assert d.dtype == self.dtype2, e
elif isinstance(d, np.ndarray):
d = np.core.records.fromarrays(
d.transpose(), dtype=self.dtype2
)
d = np.rec.fromarrays(d.transpose(), dtype=self.dtype2)
elif isinstance(d, int):
if model.verbose:
if d < 0:
6 changes: 3 additions & 3 deletions flopy/plot/plotutil.py
@@ -2732,7 +2732,7 @@ def to_mp7_pathlines(
data = data.to_records(index=False)

# build mp7 format recarray
ret = np.core.records.fromarrays(
ret = np.rec.fromarrays(
[
data[seqn_key],
data["iprp"],
@@ -2841,7 +2841,7 @@ def to_mp7_endpoints(
endpts = endpts.to_records(index=False)

# build mp7 format recarray
ret = np.core.records.fromarrays(
ret = np.rec.fromarrays(
[
endpts["sequencenumber"],
endpts["iprp"],
@@ -2928,7 +2928,7 @@ def to_prt_pathlines(
data = data.to_records(index=False)

# build prt format recarray
ret = np.core.records.fromarrays(
ret = np.rec.fromarrays(
[
data["stressperiod"],
data["timestep"],
12 changes: 6 additions & 6 deletions flopy/utils/binaryfile.py
@@ -299,7 +299,7 @@ def binaryread_struct(file, vartype, shape=(1,), charlen=16):
# find the number of bytes for one value
numbytes = vartype(1).nbytes
# find the number of values
nval = np.core.fromnumeric.prod(shape)
nval = np.prod(shape)
fmt = str(nval) + fmt
s = file.read(numbytes * nval)
result = struct.unpack(fmt, s)
@@ -1138,7 +1138,7 @@ def _set_precision(self, precision="single"):
h1dt = [
("kstp", "i4"),
("kper", "i4"),
("text", "a16"),
("text", "S16"),
("ncol", "i4"),
("nrow", "i4"),
("nlay", "i4"),
@@ -1161,10 +1161,10 @@ def _set_precision(self, precision="single"):
("delt", ffmt),
("pertim", ffmt),
("totim", ffmt),
("modelnam", "a16"),
("paknam", "a16"),
("modelnam2", "a16"),
("paknam2", "a16"),
("modelnam", "S16"),
("paknam", "S16"),
("modelnam2", "S16"),
("paknam2", "S16"),
]
self.header1_dtype = np.dtype(h1dt)
self.header2_dtype0 = np.dtype(h2dt0)
12 changes: 6 additions & 6 deletions flopy/utils/datafile.py
@@ -43,7 +43,7 @@ def __init__(self, filetype=None, precision="single"):
("kper", "i4"),
("pertim", floattype),
("totim", floattype),
("text", "a16"),
("text", "S16"),
("ncol", "i4"),
("nrow", "i4"),
("ilay", "i4"),
@@ -56,7 +56,7 @@ def __init__(self, filetype=None, precision="single"):
("kper", "i4"),
("pertim", floattype),
("totim", floattype),
("text", "a16"),
("text", "S16"),
("ncol", "i4"),
("nrow", "i4"),
("ilay", "i4"),
@@ -69,7 +69,7 @@ def __init__(self, filetype=None, precision="single"):
("kstp", "i4"),
("kper", "i4"),
("totim", floattype),
("text", "a16"),
("text", "S16"),
("ncol", "i4"),
("nrow", "i4"),
("ilay", "i4"),
@@ -82,7 +82,7 @@ def __init__(self, filetype=None, precision="single"):
("kper", "i4"),
("pertim", floattype),
("totim", floattype),
("text", "a16"),
("text", "S16"),
("m1", "i4"),
("m2", "i4"),
("m3", "i4"),
@@ -95,7 +95,7 @@ def __init__(self, filetype=None, precision="single"):
("kper", "i4"),
("pertim", floattype),
("totim", floattype),
("text", "a16"),
("text", "S16"),
("m1", "i4"),
("m2", "i4"),
("m3", "i4"),
@@ -108,7 +108,7 @@ def __init__(self, filetype=None, precision="single"):
("kper", "i4"),
("pertim", floattype),
("totim", floattype),
("text", "a16"),
("text", "S16"),
("m1", "i4"),
("m2", "i4"),
("m3", "i4"),
6 changes: 3 additions & 3 deletions flopy/utils/modpathfile.py
@@ -124,12 +124,12 @@ def intersect(
cells = t

cells = np.array(cells, dtype=raslice.dtype)
inds = np.in1d(raslice, cells)
inds = np.isin(raslice, cells)
epdest = self._data[inds].copy().view(np.recarray)

if to_recarray:
# use particle ids to get the rest of the paths
inds = np.in1d(self._data["particleid"], epdest.particleid)
inds = np.isin(self._data["particleid"], epdest.particleid)
series = self._data[inds].copy()
series.sort(order=["particleid", "time"])
series = series.view(np.recarray)
@@ -693,7 +693,7 @@ def get_destination_endpoint_data(self, dest_cells, source=False):
dtype = np.dtype(dtype)
dest_cells = np.array(dest_cells, dtype=dtype)

inds = np.in1d(raslice, dest_cells)
inds = np.isin(raslice, dest_cells)
return data[inds].copy().view(np.recarray)

def write_shapefile(
4 changes: 2 additions & 2 deletions flopy/utils/recarray_utils.py
@@ -62,7 +62,7 @@ def ra_slice(ra, cols):
--------
>>> import numpy as np
>>> from flopy.utils import ra_slice
>>> a = np.core.records.fromrecords([("a", 1, 1.1), ("b", 2, 2.1)])
>>> a = np.rec.fromrecords([("a", 1, 1.1), ("b", 2, 2.1)])
>>> ra_slice(a, ['f0', 'f1'])
rec.array([('a', 1), ('b', 2)],
dtype=[('f0', '<U1'), ('f1', '<i4')])
@@ -75,7 +75,7 @@ def recarray(array, dtype):
Convert a list of lists or tuples to a recarray.
.. deprecated:: 3.5
Use numpy.core.records.fromrecords instead
Use numpy.rec.fromrecords instead
Parameters
----------
2 changes: 1 addition & 1 deletion flopy/utils/util_list.py
@@ -419,7 +419,7 @@ def __cast_ndarray(self, kper, d):
f"dtype len: {len(self.dtype)}"
)
try:
self.__data[kper] = np.core.records.fromarrays(
self.__data[kper] = np.rec.fromarrays(
d.transpose(), dtype=self.dtype
)
except Exception as e: