Skip to content

Commit

Permalink
ENH: support for sinks in csv format for RAMSES frontend
Browse files Browse the repository at this point in the history
Co-authored-by: Corentin Cadiou <contact@cphyc.me>
Co-authored-by: Clément Robert <cr52@protonmail.com>
  • Loading branch information
3 people committed Jul 25, 2023
1 parent 6a91640 commit 65ed224
Show file tree
Hide file tree
Showing 5 changed files with 192 additions and 21 deletions.
17 changes: 17 additions & 0 deletions yt/frontends/ramses/fields.py
Original file line number Diff line number Diff line change
Expand Up @@ -131,6 +131,23 @@ class RAMSESFieldInfo(FieldInfoContainer):
("particle_metallicity", ("", [], None)),
("particle_family", ("", [], None)),
("particle_tag", ("", [], None)),
# sink field parameters
("particle_mass", ("code_mass", [], None)),
("particle_angular_momentum_x", (ang_mom_units, [], None)),
("particle_angular_momentum_y", (ang_mom_units, [], None)),
("particle_angular_momentum_z", (ang_mom_units, [], None)),
("particle_formation_time", ("code_time", [], None)),
("particle_accretion_Rate", ("code_mass/code_time", [], None)),
("particle_delta_mass", ("code_mass", [], None)),
("particle_rho_gas", (rho_units, [], None)),
("particle_cs**2", (vel_units, [], None)),
("particle_etherm", (ener_units, [], None)),
("particle_velocity_x_gas", (vel_units, [], None)),
("particle_velocity_y_gas", (vel_units, [], None)),
("particle_velocity_z_gas", (vel_units, [], None)),
("particle_mass_bh", ("code_mass", [], None)),
("particle_level", ("", [], None)),
("particle_radius_star", ("code_length", [], None)),
)

known_sink_fields: KnownFieldsT = (
Expand Down
142 changes: 125 additions & 17 deletions yt/frontends/ramses/io.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,17 +72,13 @@ def convert_ramses_conformal_time_to_physical_age(
return (tsim - t) * t_scale


def _ramses_particle_file_handler(fname, foffsets, data_types, subset, fields, count):
"""General file handler, called by _read_particle_subset
def _ramses_particle_binary_file_handler(particle, subset, fields, count):
"""General file handler for binary file, called by _read_particle_subset
Parameters
----------
fname : string
filename to read from
foffsets: dict
Offsets in file of the fields
data_types: dict
Data type of the fields
particle : ``ParticleFileHandler``
the particle class we want to read
subset: ``RAMSESDomainSubset``
A RAMSES domain subset object
fields: list of tuple
Expand All @@ -93,6 +89,9 @@ def _ramses_particle_file_handler(fname, foffsets, data_types, subset, fields, c
tr = {}
ds = subset.domain.ds
current_time = ds.current_time.in_units("code_time").v
foffsets = particle.field_offsets
fname = particle.fname
data_types = particle.field_types
with FortranFile(fname) as fd:
# We do *all* conversion into boxlen here.
# This means that no other conversions need to be applied to convert
Expand All @@ -118,6 +117,50 @@ def _ramses_particle_file_handler(fname, foffsets, data_types, subset, fields, c
return tr


def _ramses_particle_csv_file_handler(particle, subset, fields, count):
    """General file handler for csv files, called by _read_particle_subset.

    Parameters
    ----------
    particle : ``ParticleFileHandler``
        the particle class we want to read
    subset: ``RAMSESDomainSubset``
        A RAMSES domain subset object
    fields: list of tuple
        The fields to read
    count: integer
        The number of elements to count
        (unused here; kept for API symmetry with the binary reader)
    """
    tr = {}
    ds = subset.domain.ds
    current_time = ds.current_time.in_units("code_time").v
    foffsets = particle.field_offsets
    fname = particle.fname

    from yt.utilities.on_demand_imports import _pandas as pd

    if not fields:
        return tr

    # Map each requested field to its csv column index, then read all the
    # needed columns in a single pass instead of re-reading the whole file
    # once per field.
    columns = {field: foffsets[field] for field in fields}
    dat = pd.read_csv(
        fname,
        delimiter=",",
        usecols=sorted(columns.values()),
        skiprows=2,
        header=None,
    )

    for field, icol in columns.items():
        tr[field] = np.array(dat[icol].to_list())

        if field[1].startswith("particle_position"):
            # Normalize positions by the box length (in place) so they
            # follow the code_length convention used elsewhere.
            np.divide(tr[field], ds["boxlen"], tr[field])
        if ds.cosmological_simulation and field[1] == "particle_birth_time":
            conformal_time = tr[field]
            physical_age = convert_ramses_conformal_time_to_physical_age(
                ds, conformal_time
            )
            tr[field] = current_time - physical_age
            # arbitrarily set particles with zero conformal_age to zero
            # particle_age. This corresponds to DM particles.
            tr[field][conformal_time == 0] = 0

    return tr


class IOHandlerRAMSES(BaseIOHandler):
_dataset_type = "ramses"

Expand Down Expand Up @@ -189,12 +232,29 @@ def _read_particle_fields(self, chunks, ptf, selector):
fields = [
(ptype, fname) for ptype, field_list in ptf.items() for fname in field_list
]

for ptype, field_list in sorted(ptf.items()):
for ax in "xyz":
if pn % ax not in field_list:
fields.append((ptype, pn % ax))
for chunk in chunks:
for subset in chunk.objs:

if ptype != "sink_csv":
for chunk in chunks:
for subset in chunk.objs:
rv = self._read_particle_subset(subset, fields)
for ptype, field_list in sorted(ptf.items()):
x, y, z = (
np.asarray(rv[ptype, pn % ax], "=f8") for ax in "xyz"
)
mask = selector.select_points(x, y, z, 0.0)
if mask is None:
mask = []
for field in field_list:
data = np.asarray(rv.pop((ptype, field))[mask], "=f8")
yield (ptype, field), data

else:
subset = chunks[0].objs[0]
rv = self._read_particle_subset(subset, fields)
for ptype, field_list in sorted(ptf.items()):
x, y, z = (np.asarray(rv[ptype, pn % ax], "=f8") for ax in "xyz")
Expand All @@ -217,7 +277,6 @@ def _read_particle_subset(self, subset, fields):
ok = False
for ph in subset.domain.particle_handlers:
if ph.ptype == ptype:
fname = ph.fname
foffsets = ph.field_offsets
data_types = ph.field_types
ok = True
Expand All @@ -235,16 +294,12 @@ def _read_particle_subset(self, subset, fields):
ptype, "particle_birth_time"
]

tr.update(
_ramses_particle_file_handler(
fname, foffsets, data_types, subset, subs_fields, count=count
)
)
tr.update(ph.reader(subset, subs_fields, count))

return tr


def _read_part_file_descriptor(fname: Union[str, "os.PathLike[str]"]):
def _read_part_binary_file_descriptor(fname: Union[str, "os.PathLike[str]"]):
"""
Read a file descriptor and returns the array of the fields found.
"""
Expand Down Expand Up @@ -300,6 +355,59 @@ def _read_part_file_descriptor(fname: Union[str, "os.PathLike[str]"]):
return fields


def _read_part_csv_file_descriptor(fname: Union[str, "os.PathLike[str]"]):
    """
    Read the header of a csv sink-particle output.

    Parameters
    ----------
    fname : str or os.PathLike
        Path to the csv sink file.

    Returns
    -------
    fields : list of str
        Field names translated to the yt naming convention.
    local_particle_count : int
        Number of particles (data rows) in the file.
    """
    from yt.utilities.on_demand_imports import _pandas as pd

    # Column names from the default csv RAMSES sink algorithm, mapped to
    # the yt naming convention.
    # NOTE(review): "cs**2" maps to "particle_sound_speed" here, while the
    # known sink fields declare "particle_cs**2" — confirm which name the
    # field-info container expects so units are picked up correctly.
    mapping = {
        " # id": "particle_identifier",
        "msink": "particle_mass",
        "x": "particle_position_x",
        "y": "particle_position_y",
        "z": "particle_position_z",
        "vx": "particle_velocity_x",
        "vy": "particle_velocity_y",
        "vz": "particle_velocity_z",
        "lx": "particle_angular_momentum_x",
        "ly": "particle_angular_momentum_y",
        "lz": "particle_angular_momentum_z",
        "tform": "particle_formation_time",
        "acc_Rate": "particle_accretion_Rate",
        "del_mass": "particle_delta_mass",
        "rho_gas": "particle_rho_gas",
        "cs**2": "particle_sound_speed",
        "etherm": "particle_etherm",
        "vx_gas": "particle_velocity_x_gas",
        "vy_gas": "particle_velocity_y_gas",
        "vz_gas": "particle_velocity_z_gas",
        "mbh": "particle_mass_bh",
        "level": "particle_level",
        "rsink_star": "particle_radius_star",
    }

    # Read the whole file once to get the number of particles.
    dat = pd.read_csv(fname, delimiter=",")
    local_particle_count = len(dat)

    # Translate known column names; fall back to a "particle_" prefix for
    # any column not covered by the mapping.
    fields = [mapping.get(varname, f"particle_{varname}") for varname in dat.columns]

    return fields, local_particle_count


def _read_fluid_file_descriptor(fname: Union[str, "os.PathLike[str]"], *, prefix: str):
"""
Read a file descriptor and returns the array of the fields found.
Expand Down
46 changes: 43 additions & 3 deletions yt/frontends/ramses/particle_handlers.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,12 @@
from yt.utilities.cython_fortran_utils import FortranFile

from .field_handlers import HandlerMixin
from .io import _read_part_file_descriptor
from .io import (
_ramses_particle_binary_file_handler,
_ramses_particle_csv_file_handler,
_read_part_binary_file_descriptor,
_read_part_csv_file_descriptor,
)

PARTICLE_HANDLERS: Set[Type["ParticleFileHandler"]] = set()

Expand Down Expand Up @@ -101,6 +106,7 @@ class DefaultParticleFileHandler(ParticleFileHandler):
fname = "part_{iout:05d}.out{icpu:05d}"
file_descriptor = "part_file_descriptor.txt"
config_field = "ramses-particles"
reader = _ramses_particle_binary_file_handler

attrs = (
("ncpu", 1, "i"),
Expand Down Expand Up @@ -144,7 +150,7 @@ def read_header(self):
extra_particle_fields = self.ds._extra_particle_fields

if self.has_descriptor:
particle_fields = _read_part_file_descriptor(self.file_descriptor)
particle_fields = _read_part_binary_file_descriptor(self.file_descriptor)
else:
particle_fields = list(self.known_fields)

Expand Down Expand Up @@ -202,6 +208,7 @@ class SinkParticleFileHandler(ParticleFileHandler):
fname = "sink_{iout:05d}.out{icpu:05d}"
file_descriptor = "sink_file_descriptor.txt"
config_field = "ramses-sink-particles"
reader = _ramses_particle_binary_file_handler

attrs = (("nsink", 1, "i"), ("nindsink", 1, "i"))

Expand Down Expand Up @@ -258,7 +265,7 @@ def read_header(self):

# Read the fields + add the sink properties
if self.has_descriptor:
fields = _read_part_file_descriptor(self.file_descriptor)
fields = _read_part_binary_file_descriptor(self.file_descriptor)
else:
fields = list(self.known_fields)

Expand All @@ -282,3 +289,36 @@ def read_header(self):
self.field_offsets = field_offsets
self.field_types = _pfields
fd.close()


class SinkParticleFileHandlerCsv(ParticleFileHandler):
    """Handler for sink particles stored as csv files, the format produced
    by the default RAMSES sink-particle algorithm."""

    ptype = "sink_csv"
    fname = "sink_{iout:05d}.csv"
    file_descriptor = None
    config_field = "ramses-sink-particles"
    reader = _ramses_particle_csv_file_handler
    attrs = (("nsink", 1, "i"), ("nindsink", 1, "i"))
    second_read = False

    def read_header(self):
        """Parse the csv header: record one column offset per field and
        the number of sink particles found in the file."""
        if not self.exists:
            # No sink file for this domain: expose empty metadata.
            self.field_offsets = {}
            self.field_types = {}
            self.local_particle_count = 0
            return

        fields, self.local_particle_count = _read_part_csv_file_descriptor(self.fname)

        # Each field occupies one csv column; every value is read as a double.
        self.field_offsets = {
            (self.ptype, field): icol for icol, field in enumerate(fields)
        }
        self.field_types = {(self.ptype, field): "d" for field in fields}

    def sec_read(self):
        # Everything is gathered in read_header; nothing to do on a second pass.
        pass
6 changes: 6 additions & 0 deletions yt/utilities/on_demand_imports.py
Original file line number Diff line number Diff line change
Expand Up @@ -485,6 +485,12 @@ def concat(self):
from pandas import concat

return concat

@safe_import
def read_csv(self):
    """Lazily import and expose :func:`pandas.read_csv`."""
    import pandas

    return pandas.read_csv


_pandas = pandas_imports()
Expand Down

0 comments on commit 65ed224

Please sign in to comment.