diff --git a/pyproject.toml b/pyproject.toml index 92babb0e6572..0ebf4190c842 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -19,3 +19,4 @@ preview = true profile = "black" line_length = 80 skip_gitignore = true +known_first_party = ["spectre"] diff --git a/src/Visualization/Python/Plot.py b/src/Visualization/Python/Plot.py index eb05dbafd3b8..2bebaa1fdf28 100644 --- a/src/Visualization/Python/Plot.py +++ b/src/Visualization/Python/Plot.py @@ -12,6 +12,10 @@ logger = logging.getLogger(__name__) +DEFAULT_MPL_STYLESHEET = os.path.join( + os.path.dirname(__file__), "plots.mplstyle" +) + def apply_stylesheet_command(): """Add a CLI option to apply a stylesheet for plotting""" @@ -35,9 +39,7 @@ def decorator(f): @functools.wraps(f) def command(stylesheet, **kwargs): # Apply the default stylesheet and the user-provided one - stylesheets = [ - os.path.join(os.path.dirname(__file__), "plots.mplstyle") - ] + stylesheets = [DEFAULT_MPL_STYLESHEET] if stylesheet is not None: stylesheets.append(stylesheet) plt.style.use(stylesheets) diff --git a/src/Visualization/Python/PlotCce.py b/src/Visualization/Python/PlotCce.py index dba2fd941cae..c9fb37ea5760 100755 --- a/src/Visualization/Python/PlotCce.py +++ b/src/Visualization/Python/PlotCce.py @@ -34,97 +34,18 @@ def _parse_modes(ctx, param, all_modes): return result -@click.command(name="cce") -@click.argument( - "h5_filename", - type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), - nargs=1, -) -@click.option( - "--modes", - "-m", - multiple=True, - callback=_parse_modes, - required=True, - help=( - "Which mode to plot. Specified as 'l,m' (e.g. '--modes 2,2'). Will plot" - " both real and imaginary components unless '--real' or '--imag' are" - " specified. Can be specified multiple times." - ), -) -@click.option( - "--real", - is_flag=True, - default=False, - help="Plot only real modes. Mutually exclusive with '--imag'.", -) -@click.option( - "--imag", - is_flag=True, - default=False, - help="Plot only imaginary modes. Mutually exclusive with '--real'.", -) -@click.option( - "--extraction-radius", - "-r", - type=int, - help=( - "Extraction radius of data to plot as an int. If there is only one Cce" - " subfile, that one will be used and this option does not need to be" - " specified. The expected form of the Cce subfile is 'SpectreRXXXX.cce'" - " where XXXX is the zero-padded integer extraction radius. This option" - " is ignored if the backwards compatibility option '--cce-group'/'-d'" - " is specified." - ), -) -@click.option( - "--list-extraction-radii", - "-l", - is_flag=True, - default=False, - help="List Cce subfiles in the 'h5_filename' and exit.", -) -@click.option( - "--cce-group", - "-d", - "backward_cce_group", - help=( - "Option for backwards compatibility with an old version of CCE data." - " This is the group name of the CCE data in the 'h5_filename'" - " (typically Cce). This option should only be used if your CCE data was" - " produced with a version of SpECTRE prior to this Pull Request:" - " https://github.com/sxs-collaboration/spectre/pull/5985." 
- ), -) -# Plotting options -@click.option( - "--x-bounds", - type=float, - nargs=2, - help="The lower and upper bounds of the x-axis.", -) -@click.option( - "--x-label", - help="The label on the x-axis.", -) -@click.option( - "--title", - "-t", - help="Title of the graph.", -) -@apply_stylesheet_command() -@show_or_save_plot_command() -def plot_cce_command( +def plot_cce( h5_filename: str, modes: Sequence[str], - real: bool, - imag: bool, - extraction_radius: Optional[int], - list_extraction_radii: bool, - backward_cce_group: Optional[str], - x_bounds: Optional[Sequence[float]], - x_label: Optional[str], - title: Optional[str], + real: bool = False, + imag: bool = False, + extraction_radius: Optional[int] = None, + list_extraction_radii: bool = False, + backward_cce_group: Optional[str] = None, + x_bounds: Optional[Sequence[float]] = None, + x_label: Optional[str] = None, + title: Optional[str] = None, + fig: Optional[plt.Figure] = None, ): """ Plot the Strain, News, and Psi0-Psi4 from the output of a SpECTRE CCE run. @@ -233,8 +154,9 @@ def plot_cce_command( data = data[(data.index >= x_bounds[0]) & (data.index <= x_bounds[1])] # Set up the plots - fig, axes = plt.subplots(len(plot_quantities), 1, sharex=True) - axes = list(axes) + if fig is None: + fig = plt.figure(figsize=(8, 2 * len(plot_quantities))) + axes = list(fig.subplots(len(plot_quantities), 1, sharex=True)) # Make the legend look a bit nicer divisor = 1 if (real or imag) else 2 num_col = min(4, len(modes) / divisor) @@ -276,9 +198,94 @@ def plot_cce_command( axes[-1].set_xlabel(x_label) # Plot needs to be fairly big - fig.set_size_inches(8, 2 * len(plot_quantities)) if title: plt.suptitle(title) + return fig + + +@click.command(name="cce", help=plot_cce.__doc__) +@click.argument( + "h5_filename", + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + nargs=1, +) +@click.option( + "--modes", + "-m", + multiple=True, + callback=_parse_modes, + required=True, + help=( + "Which mode to plot. Specified as 'l,m' (e.g. '--modes 2,2'). Will plot" + " both real and imaginary components unless '--real' or '--imag' are" + " specified. Can be specified multiple times." + ), +) +@click.option( + "--real", + is_flag=True, + default=False, + help="Plot only real modes. Mutually exclusive with '--imag'.", +) +@click.option( + "--imag", + is_flag=True, + default=False, + help="Plot only imaginary modes. Mutually exclusive with '--real'.", +) +@click.option( + "--extraction-radius", + "-r", + type=int, + help=( + "Extraction radius of data to plot as an int. If there is only one Cce" + " subfile, that one will be used and this option does not need to be" + " specified. The expected form of the Cce subfile is 'SpectreRXXXX.cce'" + " where XXXX is the zero-padded integer extraction radius. This option" + " is ignored if the backwards compatibility option '--cce-group'/'-d'" + " is specified." + ), +) +@click.option( + "--list-extraction-radii", + "-l", + is_flag=True, + default=False, + help="List Cce subfiles in the 'h5_filename' and exit.", +) +@click.option( + "--cce-group", + "-d", + "backward_cce_group", + help=( + "Option for backwards compatibility with an old version of CCE data." + " This is the group name of the CCE data in the 'h5_filename'" + " (typically Cce). This option should only be used if your CCE data was" + " produced with a version of SpECTRE prior to this Pull Request:" + " https://github.com/sxs-collaboration/spectre/pull/5985." 
+ ), +) +# Plotting options +@click.option( + "--x-bounds", + type=float, + nargs=2, + help="The lower and upper bounds of the x-axis.", +) +@click.option( + "--x-label", + help="The label on the x-axis.", +) +@click.option( + "--title", + "-t", + help="Title of the graph.", +) +@apply_stylesheet_command() +@show_or_save_plot_command() +def plot_cce_command(**kwargs): + _rich_traceback_guard = True # Hide traceback until here + plot_cce(**kwargs) if __name__ == "__main__": diff --git a/src/Visualization/Python/PlotControlSystem.py b/src/Visualization/Python/PlotControlSystem.py index 5311568238a0..9b9a4c8bce0b 100755 --- a/src/Visualization/Python/PlotControlSystem.py +++ b/src/Visualization/Python/PlotControlSystem.py @@ -22,69 +22,14 @@ logger = logging.getLogger(__name__) -@click.command(name="control-system") -@click.argument( - "reduction_files", - type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), - nargs=-1, - required=True, -) -@click.option( - "--with-shape/--without-shape", - default=True, - show_default=True, - help="Wether or not to plot shape control.", -) -@click.option( - "--shape-l_max", - "-l", - type=int, - default=2, - show_default=True, - help=( - "The max number of spherical harmonics to show on the plot. Since" - " higher ell can have a lot of components, it may be desirable to show" - " fewer components. Never plots l=0,1 since we don't control these" - " components. Only used if '--with-shape'." - ), -) -@click.option( - "--show-all-m", - is_flag=True, - default=False, - show_default=True, - help=( - "When plotting shape control, for a given ell, plot all m components." - " Default is, for a given ell, to plot the L2 norm over all the m" - " components. Only used if '--with-shape'." - ), -) -# Plotting options -@click.option( - "--x-bounds", - type=float, - nargs=2, - help="The lower and upper bounds of the x-axis.", -) -@click.option( - "--x-label", - help="The label on the x-axis.", -) -@click.option( - "--title", - "-t", - help="Title of the graph.", -) -@apply_stylesheet_command() -@show_or_save_plot_command() -def plot_control_system_command( +def plot_control_system( reduction_files: Sequence[str], - with_shape: str, - show_all_m: bool, - shape_l_max: int, - x_bounds: Optional[Sequence[float]], - x_label: Optional[str], - title: Optional[str], + with_shape: bool = True, + show_all_m: bool = False, + shape_l_max: int = 2, + x_bounds: Optional[Sequence[float]] = None, + x_label: Optional[str] = None, + title: Optional[str] = None, ): """ Plot diagnostic information regarding all control systems except size @@ -278,6 +223,67 @@ def extract_relevant_columns( if title: fig.suptitle(title) axes[1].legend(loc="center left", bbox_to_anchor=(1.01, 1.1)) + return fig + + +@click.command(name="control-system", help=plot_control_system.__doc__) +@click.argument( + "reduction_files", + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + nargs=-1, + required=True, +) +@click.option( + "--with-shape/--without-shape", + default=True, + show_default=True, + help="Wether or not to plot shape control.", +) +@click.option( + "--shape-l_max", + "-l", + type=int, + default=2, + show_default=True, + help=( + "The max number of spherical harmonics to show on the plot. Since" + " higher ell can have a lot of components, it may be desirable to show" + " fewer components. Never plots l=0,1 since we don't control these" + " components. Only used if '--with-shape'." 
+ ), +) +@click.option( + "--show-all-m", + is_flag=True, + default=False, + show_default=True, + help=( + "When plotting shape control, for a given ell, plot all m components." + " Default is, for a given ell, to plot the L2 norm over all the m" + " components. Only used if '--with-shape'." + ), +) +# Plotting options +@click.option( + "--x-bounds", + type=float, + nargs=2, + help="The lower and upper bounds of the x-axis.", +) +@click.option( + "--x-label", + help="The label on the x-axis.", +) +@click.option( + "--title", + "-t", + help="Title of the graph.", +) +@apply_stylesheet_command() +@show_or_save_plot_command() +def plot_control_system_command(**kwargs): + _rich_traceback_guard = True # Hide traceback until here + return plot_control_system(**kwargs) if __name__ == "__main__": diff --git a/src/Visualization/Python/PlotEllipticConvergence.py b/src/Visualization/Python/PlotEllipticConvergence.py index 985df5eb3d88..13a390ad0390 100644 --- a/src/Visualization/Python/PlotEllipticConvergence.py +++ b/src/Visualization/Python/PlotEllipticConvergence.py @@ -36,7 +36,7 @@ def split_iteration_sequence(data: pd.DataFrame) -> List[pd.DataFrame]: def plot_elliptic_convergence( h5_file, - axes=None, + fig=None, linear_residuals_subfile_name="GmresResiduals.dat", nonlinear_residuals_subfile_name="NewtonRaphsonResiduals.dat", ): @@ -44,8 +44,7 @@ def plot_elliptic_convergence( Arguments: h5_file: The H5 reductions file. - axes: Optional. The matplotlib axes to plot on. Should be a tuple of two - axes, the first for the residuals and the second for the walltime. + fig: Optional. The matplotlib figure to plot in. linear_residuals_subfile_name: The name of the subfile containing the linear solver residuals. nonlinear_residuals_subfile_name: The name of the subfile containing the @@ -82,12 +81,11 @@ def plot_elliptic_convergence( else linear_residuals )[0]["Residual"].iloc[0] # Plot nonlinear solver residuals - if axes is None: - fig, (ax_residual, ax_time) = plt.subplots( - nrows=2, ncols=1, sharex=True, gridspec_kw={"height_ratios": [3, 1]} - ) - else: - ax_residual, ax_time = axes + if fig is None: + fig = plt.figure() + ax_residual, ax_time = fig.subplots( + nrows=2, ncols=1, sharex=True, gridspec_kw={"height_ratios": [3, 1]} + ) if nonlinear_residuals is not None: m = 0 for i, residuals in enumerate(nonlinear_residuals): @@ -150,7 +148,6 @@ def plot_elliptic_convergence( ) # Configure the axes - ax_residual.set_title("Elliptic solver convergence") ax_residual.set_yscale("log") ax_residual.grid() ax_residual.legend() @@ -161,6 +158,7 @@ def plot_elliptic_convergence( ax_time.set_xlabel("Cumulative linear solver iteration") # Allow only integer ticks for the x-axis ax_time.xaxis.set_major_locator(MaxNLocator(integer=True)) + return fig @click.command(name="elliptic-convergence") @@ -185,7 +183,7 @@ def plot_elliptic_convergence( def plot_elliptic_convergence_command(**kwargs): """Plot elliptic solver convergence""" _rich_traceback_guard = True # Hide traceback until here - plot_elliptic_convergence(**kwargs) + return plot_elliptic_convergence(**kwargs) if __name__ == "__main__": diff --git a/src/Visualization/Python/PlotSizeControl.py b/src/Visualization/Python/PlotSizeControl.py index eea91fdea5d9..62ec4f11ab74 100755 --- a/src/Visualization/Python/PlotSizeControl.py +++ b/src/Visualization/Python/PlotSizeControl.py @@ -22,46 +22,12 @@ logger = logging.getLogger(__name__) -@click.command(name="size-control") -@click.argument( - "reduction_files", - type=click.Path(exists=True, file_okay=True, 
dir_okay=False, readable=True), - nargs=-1, - required=True, -) -@click.option( - "--object-label", - "-d", - required=True, - help=( - "Which object to plot. This is either 'A', 'B', or 'None'. 'None' is" - " used when there is only one black hole in the simulation." - ), -) -# Plotting options -@click.option( - "--x-bounds", - type=float, - nargs=2, - help="The lower and upper bounds of the x-axis.", -) -@click.option( - "--x-label", - help="The label on the x-axis.", -) -@click.option( - "--title", - "-t", - help="Title of the graph.", -) -@apply_stylesheet_command() -@show_or_save_plot_command() -def plot_size_control_command( +def plot_size_control( reduction_files: Sequence[str], object_label: str, - x_bounds: Optional[Sequence[float]], - x_label: Optional[str], - title: Optional[str], + x_bounds: Optional[Sequence[float]] = None, + x_label: Optional[str] = None, + title: Optional[str] = None, ): """ Plot diagnostic information regarding the Size control system. @@ -224,6 +190,46 @@ def check_diagnostics_file(h5_filename): ) else: axes[i].legend(loc="center left", bbox_to_anchor=(1, 0.5)) + return fig + + +@click.command(name="size-control", help=plot_size_control.__doc__) +@click.argument( + "reduction_files", + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + nargs=-1, + required=True, +) +@click.option( + "--object-label", + "-d", + required=True, + help=( + "Which object to plot. This is either 'A', 'B', or 'None'. 'None' is" + " used when there is only one black hole in the simulation." + ), +) +# Plotting options +@click.option( + "--x-bounds", + type=float, + nargs=2, + help="The lower and upper bounds of the x-axis.", +) +@click.option( + "--x-label", + help="The label on the x-axis.", +) +@click.option( + "--title", + "-t", + help="Title of the graph.", +) +@apply_stylesheet_command() +@show_or_save_plot_command() +def plot_size_control_command(**kwargs): + _rich_traceback_guard = True # Hide traceback until here + return plot_size_control(**kwargs) if __name__ == "__main__": diff --git a/src/Visualization/Python/PlotTrajectories.py b/src/Visualization/Python/PlotTrajectories.py index 669d159d73b3..e6d824aac3cd 100644 --- a/src/Visualization/Python/PlotTrajectories.py +++ b/src/Visualization/Python/PlotTrajectories.py @@ -17,7 +17,11 @@ ) -def import_A_and_B(filenames, subfile_name_aha, subfile_name_ahb): +def import_A_and_B( + filenames, + subfile_name_aha="ApparentHorizons/ControlSystemAhA_Centers.dat", + subfile_name_ahb="ApparentHorizons/ControlSystemAhB_Centers.dat", +): A_data = [] B_data = [] @@ -25,12 +29,12 @@ def import_A_and_B(filenames, subfile_name_aha, subfile_name_ahb): with h5py.File(filename, "r") as file: A_data.append( np.array( - file[subfile_name_aha][:, [4, 5, 6]] + file[subfile_name_aha][:, [0, 4, 5, 6]] ) # 0 ->time, 4 -> x, 5 -> y, 6 -> z ) B_data.append( np.array( - file[subfile_name_ahb][:, [4, 5, 6]] + file[subfile_name_ahb][:, [0, 4, 5, 6]] ) # 0 ->time, 4 -> x, 5 -> y, 6 -> z ) @@ -54,10 +58,10 @@ def plot_trajectory(AhA: np.ndarray, AhB: np.ndarray, fig=None): value of 15, to speed up the plots and avoid memory error. Arguments: - AhA: Array of shape (num_points, 3) with the coordinates of - the first object. - AhB: Array of shape (num_points, 3) with the coordinates of - the second object. + AhA: Array of shape (num_points, 4) with the time and coordinates of + the first object. + AhB: Array of shape (num_points, 4) with the time and coordinates of + the second object. 
fig: Matplotlib figure object used for plotting. Subplots will be added to this figure. If None, a new figure will be created. """ @@ -66,10 +70,10 @@ def plot_trajectory(AhA: np.ndarray, AhB: np.ndarray, fig=None): # Plot 3D trajectories ax1 = fig.add_subplot(2, 2, 1, projection="3d") - ax1.plot(*AhA.T, color="C0", label="AhA") - ax1.scatter(*AhA[-1], color="C0") - ax1.plot(*AhB.T, color="C1", label="AhB") - ax1.scatter(*AhB[-1], color="C1") + ax1.plot(*AhA[:, 1:].T, color="C0", label="AhA") + ax1.scatter(*AhA[-1, 1:], color="C0") + ax1.plot(*AhB[:, 1:].T, color="C1", label="AhB") + ax1.scatter(*AhB[-1, 1:], color="C1") ax1.set_xlabel("X") ax1.set_ylabel("Y") ax1.set_zlabel("Z") @@ -77,7 +81,7 @@ def plot_trajectory(AhA: np.ndarray, AhB: np.ndarray, fig=None): ax1.legend() # Calculate coordinate separation in 3D - separation_3d = AhA - AhB + separation_3d = AhA[:, 1:] - AhB[:, 1:] # Plot 3D coordinate separation ax2 = fig.add_subplot(2, 2, 2, projection="3d") @@ -90,10 +94,10 @@ def plot_trajectory(AhA: np.ndarray, AhB: np.ndarray, fig=None): # Plot 2D trajectories ax3 = fig.add_subplot(2, 2, 3) - ax3.plot(*AhA[:, 0:2].T, label="AhA", color="C0") - ax3.scatter(*AhA[-1, 0:2], color="C0") - ax3.plot(*AhB[:, 0:2].T, label="AhB", color="C1") - ax3.scatter(*AhB[-1, 0:2], color="C1") + ax3.plot(*AhA[:, 1:3].T, label="AhA", color="C0") + ax3.scatter(*AhA[-1, 1:3], color="C0") + ax3.plot(*AhB[:, 1:3].T, label="AhB", color="C1") + ax3.scatter(*AhB[-1, 1:3], color="C1") ax3.set_xlabel("x") ax3.set_ylabel("y") ax3.legend() @@ -102,7 +106,7 @@ def plot_trajectory(AhA: np.ndarray, AhB: np.ndarray, fig=None): ax3.grid(True) # Add gridlines # Calculate coordinate separation in 2D - separation_2d = AhA[:, 0:2] - AhB[:, 0:2] + separation_2d = AhA[:, 1:3] - AhB[:, 1:3] # Plot coordinate separation in 2D ax4 = fig.add_subplot(2, 2, 4) ax4.plot(*separation_2d.T, color="black") @@ -114,6 +118,7 @@ def plot_trajectory(AhA: np.ndarray, AhB: np.ndarray, fig=None): ax4.grid(True) # Add gridlines plt.subplots_adjust(wspace=0.3, hspace=0.3) + return fig @click.command(name="trajectories") @@ -169,7 +174,7 @@ def plot_trajectories_command( """ AhA, AhB = import_A_and_B(h5_files, subfile_name_aha, subfile_name_ahb) fig = plt.figure(figsize=figsize) - plot_trajectory(AhA[::sample_rate], AhB[::sample_rate], fig) + return plot_trajectory(AhA[::sample_rate], AhB[::sample_rate], fig) if __name__ == "__main__": diff --git a/src/Visualization/Python/Render3D/Domain.py b/src/Visualization/Python/Render3D/Domain.py index 559c51035163..7a0481f78dae 100644 --- a/src/Visualization/Python/Render3D/Domain.py +++ b/src/Visualization/Python/Render3D/Domain.py @@ -1,6 +1,8 @@ # Distributed under the MIT License. # See LICENSE.txt for details. +from typing import Optional + import click import numpy as np import rich @@ -16,89 +18,18 @@ def _parse_step(ctx, param, value): return int(value) -@click.command(name="domain") -@click.argument( - "xmf_file", - type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), -) -@click.argument( - "hi_res_xmf_file", - type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), - required=False, -) -@click.option( - "--output", - "-o", - type=click.Path(writable=True), - required=True, - help="Output file. Include extension such as '.png'.", -) -@click.option( - "--time-step", - "-t", - callback=_parse_step, - default="first", - show_default=True, - help=( - "Select a time step. Specify '-1' or 'last' to select the last time" - " step." 
- ), -) -@click.option( - "--animate", is_flag=True, help="Produce an animation of all time steps." -) -@click.option("zoom_factor", "--zoom", help="Zoom factor.", default=1.0) -@click.option( - "--camera-theta", - type=float, - default=0.0, - help="Viewing angle from the z-axis in degrees.", - show_default=True, -) -@click.option( - "--camera-phi", - type=float, - default=0.0, - help="Viewing angle around the z-axis in degrees.", - show_default=True, -) -@click.option( - "--clip-origin", - "--slice-origin", - nargs=3, - type=float, - default=(0.0, 0.0, 0.0), - help="Origin of the clipping plane", - show_default=True, -) -@click.option( - "--clip-normal", - "--slice-normal", - nargs=3, - type=float, - default=(0.0, 0.0, 1.0), - help="Normal of the clipping plane", - show_default=True, -) -@click.option( - "--slice/--clip", - "slice", - default=False, - help="Use a slice instead of a clip.", - show_default=True, -) -def render_domain_command( - xmf_file, - hi_res_xmf_file, - output, - time_step, - animate, - zoom_factor, - camera_theta, - camera_phi, - clip_origin, - clip_normal, - slice, +def render_domain( + xmf_file: str, + output: str, + hi_res_xmf_file: Optional[str] = None, + time_step: int = 0, + animate: bool = False, + zoom_factor: float = 1.0, + camera_theta: float = 0.0, + camera_phi: float = 0.0, + clip_origin: tuple[float, float, float] = (0.0, 0.0, 0.0), + clip_normal: tuple[float, float, float] = (0.0, 0.0, 1.0), + slice: bool = False, ): """Renders a 3D domain with elements and grid lines @@ -185,8 +116,85 @@ def slice_or_clip(triangulate, **kwargs): pv.SaveAnimation(output, render_view) else: render_view.ViewTime = volume_data.TimestepValues[time_step] + pv.Render() pv.SaveScreenshot(output, render_view) +@click.command(name="domain", help=render_domain.__doc__) +@click.argument( + "xmf_file", + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), +) +@click.argument( + "hi_res_xmf_file", + type=click.Path(exists=True, file_okay=True, dir_okay=False, readable=True), + required=False, +) +@click.option( + "--output", + "-o", + type=click.Path(writable=True), + required=True, + help="Output file. Include extension such as '.png'.", +) +@click.option( + "--time-step", + "-t", + callback=_parse_step, + default="first", + show_default=True, + help=( + "Select a time step. Specify '-1' or 'last' to select the last time" + " step." + ), +) +@click.option( + "--animate", is_flag=True, help="Produce an animation of all time steps." 
+) +@click.option("zoom_factor", "--zoom", help="Zoom factor.", default=1.0) +@click.option( + "--camera-theta", + type=float, + default=0.0, + help="Viewing angle from the z-axis in degrees.", + show_default=True, +) +@click.option( + "--camera-phi", + type=float, + default=0.0, + help="Viewing angle around the z-axis in degrees.", + show_default=True, +) +@click.option( + "--clip-origin", + "--slice-origin", + nargs=3, + type=float, + default=(0.0, 0.0, 0.0), + help="Origin of the clipping plane", + show_default=True, +) +@click.option( + "--clip-normal", + "--slice-normal", + nargs=3, + type=float, + default=(0.0, 0.0, 1.0), + help="Normal of the clipping plane", + show_default=True, +) +@click.option( + "--slice/--clip", + "slice", + default=False, + help="Use a slice instead of a clip.", + show_default=True, +) +def render_domain_command(**kwargs): + _rich_traceback_guard = True # Hide traceback until here + render_domain(**kwargs) + + if __name__ == "__main__": render_domain_command(help_option_names=["-h", "--help"]) diff --git a/support/Pipelines/EccentricityControl/EccentricityControl.py b/support/Pipelines/EccentricityControl/EccentricityControl.py index cb7cab68886c..636b73c10b92 100644 --- a/support/Pipelines/EccentricityControl/EccentricityControl.py +++ b/support/Pipelines/EccentricityControl/EccentricityControl.py @@ -120,10 +120,7 @@ def compute_separation(h5_file, subfile_name_aha, subfile_name_ahb): ) # Compute separation - num_obs = len(ObjectA_centers[:, 0]) - - if len(ObjectB_centers[:, 0]) < num_obs: - num_obs = len(ObjectB_centers[:, 0]) + num_obs = min(len(ObjectA_centers[:, 0]), len(ObjectB_centers[:, 0])) # Separation vector separation_vec = ( @@ -132,7 +129,7 @@ def compute_separation(h5_file, subfile_name_aha, subfile_name_ahb): # Compute separation norm separation_norm = np.zeros((num_obs, 2)) - separation_norm[:, 0] = ObjectA_centers[:, 0] + separation_norm[:, 0] = ObjectA_centers[:num_obs, 0] separation_norm[:, 1] = np.linalg.norm(separation_vec, axis=1) return separation_norm @@ -221,16 +218,15 @@ def compute_coord_sep_updates( def coordinate_separation_eccentricity_control_digest( - h5_file, x, y, data, functions, output=None + h5_file, x, y, data, functions, fig=None ): - """Print and output for eccentricity control""" + """Print and plot for eccentricity control""" - if output is not None: + if fig is not None: traw = data[:, 0] sraw = data[:, 1] # Plot coordinate separation - fig, axes = plt.subplots(2, 2) - ((ax1, ax2), (ax3, ax4)) = axes + ((ax1, ax2), (ax3, ax4)) = fig.subplots(2, 2) fig.suptitle( h5_file, color="b", @@ -256,7 +252,6 @@ def coordinate_separation_eccentricity_control_digest( f" {rms:4.3g} ====" ) - style = "--" F = func["function"] eccentricity = func["fit result"]["eccentricity"] @@ -300,13 +295,12 @@ def coordinate_separation_eccentricity_control_digest( ) # Plot - if output is not None: + if fig is not None: errfunc = lambda p, x, y: F(p, x) - y # Plot dD/dt ax1.plot( x, F(p, x), - style, label=( f"{expression:s} \n rms = {rms:2.1e}, ecc =" f" {eccentricity:4.5f}" @@ -315,18 +309,11 @@ def coordinate_separation_eccentricity_control_digest( ax_handles, ax_labels = ax1.get_legend_handles_labels() # Plot residual - ax3.plot(x, errfunc(p, x, y), style, label=expression) + ax3.plot(x, errfunc(p, x, y), label=expression) ax3.set_title("Residual") ax4.legend(ax_handles, ax_labels) - plt.tight_layout() - - if output is not None: - plt.savefig(output, format="pdf") - - return - def coordinate_separation_eccentricity_control( h5_file, @@ 
-336,7 +323,7 @@ def coordinate_separation_eccentricity_control( tmax, angular_velocity_from_xcts, expansion_from_xcts, - output=None, + fig: plt.Figure = None, ): """Compute updates based on fits to the coordinate separation for manual eccentricity control @@ -357,8 +344,8 @@ def coordinate_separation_eccentricity_control( The updates are computed using Newtonian estimates. See ArXiv:gr-qc/0702106 and ArXiv:0710.0158 for more details. - A summary is printed to screen and if an output file is provided, a plot - is generated. The latter is useful to decide between the updates of + A summary is printed to screen and if a matplotlib figure is provided, a + plot is generated. The latter is useful to decide between the updates of different models (look for small residuals at early times). See OmegaDoEccRemoval.py in SpEC for improved eccentricity control. @@ -497,7 +484,7 @@ def coordinate_separation_eccentricity_control( y=dsdt, data=data, functions=functions, - output=output, + fig=fig, ) return functions @@ -553,13 +540,8 @@ def coordinate_separation_eccentricity_control( nargs=1, help="Value of the expansion velocity (adot) used in the Xcts file.", ) -@click.option( - "--output", - "-o", - type=click.Path(file_okay=True, dir_okay=False, writable=True), - help="Name of the output plot file in pdf.", -) @apply_stylesheet_command() +@show_or_save_plot_command() def eccentricity_control_command( h5_file, subfile_name_aha, @@ -568,7 +550,6 @@ def eccentricity_control_command( tmax, angular_velocity_from_xcts, expansion_from_xcts, - output, ): """Compute updates based on fits to the coordinate separation for manual eccentricity control @@ -603,6 +584,7 @@ def eccentricity_control_command( See OmegaDoEccRemoval.py in SpEC for improved eccentricity control. """ + fig = plt.figure() functions = coordinate_separation_eccentricity_control( h5_file=h5_file, subfile_name_aha=subfile_name_aha, @@ -611,7 +593,6 @@ def eccentricity_control_command( tmax=tmax, angular_velocity_from_xcts=angular_velocity_from_xcts, expansion_from_xcts=expansion_from_xcts, - output=output, + fig=fig, ) - - return + return fig diff --git a/tests/support/Pipelines/EccentricityControl/Test_EccentricityControl.py b/tests/support/Pipelines/EccentricityControl/Test_EccentricityControl.py index 7d7fa375d5b2..3464f1ffd2ac 100644 --- a/tests/support/Pipelines/EccentricityControl/Test_EccentricityControl.py +++ b/tests/support/Pipelines/EccentricityControl/Test_EccentricityControl.py @@ -186,7 +186,7 @@ def test_output_parameters(self): tmax=1200.0, angular_velocity_from_xcts=mock_angular_velocity_from_xcts, expansion_from_xcts=mock_expansion_from_xcts, - output=None, + fig=None, ) for name, func in functions.items(): diff --git a/tools/Status/Dashboard.py b/tools/Status/Dashboard.py new file mode 100644 index 000000000000..49d2fc252bfd --- /dev/null +++ b/tools/Status/Dashboard.py @@ -0,0 +1,193 @@ +# Distributed under the MIT License. +# See LICENSE.txt for details. + +"""Experimental dashboard for monitoring simulations. + +This script provides a [Streamlit](https://streamlit.io) dashboard for +monitoring simulations. It uses the 'spectre.tools.Status' module to fetch +information about the jobs and then displays lots of plots and metrics that are +updated in real time. 
+
+To use this experimental feature, first make sure you have the following Python
+packages installed in your environment in addition to the regular Python
+dependencies:
+
+```sh
+pip install streamlit streamlit-autorefresh plotly
+```
+
+To spin up the dashboard, run the following command:
+
+```sh
+$BUILD_DIR/bin/python-spectre -m streamlit \
+    run $SPECTRE_HOME/tools/Status/Dashboard.py
+```
+
+The dashboard will be available at the following URL by default:
+
+- Dashboard: http://localhost:8501
+
+You can forward the port through your SSH connection to open the dashboard in
+your local browser. Note that VSCode forwards ports automatically and also
+provides you with a simple browser to view the dashboard within VSCode if you
+prefer.
+
+See the [Streamlit docs](https://docs.streamlit.io/develop/api-reference/cli/run)
+for details.
+"""
+
+import logging
+from functools import partial
+from pathlib import Path
+
+import matplotlib.pyplot as plt
+import pandas as pd
+import streamlit as st
+import yaml
+from streamlit_autorefresh import st_autorefresh
+
+from spectre.tools.Status.Status import (
+    DEFAULT_COLUMNS,
+    fetch_status,
+    match_executable_status,
+)
+from spectre.Visualization.Plot import DEFAULT_MPL_STYLESHEET
+
+logger = logging.getLogger(__name__)
+
+plt.style.use(DEFAULT_MPL_STYLESHEET)
+
+STATE_ICONS = {
+    "RUNNING": ":material/arrow_forward:",
+    "COMPLETED": ":material/check_circle:",
+    "PENDING": ":material/pending:",
+    "FAILED": ":material/block:",
+    "TIMEOUT": ":material/hourglass_disabled:",
+    "CANCELLED": ":material/cancel:",
+}
+
+
+def _render_page(job):
+    st.header(job["JobName"])
+
+    # Print run directory
+    run_dir = Path(job["WorkDir"])
+    st.write(run_dir)
+
+    # Print README
+    readme_files = [
+        run_dir / "README.md",
+    ]
+    if job["SegmentsDir"]:
+        readme_files += [
+            Path(job["SegmentsDir"]) / "README.md",
+            Path(job["SegmentsDir"]) / "../README.md",
+        ]
+    for readme_file in readme_files[::-1]:
+        if readme_file.exists():
+            st.markdown(readme_file.read_text())
+
+    # Organize into tabs
+    status_tab, input_file_tab, outfile_tab = st.tabs(
+        ["Status", Path(job["InputFile"]).name, "spectre.out"]
+    )
+
+    # Print input file
+    with input_file_tab:
+        st.code(
+            Path(job["InputFile"]).read_text(),
+            language="yaml",
+            line_numbers=True,
+        )
+
+    # Print outfile
+    with outfile_tab:
+        outfile = run_dir / "spectre.out"
+        if outfile.exists():
+            # Show only the last 50 lines because showing the full file can be
+            # slow. We could show a button to load more lines.
+ with open(outfile, "r") as open_outfile: + st.code("".join(["...\n"] + open_outfile.readlines()[-50:])) + else: + st.write("No spectre.out file found.") + + # Render job status + with status_tab: + columns = list(DEFAULT_COLUMNS) + columns.remove("User") + columns.remove("JobName") + st.table(pd.DataFrame([job[columns]]).set_index("JobID")) + + # Render status metrics + executable_status = match_executable_status(job["ExecutableName"]) + with open(job["InputFile"], "r") as open_input_file: + _, input_file = yaml.safe_load_all(open_input_file) + status = executable_status.status(input_file, job["WorkDir"]) + for (field, unit), col in zip( + executable_status.fields.items(), + st.columns(len(executable_status.fields)), + ): + col.metric( + (field + f" [{unit}]") if unit else field, + ( + executable_status.format(field, status[field]) + if field in status + else "-" + ), + ) + + # Executable-specific dashboard + executable_status.render_dashboard(job, input_file) + + +# Fetch the job data +job_data = fetch_status( + user=None, + allusers=st.sidebar.toggle("Show all users", False), + starttime=st.sidebar.text_input("Start time", "now-1day"), +) +if len(job_data) > 0: + job_data.sort_values("JobID", inplace=True, ascending=False) + job_data.dropna(subset=["WorkDir", "ExecutableName"], inplace=True) + +# Refresh the page regularly +if st.sidebar.toggle("Auto-refresh", True): + refresh_interval = st.sidebar.number_input( + "Refresh interval [s]", min_value=1, value=30, format="%d" + ) + count = st_autorefresh(interval=refresh_interval * 1000) + +# Each job gets its own page. Jobs are grouped by user. +pages = {} +for username, user_data in job_data.groupby("User"): + pages_user = [] + for _, job in user_data.iterrows(): + # Get a somewhat descriptive title for the page. This can be improved. + # We probably want to make the job name more descriptive and show that + # here. + display_dir = ( + Path(job["SegmentsDir"]) + if job["SegmentsDir"] + else Path(job["WorkDir"]) + ) + title = ( + job["JobName"] + if job["JobName"] != job["ExecutableName"] + else (display_dir.resolve().parent.name + " / " + display_dir.name) + ) + pages_user.append( + st.Page( + partial(_render_page, job=job), + title=f"{title} ({job['JobID']})", + icon=STATE_ICONS.get(job["State"]), + url_path=f"/{job['JobID']}", + ) + ) + pages[username] = pages_user + +# Set up navigation +if len(pages) > 0: + page = st.navigation(pages) + page.run() +else: + st.write("No jobs found.") diff --git a/tools/Status/ExecutableStatus/CMakeLists.txt b/tools/Status/ExecutableStatus/CMakeLists.txt index 88c243848159..cc7215b2f4ad 100644 --- a/tools/Status/ExecutableStatus/CMakeLists.txt +++ b/tools/Status/ExecutableStatus/CMakeLists.txt @@ -6,6 +6,7 @@ spectre_python_add_module( MODULE_PATH tools/Status PYTHON_FILES __init__.py + CharacteristicExtract.py EvolveGhBinaryBlackHole.py EvolveGhSingleBlackHole.py ExecutableStatus.py diff --git a/tools/Status/ExecutableStatus/CharacteristicExtract.py b/tools/Status/ExecutableStatus/CharacteristicExtract.py new file mode 100644 index 000000000000..263c7842c7ac --- /dev/null +++ b/tools/Status/ExecutableStatus/CharacteristicExtract.py @@ -0,0 +1,64 @@ +# Distributed under the MIT License. +# See LICENSE.txt for details. 
+ +import logging +from pathlib import Path + +import h5py +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd + +from spectre.Visualization.ReadH5 import to_dataframe + +from .ExecutableStatus import ExecutableStatus + +logger = logging.getLogger(__name__) + + +class CharacteristicExtract(ExecutableStatus): + executable_name_patterns = [r"^CharacteristicExtract"] + fields = { + "Time": "M", + } + + def status(self, input_file, work_dir): + try: + open_reductions_file = h5py.File( + Path(work_dir) + / (input_file["Observers"]["ReductionFileName"] + ".h5"), + "r", + ) + except: + logger.debug("Unable to open reductions file.", exc_info=True) + return {} + with open_reductions_file: + try: + time_steps = to_dataframe( + open_reductions_file["Cce/CceTimeStep.dat"] + ) + except: + logger.debug("Unable to read CCE time steps.", exc_info=True) + return {} + return { + "Time": time_steps.iloc[-1]["Time"], + } + + def format(self, field, value): + if field in ["Time", "Speed"]: + return f"{value:g}" + raise ValueError + + def render_dashboard(self, job: dict, input_file: dict): + import plotly.express as px + import streamlit as st + + from spectre.Visualization.PlotCce import plot_cce + + fig = plot_cce( + Path(job["WorkDir"]) + / (input_file["Observers"]["ReductionFileName"] + ".h5"), + modes=["Real Y_2,2", "Imag Y_2,2"], + x_label="Time [M]", + ) + st.pyplot(fig) diff --git a/tools/Status/ExecutableStatus/EvolveGhBinaryBlackHole.py b/tools/Status/ExecutableStatus/EvolveGhBinaryBlackHole.py index e0d43d02974b..ff052fc439b2 100644 --- a/tools/Status/ExecutableStatus/EvolveGhBinaryBlackHole.py +++ b/tools/Status/ExecutableStatus/EvolveGhBinaryBlackHole.py @@ -1,15 +1,18 @@ # Distributed under the MIT License. # See LICENSE.txt for details. +import glob import logging import os +from pathlib import Path import h5py import numpy as np +import pandas as pd from spectre.Visualization.ReadH5 import to_dataframe -from .ExecutableStatus import EvolutionStatus +from .ExecutableStatus import EvolutionStatus, list_reduction_files logger = logging.getLogger(__name__) @@ -94,3 +97,194 @@ def format(self, field, value): elif field == "3-Index Constraint": return f"{value:.2e}" return super().format(field, value) + + def render_dashboard(self, job: dict, input_file: dict): + import matplotlib.pyplot as plt + import plotly.express as px + import streamlit as st + + from spectre.Pipelines.EccentricityControl.EccentricityControl import ( + coordinate_separation_eccentricity_control, + ) + from spectre.Visualization.GenerateXdmf import generate_xdmf + from spectre.Visualization.PlotControlSystem import plot_control_system + from spectre.Visualization.PlotSizeControl import plot_size_control + from spectre.Visualization.PlotTrajectories import ( + import_A_and_B, + plot_trajectory, + ) + + run_dir = Path(job["WorkDir"]) + reduction_files = list_reduction_files(job=job, input_file=input_file) + + # Common horizon + with h5py.File(run_dir / reduction_files[-1], "r") as open_h5file: + ahc_subfile = open_h5file.get("ObservationAhC.dat") + if ahc_subfile is not None: + ahc_data = to_dataframe(ahc_subfile).iloc[0] + st.success( + "Found a common horizon with mass" + f" {ahc_data['ChristodoulouMass']:g} and spin" + f" {ahc_data['DimensionlessSpinMagnitude']:g} at time" + f" {ahc_data['Time']:g} M." 
+ ) + + # Plot style + legend_layout = dict( + title=None, + orientation="h", + yanchor="bottom", + y=1, + xanchor="left", + x=0, + ) + + # Constraints + st.subheader("Constraints") + + def get_constraints_data(reductions_file): + with h5py.File(reductions_file, "r") as open_h5file: + return to_dataframe(open_h5file["Norms.dat"]).set_index("Time") + + constraints = pd.concat(map(get_constraints_data, reduction_files)) + constraints.sort_index(inplace=True) + constraints = constraints[ + [col for col in constraints.columns if "Constraint" in col] + ] + fig = px.line(constraints.iloc[1:], log_y=True) + fig.update_layout(xaxis_title="Time [M]", legend=legend_layout) + fig.update_yaxes(exponentformat="e", title=None) + st.plotly_chart(fig) + + # Horizon parameters + st.subheader("Horizon parameters") + + def get_horizons_data(reductions_file): + with h5py.File(reductions_file, "r") as open_h5file: + horizons_data = [] + for ab in "AB": + ah_subfile = open_h5file.get(f"ObservationAh{ab}.dat") + if ah_subfile is not None: + horizons_data.append( + to_dataframe(ah_subfile) + .set_index("Time") + .add_prefix(f"Ah{ab} ") + ) + if not horizons_data: + return None + return pd.concat(horizons_data, axis=1) + + horizon_params = pd.concat(map(get_horizons_data, reduction_files)) + for label, name, col in zip( + ["AhA mass", "AhB mass", "AhA spin", "AhB spin"], + [ + "AhA ChristodoulouMass", + "AhB ChristodoulouMass", + "AhA DimensionlessSpinMagnitude", + "AhB DimensionlessSpinMagnitude", + ], + st.columns(4), + ): + col.metric(label, f"{horizon_params.iloc[-1][name]:.4g}") + fig = px.line( + np.abs(horizon_params.iloc[1:] - horizon_params.iloc[0]), + log_y=True, + ) + fig.update_layout(xaxis_title="Time [M]", legend=legend_layout) + fig.update_yaxes( + exponentformat="e", title="Difference to initial values" + ) + st.plotly_chart(fig) + + # Time steps + super().render_time_steps(input_file, reduction_files) + + # Trajectories + st.subheader("Trajectories") + traj_A, traj_B = import_A_and_B(reduction_files) + st.pyplot(plot_trajectory(traj_A, traj_B)) + coord_separation = np.linalg.norm(traj_A[:, 1:] - traj_B[:, 1:], axis=1) + fig = px.line( + x=traj_A[:, 0], + y=coord_separation, + labels={"y": "Coordinate separation [M]"}, + ) + fig.update_layout(xaxis_title="Time [M]", legend=legend_layout) + fig.update_yaxes(title=None) + st.plotly_chart(fig) + + # Grid + st.subheader("Grid") + + @st.experimental_fragment + def render_grid(): + if st.button("Render grid"): + from spectre.Visualization.Render3D.Domain import render_domain + + volume_files = glob.glob(str(run_dir / "BbhVolume*.h5")) + generate_xdmf( + volume_files, + output=str(run_dir / "Bbh.xmf"), + subfile_name="VolumeData", + ) + render_domain( + str(run_dir / "Bbh.xmf"), + output=str(run_dir / "domain.png"), + slice=True, + zoom_factor=50.0, + time_step=-1, + ) + if (run_dir / "domain.png").exists(): + st.image(str(run_dir / "domain.png")) + + render_grid() + + # Control systems + st.subheader("Control systems") + + @st.experimental_fragment + def render_control_systems(): + if st.checkbox("Show control systems"): + st.pyplot( + plot_control_system( + reduction_files, + with_shape=st.checkbox("With shape", True), + show_all_m=st.checkbox("Show all m", False), + shape_l_max=st.number_input( + "Shape l max", value=2, min_value=0 + ), + ) + ) + with st.expander("Size control A", expanded=False): + st.pyplot(plot_size_control(reduction_files, "A")) + with st.expander("Size control B", expanded=False): + st.pyplot(plot_size_control(reduction_files, 
"B")) + + render_control_systems() + + # Eccentricity + st.subheader("Eccentricity") + + @st.experimental_fragment + def render_eccentricity(): + if st.checkbox("Show eccentricity"): + col_tmin, col_tmax = st.columns(2) + fig = plt.figure(figsize=(10, 8), layout="tight") + ecc_control_result = coordinate_separation_eccentricity_control( + reduction_files[0], + "ApparentHorizons/ControlSystemAhA_Centers.dat", + "ApparentHorizons/ControlSystemAhB_Centers.dat", + tmin=col_tmin.number_input("tmin", value=600, min_value=0), + tmax=col_tmax.number_input("tmax", value=2000, min_value=0), + angular_velocity_from_xcts=None, + expansion_from_xcts=None, + fig=fig, + )["H4"]["fit result"] + st.pyplot(fig) + st.metric( + "Eccentricity", + f"{ecc_control_result['eccentricity']:e}", + ) + st.write(ecc_control_result) + + render_eccentricity() diff --git a/tools/Status/ExecutableStatus/ExecutableStatus.py b/tools/Status/ExecutableStatus/ExecutableStatus.py index 2dcb0986e74b..2a6dd354c403 100644 --- a/tools/Status/ExecutableStatus/ExecutableStatus.py +++ b/tools/Status/ExecutableStatus/ExecutableStatus.py @@ -3,18 +3,32 @@ import logging import os +from pathlib import Path from typing import Any, Dict, List, Optional import h5py +import matplotlib.pyplot as plt import numpy as np import pandas as pd +from spectre.support.DirectoryStructure import list_segments from spectre.Visualization.ReadH5 import to_dataframe from spectre.Visualization.ReadInputFile import find_event logger = logging.getLogger(__name__) +def list_reduction_files(job: dict, input_file: dict): + reductions_file_name = input_file["Observers"]["ReductionFileName"] + ".h5" + if job["SegmentsDir"]: + return [ + segment_dir.path / reductions_file_name + for segment_dir in list_segments(job["SegmentsDir"]) + ] + else: + return [Path(job["WorkDir"]) / reductions_file_name] + + class ExecutableStatus: """Base class for providing status information for SpECTRE executables. @@ -66,6 +80,25 @@ def format(self, field: str, value: Any) -> str: """ raise NotImplementedError + def render_dashboard(self, job: dict, input_file: dict): + """Render a dashboard for the job (experimental). + + Arguments: + job: A dictionary of job information, including the input file and + working directory. See the 'spectre.tools.Status.fetch_status' + function for details. + input_file: The input file read in as a dictionary. + + This method can be overridden by subclasses to provide a custom + dashboard for the job. The default implementation does nothing. + """ + import streamlit as st + + st.warning( + "No dashboard available for this executable. Add an implementation" + " to the 'ExecutableStatus' subclass." + ) + class EvolutionStatus(ExecutableStatus): """An 'ExecutableStatus' subclass that matches all evolution executables. 
@@ -171,6 +204,71 @@ def format(self, field, value): return f"{value:g}" raise ValueError + def render_time_steps(self, input_file: dict, reduction_files: list): + import plotly.express as px + import streamlit as st + + observe_time_event = find_event("ObserveTimeStep", input_file) + if observe_time_event is None: + st.warning("No 'ObserveTimeStep' event found in input file.") + return + subfile_name = observe_time_event["SubfileName"] + ".dat" + logger.debug(f"Reading time steps from subfile: '{subfile_name}'") + + def get_time_steps(reductions_file): + with h5py.File(reductions_file, "r") as open_h5file: + return to_dataframe(open_h5file[subfile_name]).set_index("Time") + + # Plot style + legend_layout = dict( + title=None, + orientation="h", + yanchor="bottom", + y=1, + xanchor="left", + x=0, + ) + + st.subheader("Time steps") + time_steps = pd.concat(map(get_time_steps, reduction_files)) + fig = px.line( + time_steps, + y=[ + "Effective time step", + "Minimum time step", + "Maximum time step", + "Slab size", + ], + log_y=True, + ) + fig.update_layout( + legend=legend_layout, + xaxis=dict(title="Time [M]"), + yaxis=dict(exponentformat="e", title=None), + ) + st.plotly_chart(fig) + run_speed = ( + ( + time_steps.index.diff() / time_steps["Maximum Walltime"].diff() + + time_steps.index.diff() + / time_steps["Minimum Walltime"].diff() + ) + / 2 + * 3600 + ) + fig = px.line(run_speed.rolling(30, min_periods=1).mean()) + fig.update_layout( + xaxis_title="Time [M]", + yaxis_title="Simulation speed [M/h]", + showlegend=False, + ) + st.plotly_chart(fig) + + def render_dashboard(self, job: dict, input_file: dict): + return self.render_time_steps( + input_file, list_reduction_files(job, input_file) + ) + class EllipticStatus(ExecutableStatus): """An 'ExecutableStatus' subclass that matches all elliptic executables. 
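These `render_*` helpers are normally driven by the Streamlit app in `tools/Status/Dashboard.py`, but the same hooks can be exercised from a plain script, for example to log status fields periodically. A rough sketch, reusing the calls the dashboard makes (`fetch_status` is the refactored helper added to `tools/Status/Status.py` below); treat it as illustrative rather than part of this PR:

```py
import yaml

from spectre.tools.Status.Status import fetch_status, match_executable_status

# Same filters the dashboard sidebar applies by default
job_data = fetch_status(user=None, allusers=False, starttime="now-1day")
if len(job_data) > 0:
    # Skip jobs whose directories or executables could not be identified
    job_data.dropna(subset=["WorkDir", "ExecutableName"], inplace=True)

for _, job in job_data.iterrows():
    # Pick the matching ExecutableStatus subclass for this job
    executable_status = match_executable_status(job["ExecutableName"])
    with open(job["InputFile"], "r") as open_input_file:
        _, input_file = yaml.safe_load_all(open_input_file)
    status = executable_status.status(input_file, job["WorkDir"])
    # Print the status fields instead of rendering Streamlit widgets
    for field, unit in executable_status.fields.items():
        if field in status:
            value = executable_status.format(field, status[field])
            print(f"{field}: {value} {unit}")
```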
@@ -238,3 +336,23 @@ def format(self, field, value): elif field in ["Residual"]: return f"{value:.1e}" raise ValueError + + def render_solver_convergence(self, job: dict, input_file: dict): + import plotly.express as px + import streamlit as st + + from spectre.Visualization.PlotEllipticConvergence import ( + plot_elliptic_convergence, + ) + + st.subheader("Elliptic solver convergence") + fig = plt.figure(figsize=(8, 6), layout="tight") + plot_elliptic_convergence( + Path(job["WorkDir"]) + / (input_file["Observers"]["ReductionFileName"] + ".h5"), + fig=fig, + ) + st.pyplot(fig) + + def render_dashboard(self, job: dict, input_file: dict): + return self.render_solver_convergence(job, input_file) diff --git a/tools/Status/ExecutableStatus/SolveXcts.py b/tools/Status/ExecutableStatus/SolveXcts.py index abc0411f1abc..b68e08101fe6 100644 --- a/tools/Status/ExecutableStatus/SolveXcts.py +++ b/tools/Status/ExecutableStatus/SolveXcts.py @@ -3,8 +3,12 @@ import logging import os +from pathlib import Path import h5py +import matplotlib.pyplot as plt +import numpy as np +import pandas as pd from .ExecutableStatus import EllipticStatus @@ -49,3 +53,58 @@ def format(self, field, value): return super().format("Iteration", value) elif "residual" in field: return super().format("Residual", value) + + def render_dashboard(self, job: dict, input_file: dict): + import plotly.express as px + import streamlit as st + + super().render_solver_convergence(job, input_file) + + # Parameter control + control_outfile = Path(job["WorkDir"]) / "ControlParamsData.txt" + if control_outfile.exists(): + st.subheader("Parameter control") + control_data = np.loadtxt(control_outfile, delimiter=",") + df = pd.DataFrame( + control_data, + columns=[ + "Iteration", + "Mass A", + "Mass B", + "Spin A x", + "Spin A y", + "Spin A z", + "Spin B x", + "Spin B y", + "Spin B z", + "Residual Mass A", + "Residual Mass B", + "Residual Spin A x", + "Residual Spin A y", + "Residual Spin A z", + "Residual Spin B x", + "Residual Spin B y", + "Residual Spin B z", + ], + ).set_index("Iteration") + # Print current params + params = df.iloc[-1] + mass_A, mass_B = params["Mass A"], params["Mass B"] + spin_A, spin_B = [ + np.linalg.norm(params[[f"Spin {AB} {xyz}" for xyz in "xyz"]]) + for AB in "AB" + ] + for label, val, col in zip( + ["Mass A", "Mass B", "Spin A", "Spin B"], + [mass_A, mass_B, spin_A, spin_B], + st.columns(4), + ): + col.metric(label, f"{val:.4g}") + # Plot convergence + fig = px.line( + np.abs(df[[col for col in df.columns if "Residual" in col]]), + log_y=True, + markers=True, + ) + fig.update_yaxes(exponentformat="e", title=None) + st.plotly_chart(fig) diff --git a/tools/Status/ExecutableStatus/__init__.py b/tools/Status/ExecutableStatus/__init__.py index 3c8fa6eb0b08..1f1e33985fbc 100644 --- a/tools/Status/ExecutableStatus/__init__.py +++ b/tools/Status/ExecutableStatus/__init__.py @@ -4,6 +4,7 @@ import logging import re +from .CharacteristicExtract import CharacteristicExtract from .EvolveGhBinaryBlackHole import EvolveGhBinaryBlackHole from .EvolveGhSingleBlackHole import EvolveGhSingleBlackHole from .ExecutableStatus import EllipticStatus, EvolutionStatus, ExecutableStatus @@ -13,6 +14,7 @@ # Subclasses are matched in this order. Add new subclasses here. 
executable_status_subclasses = [ + CharacteristicExtract, EvolveGhBinaryBlackHole, EvolveGhSingleBlackHole, EvolutionStatus, diff --git a/tools/Status/Status.py b/tools/Status/Status.py index ac4096cc160a..df3e57e499b5 100644 --- a/tools/Status/Status.py +++ b/tools/Status/Status.py @@ -197,7 +197,7 @@ def _state_order(state): return None -def _format(field: str, value: Any, state_styles: dict) -> str: +def _format(field: str, value: Any, state_styles: dict = {}) -> str: # The SLURM field "NodeList" returns "None assigned" if a job is pending if pd.isnull(value) or value == "None assigned": return "-" @@ -264,16 +264,27 @@ def input_column_callback(ctx, param, value): return result -@rich.console.group() -def render_status( - show_paths, - show_unidentified, - show_deleted, - show_all_segments, - state_styles, - columns, +def fetch_status( + show_deleted=False, + show_all_segments=False, **kwargs, ): + """Fetches job data and processes it for display. + + See 'fetch_job_data' for how jobs are fetched. This function processes the + data to make it more human-readable and filters it, e.g. related to deleted + jobs and segments. + + Arguments: + show_deleted: Show jobs that ran in directories that are now deleted. + (default: False) + show_all_segments: Show all segments as individual jobs instead of just + the latest. (default: False) + kwargs: Arguments to pass to 'fetch_job_data'. + + Returns: Pandas DataFrame with the job data sorted by JobID (most recently + submitted job first). + """ columns_to_fetch = list(AVAILABLE_COLUMNS) columns_to_fetch.remove("SegmentId") @@ -284,7 +295,7 @@ def render_status( # Do nothing if job list is empty if len(job_data) == 0: - return + return job_data # Remove deleted jobs if not show_deleted: @@ -361,6 +372,23 @@ def render_status( # Add metadata so jobs can be grouped by state job_data["StateOrder"] = job_data["State"].apply(_state_order) + return job_data + + +@rich.console.group() +def render_status( + show_paths, + show_unidentified, + state_styles, + columns, + **kwargs, +): + job_data = fetch_status(**kwargs) + + # Do nothing if job list is empty + if len(job_data) == 0: + return + # Remove the user column unless we're specifying all users if not kwargs["allusers"]: if "User" in columns: