diff --git a/docs/changelog.rst b/docs/changelog.rst index f322a9fb..d500a182 100644 --- a/docs/changelog.rst +++ b/docs/changelog.rst @@ -36,6 +36,16 @@ New Features Enhancements ++++++++++++ +- (536b) ``v1.AtomicResult.convert_v`` learned an ``external_input_data`` option to inject that field (if known) rather than relying on incomplete reconstruction from the v1 Result. This may not be the final solution. +- (536b) ``v2.FailedOperation`` gained ``schema_name`` and ``schema_version=2``. +- (536b) ``v2.AtomicResult`` no longer inherits from ``v2.AtomicInput``. It gained an ``input_data`` field for the corresponding ``AtomicInput`` and independent ``id`` and ``molecule`` fields (the latter being equivalent to ``v1.AtomicResult.molecule`` with the frame of the results; ``v2.AtomicResult.input_data.molecule`` is new, preserving the input frame). It also gained an independent ``extras`` field. +- (536b) Both v1 and v2 ``AtomicResult.convert_v()`` learned to handle the new ``input_data`` layout. +- (:pr:`357`, :issue:`536`) ``v2.AtomicResult``, ``v2.OptimizationResult``, and ``v2.TorsionDriveResult`` have the ``success`` field enforced to ``True``. Previously it could be set to ``True`` or ``False``; now validation fails unless it is ``True``. Likewise ``v2.FailedOperation.success`` is enforced to ``False``. +- (:pr:`357`, :issue:`536`) ``v2.AtomicResult``, ``v2.OptimizationResult``, and ``v2.TorsionDriveResult`` have the ``error`` field removed. It is no longer used now that ``success=True`` is enforced; failures should be routed to ``FailedOperation``. +- (:pr:`357`) ``v1.Molecule`` had its ``schema_version`` changed to ``Literal[2]`` (recall that ``Molecule`` runs one ahead of the general numbering scheme), so new instances will be 2 even if another value is passed in. Likewise ``v2.BasisSet.schema_version=2``, ``v1.BasisSet.schema_version=1``, ``v1.QCInputSpecification.schema_version=1``, and ``v1.OptimizationSpecification.schema_version=1``. +- (:pr:`357`) ``v2.AtomicResultProperties``, ``v2.QCInputSpecification``, and ``v2.OptimizationSpecification`` lost their ``schema_version`` field until we determine whether it is really needed. +- (:pr:`357`) ``v2.OptimizationSpecification`` gained an ``extras`` field. +- (:pr:`357`) ``v1.FailedOperation.extras`` and ``v2.FailedOperation.extras`` defaults changed from ``None`` to ``{}``. * Fix a lot of warnings originating in this project. * `Molecule.extras` now defaults to `{}` rather than None in both v1 and v2. Input None converts to {} upon instantiation. * ``v2.FailedOperation`` field `id` is becoming `Optional[str]` instead of plain `str` so that the default validates. diff --git a/qcelemental/models/types.py b/qcelemental/models/types.py index 346ec5ae..9c53f736 100644 --- a/qcelemental/models/types.py +++ b/qcelemental/models/types.py @@ -10,6 +10,6 @@ DeprecationWarning, ) -# Array = qcelemental.models.v1.Array +Array = qcelemental.models.v1.Array # ArrayMeta = qcelemental.models.v1.ArrayMeta # TypedArray = qcelemental.models.v1.TypedArray diff --git a/qcelemental/models/v1/__init__.py b/qcelemental/models/v1/__init__.py index 0ba964b6..4271ee5a 100644 --- a/qcelemental/models/v1/__init__.py +++ b/qcelemental/models/v1/__init__.py @@ -1,4 +1,4 @@ -from . import types +from . import types # ever used?
from .align import AlignmentMill from .basemodels import AutodocBaseSettings # remove when QCFractal merges `next` from .basemodels import ProtoModel @@ -8,6 +8,7 @@ from .procedures import Optimization # scheduled for removal from .procedures import ( OptimizationInput, + OptimizationProtocols, OptimizationResult, OptimizationSpecification, QCInputSpecification, @@ -18,7 +19,8 @@ from .results import Result # scheduled for removal from .results import ResultInput # scheduled for removal from .results import ResultProperties # scheduled for removal -from .results import AtomicInput, AtomicResult, AtomicResultProperties, AtomicResultProtocols +from .results import AtomicInput, AtomicResult, AtomicResultProperties, AtomicResultProtocols, WavefunctionProperties +from .types import Array def qcschema_models(): diff --git a/qcelemental/models/v1/basis.py b/qcelemental/models/v1/basis.py index c7d1c4b8..eb834bac 100644 --- a/qcelemental/models/v1/basis.py +++ b/qcelemental/models/v1/basis.py @@ -1,6 +1,12 @@ from enum import Enum from typing import Dict, List, Optional +try: + from typing import Literal +except ImportError: + # remove when minimum py38 + from typing_extensions import Literal + from pydantic.v1 import ConstrainedInt, Field, constr, validator from ...exceptions import ValidationError @@ -155,7 +161,7 @@ class BasisSet(ProtoModel): "qcschema_basis", description=(f"The QCSchema specification to which this model conforms. Explicitly fixed as qcschema_basis."), ) - schema_version: int = Field( # type: ignore + schema_version: Literal[1] = Field( # type: ignore 1, description="The version number of :attr:`~qcelemental.models.BasisSet.schema_name` to which this model conforms.", ) @@ -175,6 +181,10 @@ class Config(ProtoModel.Config): def schema_extra(schema, model): schema["$schema"] = qcschema_draft + @validator("schema_version", pre=True) + def _version_stamp(cls, v): + return 1 + @validator("atom_map") def _check_atom_map(cls, v, values): sv = set(v) diff --git a/qcelemental/models/v1/common_models.py b/qcelemental/models/v1/common_models.py index 04ce48d0..c20900dc 100644 --- a/qcelemental/models/v1/common_models.py +++ b/qcelemental/models/v1/common_models.py @@ -120,7 +120,7 @@ class FailedOperation(ProtoModel): ":class:`ComputeError` for more details.", ) extras: Optional[Dict[str, Any]] = Field( # type: ignore - None, + {}, description="Additional information to bundle with the failed operation. Details which pertain specifically " "to a thrown error should be contained in the `error` field. See :class:`ComputeError` for details.", ) @@ -139,6 +139,7 @@ def convert_v( dself = self.dict() if version == 2: + # TODO if FailedOp gets a schema_version, add a validator self_vN = qcel.models.v2.FailedOperation(**dself) return self_vN diff --git a/qcelemental/models/v1/molecule.py b/qcelemental/models/v1/molecule.py index bc873adb..05c9fd3b 100644 --- a/qcelemental/models/v1/molecule.py +++ b/qcelemental/models/v1/molecule.py @@ -9,6 +9,12 @@ from pathlib import Path from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple, Union, cast +try: + from typing import Literal +except ImportError: + # remove when minimum py38 + from typing_extensions import Literal + import numpy as np from pydantic.v1 import ConstrainedFloat, ConstrainedInt, Field, constr, validator @@ -119,7 +125,7 @@ class Molecule(ProtoModel): f"The QCSchema specification to which this model conforms. Explicitly fixed as {qcschema_molecule_default}." 
), ) - schema_version: int = Field( # type: ignore + schema_version: Literal[2] = Field( # type: ignore 2, description="The version number of :attr:`~qcelemental.models.Molecule.schema_name` to which this model conforms.", ) @@ -370,6 +376,12 @@ def __init__(self, orient: bool = False, validate: Optional[bool] = None, **kwar elif validate or geometry_prep: values["geometry"] = float_prep(values["geometry"], geometry_noise) + @validator("schema_version", pre=True) + def _version_stamp(cls, v): + # seemingly unneeded, this lets conver_v re-label the model w/o discarding model and + # submodel version fields first. + return 2 + @validator("geometry") def _must_be_3n(cls, v, values, **kwargs): n = len(values["symbols"]) diff --git a/qcelemental/models/v1/procedures.py b/qcelemental/models/v1/procedures.py index 7b9717bc..7a7eaa76 100644 --- a/qcelemental/models/v1/procedures.py +++ b/qcelemental/models/v1/procedures.py @@ -60,7 +60,7 @@ class QCInputSpecification(ProtoModel): """ schema_name: constr(strip_whitespace=True, regex=qcschema_input_default) = qcschema_input_default # type: ignore - schema_version: int = 1 # TODO + schema_version: Literal[1] = 1 driver: DriverEnum = Field(DriverEnum.gradient, description=str(DriverEnum.__doc__)) model: Model = Field(..., description=str(Model.__doc__)) @@ -71,6 +71,10 @@ class QCInputSpecification(ProtoModel): description="Additional information to bundle with the computation. Use for schema development and scratch space.", ) + @validator("schema_version", pre=True) + def _version_stamp(cls, v): + return 1 + class OptimizationInput(ProtoModel): id: Optional[str] = None @@ -110,6 +114,7 @@ def convert_v( dself = self.dict() if version == 2: + dself["input_specification"].pop("schema_version", None) self_vN = qcel.models.v2.OptimizationInput(**dself) return self_vN @@ -171,8 +176,15 @@ def convert_v( if check_convertible_version(version, error="OptimizationResult") == "self": return self + trajectory_class = self.trajectory[0].__class__ dself = self.dict() if version == 2: + # remove harmless empty error field that v2 won't accept. if populated, pydantic will catch it. 
+ dself.pop("error", None) + + dself["trajectory"] = [trajectory_class(**atres).convert_v(version) for atres in dself["trajectory"]] + dself["input_specification"].pop("schema_version", None) + self_vN = qcel.models.v2.OptimizationResult(**dself) return self_vN @@ -189,12 +201,16 @@ class OptimizationSpecification(ProtoModel): """ schema_name: constr(strip_whitespace=True, regex="qcschema_optimization_specification") = "qcschema_optimization_specification" # type: ignore - schema_version: int = 1 # TODO + schema_version: Literal[1] = 1 procedure: str = Field(..., description="Optimization procedure to run the optimization with.") keywords: Dict[str, Any] = Field({}, description="The optimization specific keywords to be used.") protocols: OptimizationProtocols = Field(OptimizationProtocols(), description=str(OptimizationProtocols.__doc__)) + @validator("schema_version", pre=True) + def _version_stamp(cls, v): + return 1 + @validator("procedure") def _check_procedure(cls, v): return v.lower() @@ -282,6 +298,9 @@ def convert_v( dself = self.dict() if version == 2: + dself["input_specification"].pop("schema_version", None) + dself["optimization_spec"].pop("schema_version", None) + self_vN = qcel.models.v2.TorsionDriveInput(**dself) return self_vN @@ -332,8 +351,19 @@ def convert_v( if check_convertible_version(version, error="TorsionDriveResult") == "self": return self + opthist_class = next(iter(self.optimization_history.values()))[0].__class__ dself = self.dict() if version == 2: + # remove harmless empty error field that v2 won't accept. if populated, pydantic will catch it. + dself.pop("error", None) + + dself["input_specification"].pop("schema_version", None) + dself["optimization_spec"].pop("schema_version", None) + dself["optimization_history"] = { + k: [opthist_class(**res).convert_v(version) for res in lst] + for k, lst in dself["optimization_history"].items() + } + self_vN = qcel.models.v2.TorsionDriveResult(**dself) return self_vN diff --git a/qcelemental/models/v1/results.py b/qcelemental/models/v1/results.py index 386ff2cb..f43a0103 100644 --- a/qcelemental/models/v1/results.py +++ b/qcelemental/models/v1/results.py @@ -797,9 +797,29 @@ def _native_file_protocol(cls, value, values): return ret def convert_v( - self, version: int + self, + version: int, + *, + external_input_data: Optional[Any] = None, ) -> Union["qcelemental.models.v1.AtomicResult", "qcelemental.models.v2.AtomicResult"]: - """Convert to instance of particular QCSchema version.""" + """Convert to instance of particular QCSchema version. + + Parameters + ---------- + version + The version to convert to. + external_input_data + Since self contains data merged from input, this allows passing in the original input, particularly for `molecule` and `extras` fields. + Can be model or dictionary and should be *already* converted to the desired version. + Replaces ``input_data`` field entirely (not merges with extracts from self) and w/o consistency checking. + + Returns + ------- + AtomicResult + Returns self (not a copy) if ``version`` already satisfied. + Returns a new AtomicResult of ``version`` otherwise. + + """ import qcelemental as qcel if check_convertible_version(version, error="AtomicResult") == "self": @@ -807,6 +827,29 @@ def convert_v( dself = self.dict() if version == 2: + # remove harmless empty error field that v2 won't accept. if populated, pydantic will catch it. 
+ dself.pop("error", None) + + input_data = { + k: dself.pop(k) for k in list(dself.keys()) if k in ["driver", "keywords", "model", "protocols"] + } + input_data["molecule"] = dself["molecule"] # duplicate since input mol has been overwritten + # any input provenance has been overwritten + input_data["extras"] = { + k: dself["extras"].pop(k) for k in list(dself["extras"].keys()) if k in [] + } # sep any merged extras + if external_input_data: + # Note: overwriting with external, not updating. reconsider? + dself["input_data"] = external_input_data + in_extras = ( + external_input_data.get("extras", {}) + if isinstance(external_input_data, dict) + else external_input_data.extras + ) + dself["extras"] = {k: v for k, v in dself["extras"].items() if (k, v) not in in_extras.items()} + else: + dself["input_data"] = input_data + self_vN = qcel.models.v2.AtomicResult(**dself) return self_vN diff --git a/qcelemental/models/v2/__init__.py b/qcelemental/models/v2/__init__.py index 9c0e74bd..c500f840 100644 --- a/qcelemental/models/v2/__init__.py +++ b/qcelemental/models/v2/__init__.py @@ -4,8 +4,17 @@ from .basis import BasisSet from .common_models import ComputeError, DriverEnum, FailedOperation, Model, Provenance from .molecule import Molecule -from .procedures import OptimizationInput, OptimizationResult, TorsionDriveInput, TorsionDriveResult -from .results import AtomicInput, AtomicResult, AtomicResultProperties, AtomicResultProtocols +from .procedures import ( + OptimizationInput, + OptimizationProtocols, + OptimizationResult, + OptimizationSpecification, + QCInputSpecification, + TDKeywords, + TorsionDriveInput, + TorsionDriveResult, +) +from .results import AtomicInput, AtomicResult, AtomicResultProperties, AtomicResultProtocols, WavefunctionProperties def qcschema_models(): diff --git a/qcelemental/models/v2/basis.py b/qcelemental/models/v2/basis.py index 54ff278f..1c91dc97 100644 --- a/qcelemental/models/v2/basis.py +++ b/qcelemental/models/v2/basis.py @@ -1,6 +1,12 @@ from enum import Enum from typing import Dict, List, Optional +try: + from typing import Literal +except ImportError: + # remove when minimum py38 + from typing_extensions import Literal + from pydantic import Field, constr, field_validator from typing_extensions import Annotated @@ -171,7 +177,7 @@ class BasisSet(ProtoModel): "qcschema_basis", description=f"The QCSchema specification to which this model conforms. 
Explicitly fixed as qcschema_basis.", ) - schema_version: int = Field( # type: ignore + schema_version: Literal[2] = Field( # type: ignore 2, description="The version number of :attr:`~qcelemental.models.BasisSet.schema_name` " "to which this model conforms.", @@ -245,3 +251,7 @@ def _calculate_nbf(cls, atom_map, center_data) -> int: ret += center_count[center] return ret + + @field_validator("schema_version", mode="before") + def _version_stamp(cls, v): + return 2 diff --git a/qcelemental/models/v2/common_models.py b/qcelemental/models/v2/common_models.py index 04a94cc8..8bcac02f 100644 --- a/qcelemental/models/v2/common_models.py +++ b/qcelemental/models/v2/common_models.py @@ -1,8 +1,14 @@ from enum import Enum from typing import TYPE_CHECKING, Any, Dict, Optional, Sequence, Tuple, Union +try: + from typing import Literal +except ImportError: + # remove when minimum py38 + from typing_extensions import Literal + import numpy as np -from pydantic import Field +from pydantic import Field, field_validator from .basemodels import ProtoModel, qcschema_draft from .basis import BasisSet @@ -95,6 +101,16 @@ class FailedOperation(ProtoModel): and containing the reason and input data which generated the failure. """ + schema_name: Literal["qcschema_failed_operation"] = Field( + "qcschema_failed_operation", + description=( + f"The QCSchema specification this model conforms to. Explicitly fixed as qcschema_failed_operation." + ), + ) + schema_version: Literal[2] = Field( + 2, + description="The version number of :attr:`~qcelemental.models.FailedOperation.schema_name` to which this model conforms.", + ) id: Optional[str] = Field( # type: ignore None, description="A unique identifier which links this FailedOperation, often of the same Id of the operation " @@ -106,7 +122,7 @@ class FailedOperation(ProtoModel): description="The input data which was passed in that generated this failure. This should be the complete " "input which when attempted to be run, caused the operation to fail.", ) - success: bool = Field( # type: ignore + success: Literal[False] = Field( # type: ignore False, description="A boolean indicator that the operation failed consistent with the model of successful operations. " "Should always be False. Allows programmatic assessment of all operations regardless of if they failed or " @@ -118,7 +134,7 @@ class FailedOperation(ProtoModel): ":class:`ComputeError` for more details.", ) extras: Optional[Dict[str, Any]] = Field( # type: ignore - None, + {}, description="Additional information to bundle with the failed operation. Details which pertain specifically " "to a thrown error should be contained in the `error` field. 
See :class:`ComputeError` for details.", ) @@ -126,6 +142,10 @@ class FailedOperation(ProtoModel): def __repr_args__(self) -> "ReprArgs": return [("error", self.error)] + @field_validator("schema_version", mode="before") + def _version_stamp(cls, v): + return 2 + def convert_v( self, version: int ) -> Union["qcelemental.models.v1.FailedOperation", "qcelemental.models.v2.FailedOperation"]: @@ -137,6 +157,9 @@ def convert_v( dself = self.model_dump() if version == 1: + dself.pop("schema_name") + dself.pop("schema_version") + self_vN = qcel.models.v1.FailedOperation(**dself) return self_vN diff --git a/qcelemental/models/v2/molecule.py b/qcelemental/models/v2/molecule.py index 5dc3c58f..3de67b05 100644 --- a/qcelemental/models/v2/molecule.py +++ b/qcelemental/models/v2/molecule.py @@ -126,7 +126,7 @@ class Molecule(ProtoModel): ), ) schema_version: int = Field( # type: ignore - 2, + 2, # TODO Turn to Literal[3] = Field(3) description="The version number of :attr:`~qcelemental.models.Molecule.schema_name` to which this model conforms.", ) validated: bool = Field( # type: ignore @@ -402,6 +402,12 @@ def __init__(self, orient: bool = False, validate: Optional[bool] = None, **kwar elif validate or geometry_prep: values["geometry"] = float_prep(values["geometry"], geometry_noise) + @field_validator("schema_version", mode="before") + def _version_stamp(cls, v): + # seemingly unneeded, this lets conver_v re-label the model w/o discarding model and + # submodel version fields first. + return 2 # TODO 3 + @field_validator("geometry") @classmethod def _must_be_3n(cls, v, info): diff --git a/qcelemental/models/v2/procedures.py b/qcelemental/models/v2/procedures.py index 9f0ae956..9e6d942a 100644 --- a/qcelemental/models/v2/procedures.py +++ b/qcelemental/models/v2/procedures.py @@ -59,7 +59,7 @@ class QCInputSpecification(ProtoModel): """ schema_name: constr(strip_whitespace=True, pattern=qcschema_input_default) = qcschema_input_default # type: ignore - schema_version: int = 1 # TODO + # TRIAL schema_version: int = 1 # TODO driver: DriverEnum = Field(DriverEnum.gradient, description=str(DriverEnum.__doc__)) model: Model = Field(..., description=str(Model.__doc__)) @@ -111,6 +111,7 @@ def convert_v( dself = self.model_dump() if version == 1: + dself["input_specification"].pop("schema_version", None) self_vN = qcel.models.v1.OptimizationInput(**dself) return self_vN @@ -133,10 +134,9 @@ class OptimizationResult(OptimizationInput): stdout: Optional[str] = Field(None, description="The standard output of the program.") stderr: Optional[str] = Field(None, description="The standard error of the program.") - success: bool = Field( - ..., description="The success of a given programs execution. If False, other fields may be blank." + success: Literal[True] = Field( + True, description="The success of a given programs execution. If False, other fields may be blank." 
) - error: Optional[ComputeError] = Field(None, description=str(ComputeError.__doc__)) provenance: Provenance = Field(..., description=str(Provenance.__doc__)) @field_validator("trajectory") @@ -175,8 +175,12 @@ def convert_v( if check_convertible_version(version, error="OptimizationResult") == "self": return self + trajectory_class = self.trajectory[0].__class__ dself = self.model_dump() if version == 1: + dself["trajectory"] = [trajectory_class(**atres).convert_v(version) for atres in dself["trajectory"]] + dself["input_specification"].pop("schema_version", None) + self_vN = qcel.models.v1.OptimizationResult(**dself) return self_vN @@ -195,11 +199,15 @@ class OptimizationSpecification(ProtoModel): schema_name: constr( strip_whitespace=True, pattern="qcschema_optimization_specification" ) = "qcschema_optimization_specification" # type: ignore - schema_version: int = 1 # TODO + # TRIAL schema_version: int = 1 # TODO procedure: str = Field(..., description="Optimization procedure to run the optimization with.") keywords: Dict[str, Any] = Field({}, description="The optimization specific keywords to be used.") protocols: OptimizationProtocols = Field(OptimizationProtocols(), description=str(OptimizationProtocols.__doc__)) + extras: Dict[str, Any] = Field( + {}, + description="Additional information to bundle with the computation. Use for schema development and scratch space.", + ) @field_validator("procedure") @classmethod @@ -292,6 +300,9 @@ def convert_v( dself = self.model_dump() if version == 1: + if dself["optimization_spec"].pop("extras", None): + pass + self_vN = qcel.models.v1.TorsionDriveInput(**dself) return self_vN @@ -325,10 +336,9 @@ class TorsionDriveResult(TorsionDriveInput): stdout: Optional[str] = Field(None, description="The standard output of the program.") stderr: Optional[str] = Field(None, description="The standard error of the program.") - success: bool = Field( - ..., description="The success of a given programs execution. If False, other fields may be blank." + success: Literal[True] = Field( + True, description="The success of a given programs execution. If False, other fields may be blank." ) - error: Optional[ComputeError] = Field(None, description=str(ComputeError.__doc__)) provenance: Provenance = Field(..., description=str(Provenance.__doc__)) @field_validator("schema_version", mode="before") @@ -344,8 +354,17 @@ def convert_v( if check_convertible_version(version, error="TorsionDriveResult") == "self": return self + opthist_class = next(iter(self.optimization_history.values()))[0].__class__ dself = self.model_dump() if version == 1: + if dself["optimization_spec"].pop("extras", None): + pass + + dself["optimization_history"] = { + k: [opthist_class(**res).convert_v(version) for res in lst] + for k, lst in dself["optimization_history"].items() + } + self_vN = qcel.models.v1.TorsionDriveResult(**dself) return self_vN diff --git a/qcelemental/models/v2/results.py b/qcelemental/models/v2/results.py index fe11258d..1aab40dc 100644 --- a/qcelemental/models/v2/results.py +++ b/qcelemental/models/v2/results.py @@ -46,10 +46,10 @@ class AtomicResultProperties(ProtoModel): f"The QCSchema specification this model conforms to. Explicitly fixed as qcschema_atomicproperties." 
), ) - schema_version: Literal[2] = Field( - 2, - description="The version number of :attr:`~qcelemental.models.AtomicResultProperties.schema_name` to which this model conforms.", - ) + # TRIAL schema_version: Literal[2] = Field( + # TRIAL 2, + # TRIAL description="The version number of :attr:`~qcelemental.models.AtomicResultProperties.schema_name` to which this model conforms.", + # TRIAL ) # Calcinfo calcinfo_nbasis: Optional[int] = Field(None, description="The number of basis functions for the computation.") @@ -311,9 +311,9 @@ def _validate_derivs(cls, v, info): raise ValueError(f"Derivative must be castable to shape {shape}!") return v - @field_validator("schema_version", mode="before") - def _version_stamp(cls, v): - return 2 + # TRIAL @field_validator("schema_version", mode="before") + # TRIAL def _version_stamp(cls, v): + # TRIAL return 2 def dict(self, *args, **kwargs): # pure-json dict repr for QCFractal compliance, see https://github.com/MolSSI/QCFractal/issues/579 @@ -723,7 +723,7 @@ def convert_v( return self_vN -class AtomicResult(AtomicInput): +class AtomicResult(ProtoModel): r"""Results from a CMS program execution.""" schema_name: constr(strip_whitespace=True, pattern=r"^(qc\_?schema_output)$") = Field( # type: ignore @@ -736,6 +736,9 @@ class AtomicResult(AtomicInput): 2, description="The version number of :attr:`~qcelemental.models.AtomicResult.schema_name` to which this model conforms.", ) + id: Optional[str] = Field(None, description="The optional ID for the computation.") + input_data: AtomicInput = Field(..., description=str(AtomicInput.__doc__)) + molecule: Molecule = Field(..., description="The molecule with frame and orientation of the results.") properties: AtomicResultProperties = Field(..., description=str(AtomicResultProperties.__doc__)) wavefunction: Optional[WavefunctionProperties] = Field(None, description=str(WavefunctionProperties.__doc__)) @@ -751,9 +754,14 @@ class AtomicResult(AtomicInput): stderr: Optional[str] = Field(None, description="The standard error of the program execution.") native_files: Dict[str, Any] = Field({}, description="DSL files.") - success: bool = Field(..., description="The success of program execution. If False, other fields may be blank.") - error: Optional[ComputeError] = Field(None, description=str(ComputeError.__doc__)) + success: Literal[True] = Field( + True, description="The success of program execution. If False, other fields may be blank." + ) provenance: Provenance = Field(..., description=str(Provenance.__doc__)) + extras: Dict[str, Any] = Field( + {}, + description="Additional information to bundle with the computation. 
Use for schema development and scratch space.", + ) @field_validator("schema_name", mode="before") @classmethod @@ -773,12 +781,17 @@ def _version_stamp(cls, v): @field_validator("return_result") @classmethod def _validate_return_result(cls, v, info): - if info.data["driver"] == "energy": + print(info) + # Do not propagate validation errors + if "input_data" not in info.data: + raise ValueError("Input_data was not properly formed.") + driver = info.data["input_data"].driver + if driver == "energy": if isinstance(v, np.ndarray) and v.size == 1: v = v.item(0) - elif info.data["driver"] == "gradient": + elif driver == "gradient": v = np.asarray(v).reshape(-1, 3) - elif info.data["driver"] == "hessian": + elif driver == "hessian": v = np.asarray(v) nsq = int(v.size**0.5) v.shape = (nsq, nsq) @@ -799,8 +812,8 @@ def _wavefunction_protocol(cls, value, info): raise ValueError("wavefunction must be None, a dict, or a WavefunctionProperties object.") # Do not propagate validation errors - if "protocols" not in info.data: - raise ValueError("Protocols was not properly formed.") + if "input_data" not in info.data: + raise ValueError("Input_data was not properly formed.") # Handle restricted restricted = wfn.get("restricted", None) @@ -813,7 +826,7 @@ def _wavefunction_protocol(cls, value, info): wfn.pop(k) # Handle protocols - wfnp = info.data["protocols"].wavefunction + wfnp = info.data["input_data"].protocols.wavefunction return_keep = None if wfnp == "all": pass @@ -860,10 +873,10 @@ def _wavefunction_protocol(cls, value, info): @classmethod def _stdout_protocol(cls, value, info): # Do not propagate validation errors - if "protocols" not in info.data: - raise ValueError("Protocols was not properly formed.") + if "input_data" not in info.data: + raise ValueError("Input_data was not properly formed.") - outp = info.data["protocols"].stdout + outp = info.data["input_data"].protocols.stdout if outp is True: return value elif outp is False: @@ -874,7 +887,11 @@ def _stdout_protocol(cls, value, info): @field_validator("native_files") @classmethod def _native_file_protocol(cls, value, info): - ancp = info.data["protocols"].native_files + # Do not propagate validation errors + if "input_data" not in info.data: + raise ValueError("Input_data was not properly formed.") + + ancp = info.data["input_data"].protocols.native_files if ancp == "all": return value elif ancp == "none": @@ -904,6 +921,12 @@ def convert_v( dself = self.model_dump() if version == 1: + # input_data = self.input_data.convert_v(1) # TODO probably later + input_data = dself.pop("input_data") + input_data.pop("molecule", None) # discard + input_data.pop("provenance", None) # discard + dself["extras"] = {**input_data.pop("extras", {}), **dself.pop("extras", {})} # merge + dself = {**input_data, **dself} self_vN = qcel.models.v1.AtomicResult(**dself) return self_vN diff --git a/qcelemental/tests/addons.py b/qcelemental/tests/addons.py index 743e70a7..54f62d91 100644 --- a/qcelemental/tests/addons.py +++ b/qcelemental/tests/addons.py @@ -1,5 +1,6 @@ import json import socket +import sys from contextlib import contextmanager from pathlib import Path @@ -42,6 +43,13 @@ def internet_connection(): reason="Not detecting module py3Dmol. Install package if necessary and add to envvar PYTHONPATH", ) +using_qcmb = pytest.mark.skipif( + which_import("qcmanybody", return_bool=True) is False, + reason="Not detecting module QCManyBody. 
Install package if necessary and add to envvar PYTHONPATH", +) + +py37_skip = pytest.mark.skipif(sys.version_info.minor < 8, reason="Needs Python 3.8 features") + serialize_extensions = [ "json", "json-ext", diff --git a/qcelemental/tests/test_model_results.py b/qcelemental/tests/test_model_results.py index 17f4e995..1a65058d 100644 --- a/qcelemental/tests/test_model_results.py +++ b/qcelemental/tests/test_model_results.py @@ -8,7 +8,7 @@ import qcelemental as qcel -from .addons import drop_qcsk, schema_versions +from .addons import drop_qcsk, schema_versions, using_qcmb center_data = { "bs_sto3g_h": { @@ -93,7 +93,7 @@ @pytest.fixture(scope="function") -def result_data_fixture(schema_versions): +def result_data_fixture(schema_versions, request): Molecule = schema_versions.Molecule mol = Molecule.from_data( @@ -104,25 +104,39 @@ def result_data_fixture(schema_versions): """ ) - return { - "molecule": mol, - "driver": "energy", - "model": {"method": "UFF"}, - "return_result": 5, - "success": True, - "properties": {}, - "provenance": {"creator": "qcel"}, - "stdout": "I ran.", - } + if "v2" in request.node.name: + return { + "molecule": mol, + "input_data": {"molecule": mol, "model": {"method": "UFF"}, "driver": "energy"}, + "return_result": 5, + "success": True, + "properties": {}, + "provenance": {"creator": "qcel"}, + "stdout": "I ran.", + } + else: + return { + "molecule": mol, + "driver": "energy", + "model": {"method": "UFF"}, + "return_result": 5, + "success": True, + "properties": {}, + "provenance": {"creator": "qcel"}, + "stdout": "I ran.", + } @pytest.fixture(scope="function") -def wavefunction_data_fixture(result_data_fixture, schema_versions): +def wavefunction_data_fixture(result_data_fixture, schema_versions, request): BasisSet = schema_versions.basis.BasisSet bas = BasisSet(name="custom_basis", center_data=center_data, atom_map=["bs_sto3g_o", "bs_sto3g_h", "bs_sto3g_h"]) c_matrix = np.random.rand(bas.nbf, bas.nbf) - result_data_fixture["protocols"] = {"wavefunction": "all"} + if "v2" in request.node.name: + result_data_fixture["input_data"]["protocols"] = {"wavefunction": "all"} + else: + result_data_fixture["protocols"] = {"wavefunction": "all"} result_data_fixture["wavefunction"] = { "basis": bas, "restricted": True, @@ -134,8 +148,11 @@ def wavefunction_data_fixture(result_data_fixture, schema_versions): @pytest.fixture(scope="function") -def native_data_fixture(result_data_fixture): - result_data_fixture["protocols"] = {"native_files": "all"} +def native_data_fixture(result_data_fixture, request): + if "v2" in request.node.name: + result_data_fixture["input_data"]["protocols"] = {"native_files": "all"} + else: + result_data_fixture["protocols"] = {"native_files": "all"} result_data_fixture["native_files"] = { "input": """ echo @@ -191,6 +208,95 @@ def optimization_data_fixture(result_data_fixture): return ret +@pytest.fixture(scope="function") +def ethane_data_fixture(): + # from QCEngine stock_mols + return { + "geometry": [ + [+1.54034068369141, -1.01730823913235, +0.93128102073425], + [+4.07197633001232, -0.09756825926424, -0.02203578938791], + [+0.00025636057017, +0.00139534039687, +0.00111211603233], + [+1.30983130616505, -3.03614919350581, +0.54918567185649], + [+1.38003941036405, -0.71812565437083, +2.97078783593882], + [+5.61209917480096, -1.11612498901607, +0.90799157528946], + [+4.30241880148479, +1.92102238874847, +0.36057345099335], + [+4.23222331256867, -0.39619160402976, -2.06158817835790], + ], + "symbols": ["C", "C", "H", "H", "H", "H", "H", "H"], + 
"connectivity": [[0, 1, 1], [0, 2, 1], [0, 3, 1], [0, 4, 1], [1, 5, 1], [1, 6, 1], [1, 7, 1]], + } + + +@pytest.fixture(scope="function") +def torsiondrive_data_fixture(ethane_data_fixture, optimization_data_fixture): + ethane = ethane_data_fixture.copy() + optres = optimization_data_fixture.copy() + + input_data = { + "keywords": {"dihedrals": [(2, 0, 1, 5)], "grid_spacing": [180]}, + "input_specification": {"driver": "gradient", "model": {"method": "UFF", "basis": None}}, + "initial_molecule": [ethane] * 2, + "optimization_spec": { + "procedure": "geomeTRIC", + "keywords": { + "coordsys": "hdlc", + "maxiter": 500, + "program": "rdkit", + }, + }, + } + + ret = { + "success": True, + "provenance": {"creator": "qcel"}, + "final_energies": {"180": -2.3, "0": -4.5}, + "final_molecules": {"180": ethane, "0": ethane}, + "optimization_history": {"180": [optres, optres], "0": [optres]}, + **input_data, + } + + return ret + + +@pytest.fixture(scope="function") +def manybody_data_fixture(): + input_data = { + "molecule": { + "symbols": ["ne", "ne", "ne"], + "geometry": [[0, 0, 0], [0, 0, 2], [0, 0, 4]], + "fragments": [[0], [1], [2]], + }, + "specification": { + "keywords": { + "bsse_type": ["nocp"], + "levels": {3: "(any)"}, + "supersystem_ie_only": True, + "return_total_data": True, + }, + "driver": "energy", + "specification": { + "(any)": {"program": "psi4", "driver": "energy", "model": {"method": "mp2", "basis": "cc-pvdz"}}, + }, + }, + } + + ret = { + "success": True, + "provenance": {"creator": "me"}, + "input_data": input_data, + "return_result": -22, + "properties": {"calcinfo_nmc": 1, "return_energy": -22}, + "component_properties": { + '["(any)", [1, 2, 3], [1, 2, 3]]': {"calcinfo_natom": 3, "return_energy": -383.7231560517324}, + '["(any)", [1], [1, 2, 3]]': {"calcinfo_natom": 3, "return_energy": -128.68201344613635}, + '["(any)", [2], [1, 2, 3]]': {"calcinfo_natom": 3, "return_energy": -128.68610979339851}, + '["(any)", [3], [1, 2, 3]]': {"calcinfo_natom": 3, "return_energy": -128.68201344613036}, + }, + } + + return ret + + @pytest.mark.parametrize("center_name", center_data.keys()) def test_basis_shell_centers(center_name, schema_versions): BasisCenter = schema_versions.basis.BasisCenter @@ -279,7 +385,10 @@ def test_result_build(result_data_fixture, request, schema_versions): def test_result_build_wavefunction_delete(wavefunction_data_fixture, request, schema_versions): AtomicResult = schema_versions.AtomicResult - del wavefunction_data_fixture["protocols"] + if "v2" in request.node.name: + del wavefunction_data_fixture["input_data"]["protocols"] + else: + del wavefunction_data_fixture["protocols"] ret = AtomicResult(**wavefunction_data_fixture) drop_qcsk(ret, request.node.name) assert ret.wavefunction is None @@ -355,9 +464,15 @@ def test_wavefunction_protocols( wfn_data = wavefunction_data_fixture["wavefunction"] if protocol is None: - wavefunction_data_fixture.pop("protocols") + if "v2" in request.node.name: + wavefunction_data_fixture["input_data"].pop("protocols") + else: + wavefunction_data_fixture.pop("protocols") else: - wavefunction_data_fixture["protocols"]["wavefunction"] = protocol + if "v2" in request.node.name: + wavefunction_data_fixture["input_data"]["protocols"]["wavefunction"] = protocol + else: + wavefunction_data_fixture["protocols"]["wavefunction"] = protocol wfn_data["restricted"] = restricted bas = wfn_data["basis"] @@ -397,9 +512,15 @@ def test_native_protocols(protocol, provided, expected, native_data_fixture, req native_data = 
native_data_fixture["native_files"] if protocol is None: - native_data_fixture.pop("protocols") + if "v2" in request.node.name: + native_data_fixture["input_data"].pop("protocols") + else: + native_data_fixture.pop("protocols") else: - native_data_fixture["protocols"]["native_files"] = protocol + if "v2" in request.node.name: + native_data_fixture["input_data"]["protocols"]["native_files"] = protocol + else: + native_data_fixture["protocols"]["native_files"] = protocol for name in list(native_data.keys()): if name not in provided: @@ -445,12 +566,16 @@ def test_error_correction_protocol( policy["default_policy"] = default if defined is not None: policy["policies"] = defined - result_data_fixture["protocols"] = {"error_correction": policy} + if "v2" in request.node.name: + result_data_fixture["input_data"]["protocols"] = {"error_correction": policy} + else: + result_data_fixture["protocols"] = {"error_correction": policy} res = AtomicResult(**result_data_fixture) drop_qcsk(res, request.node.name) - assert res.protocols.error_correction.default_policy == default_result - assert res.protocols.error_correction.policies == defined_result + base = res.input_data if "v2" in request.node.name else res + assert base.protocols.error_correction.default_policy == default_result + assert base.protocols.error_correction.policies == defined_result def test_error_correction_logic(schema_versions): @@ -477,7 +602,10 @@ def test_error_correction_logic(schema_versions): def test_result_build_stdout_delete(result_data_fixture, request, schema_versions): AtomicResult = schema_versions.AtomicResult - result_data_fixture["protocols"] = {"stdout": False} + if "v2" in request.node.name: + result_data_fixture["input_data"]["protocols"] = {"stdout": False} + else: + result_data_fixture["protocols"] = {"stdout": False} ret = AtomicResult(**result_data_fixture) drop_qcsk(ret, request.node.name) assert ret.stdout is None @@ -560,65 +688,478 @@ def test_result_derivatives_array(request, schema_versions): assert obj.model_dump().keys() == {"calcinfo_natom", "return_gradient", "scf_total_hessian"} -@pytest.mark.parametrize( - "smodel", ["molecule", "atomicresultproperties", "atomicinput", "atomicresult", "optimizationresult", "basisset"] -) -def test_model_dictable(result_data_fixture, optimization_data_fixture, smodel, schema_versions, request): - qcsk_ver = "v2" if ("v2" in request.node.name) else "v1" - - if smodel == "molecule": - model = schema_versions.Molecule - data = result_data_fixture["molecule"].model_dump() - sver = (2, 2) # TODO , 3) - - elif smodel == "atomicresultproperties": - model = schema_versions.AtomicResultProperties - data = {"scf_one_electron_energy": "-5.0", "scf_dipole_moment": [1, 2, 3], "ccsd_dipole_moment": None} - sver = (None, 2) - - elif smodel == "atomicinput": - model = schema_versions.AtomicInput - data = {k: result_data_fixture[k] for k in ["molecule", "model", "driver"]} - sver = (1, 2) - - elif smodel == "atomicresult": - model = schema_versions.AtomicResult - data = result_data_fixture - sver = (1, 2) - - elif smodel == "optimizationresult": - model = schema_versions.OptimizationResult - data = optimization_data_fixture - sver = (1, 2) - - elif smodel == "basisset": - model = schema_versions.BasisSet - data = {"name": "custom", "center_data": center_data, "atom_map": ["bs_sto3g_o", "bs_sto3g_h", "bs_sto3g_h"]} - sver = (1, 2) - - def ver_tests(qcsk_ver): - if qcsk_ver == "v1": - if sver[0] is not None: - assert instance.schema_version == sver[0] - assert isinstance(instance, 
pydantic.v1.BaseModel) - elif qcsk_ver == "v2": - if sver[1] is not None: - assert instance.schema_version == sver[1] - assert isinstance(instance, pydantic.BaseModel) +@pytest.fixture(scope="function") +def every_model_fixture(request): + datas = {} + + smodel = "Molecule-A" + data = request.getfixturevalue("result_data_fixture") + data = data["molecule"].model_dump() + datas[smodel] = data + + smodel = "Molecule-B" + data = {"symbols": ["O", "H", "H"], "geometry": [0, 0, 0, 0, 0, 2, 0, 2, 0]} + datas[smodel] = data + + smodel = "BasisSet" + data = {"name": "custom", "center_data": center_data, "atom_map": ["bs_sto3g_o", "bs_sto3g_h", "bs_sto3g_h"]} + datas[smodel] = data + + smodel = "FailedOperation" + data = { + "input_data": request.getfixturevalue("result_data_fixture"), + "error": {"error_type": "expected_testing_error", "error_message": "If you see this, its all good"}, + } + datas[smodel] = data + + smodel = "AtomicInput" + data = request.getfixturevalue("result_data_fixture") + if "v2" in request.node.name: + data = data["input_data"] + else: + data = {k: data[k] for k in ["molecule", "model", "driver"]} + datas[smodel] = data + + smodel = "QCInputSpecification" # TODO "AtomicSpecification" + data = {"driver": "hessian", "model": {"basis": "def2-svp", "method": "CC"}} + datas[smodel] = data + + smodel = "AtomicResultProtocols" # TODO "AtomicProtocols" + data = {"wavefunction": "occupations_and_eigenvalues"} + datas[smodel] = data + + smodel = "AtomicResult" + data = request.getfixturevalue("result_data_fixture") + datas[smodel] = data + + smodel = "AtomicResultProperties" # TODO "AtomicProperties" + data = {"scf_one_electron_energy": "-5.0", "scf_dipole_moment": [1, 2, 3], "ccsd_dipole_moment": None} + datas[smodel] = data + + smodel = "WavefunctionProperties" + data = request.getfixturevalue("wavefunction_data_fixture") + data = data["wavefunction"] + datas[smodel] = data + + smodel = "OptimizationInput" + data = request.getfixturevalue("optimization_data_fixture") + data = {k: data[k] for k in ["initial_molecule", "input_specification"]} + datas[smodel] = data + + smodel = "OptimizationSpecification" + data = {"procedure": "pyberny"} + datas[smodel] = data + + smodel = "OptimizationProtocols" + data = {"trajectory": "initial_and_final"} + datas[smodel] = data + + smodel = "OptimizationResult" + data = request.getfixturevalue("optimization_data_fixture") + datas[smodel] = data + + smodel = "OptimizationProperties" # TODO actually collect + data = {"optimization_iterations": 14} + datas[smodel] = data + + smodel = "TorsionDriveInput" + data = request.getfixturevalue("torsiondrive_data_fixture") + data = {k: data[k] for k in ["initial_molecule", "input_specification", "optimization_spec", "keywords"]} + datas[smodel] = data + + # smodel = "TorsionDriveSpecification" # DNE + + smodel = "TDKeywords" # TODO "TorsionDriveKeywords" + data = {"dihedrals": [(2, 0, 1, 5)], "grid_spacing": [180]} + datas[smodel] = data + + # smodel = "TorsionDriveProtocols" # DNE + + smodel = "TorsionDriveResult" + data = request.getfixturevalue("torsiondrive_data_fixture") + datas[smodel] = data + + # smodel = "TorsionDriveProperties" # DNE + + smodel = "ManyBodyInput" + data = request.getfixturevalue("manybody_data_fixture") + data = data["input_data"] + datas[smodel] = data + + smodel = "ManyBodySpecification" + data = request.getfixturevalue("manybody_data_fixture") + data = data["input_data"]["specification"] + datas[smodel] = data + + smodel = "ManyBodyKeywords" + data = {"bsse_type": "ssfc", 
"levels": {2: "md"}} + datas[smodel] = data + + smodel = "ManyBodyProtocols" + data = {"component_results": "all"} + datas[smodel] = data + + smodel = "ManyBodyResult" + data = request.getfixturevalue("manybody_data_fixture") + datas[smodel] = data + + smodel = "ManyBodyResultProperties" # "ManyBodyProperties" + data = request.getfixturevalue("manybody_data_fixture") + data = data["properties"] + datas[smodel] = data + + return datas + + +# fmt: off +_model_classes_struct = [ + # v1_class, v2_class, test ID + pytest.param("Molecule-A", "Molecule-A", id="Mol-A"), + pytest.param("Molecule-B", "Molecule-B", id="Mol-B"), + pytest.param("BasisSet", "BasisSet", id="BasisSet"), + pytest.param("FailedOperation", "FailedOperation", id="FailedOp"), + pytest.param("AtomicInput", "AtomicInput", id="AtIn"), + pytest.param("QCInputSpecification", "QCInputSpecification", id="AtSpec"), # TODO AtomicSpecification + pytest.param("AtomicResultProtocols", "AtomicResultProtocols", id="AtPtcl"), # TODO AtomicProtocols + pytest.param("AtomicResult", "AtomicResult", id="AtRes"), + pytest.param("AtomicResultProperties", "AtomicResultProperties", id="AtProp"), # TODO AtomicProperties + pytest.param("WavefunctionProperties", "WavefunctionProperties", id="WfnProp"), + pytest.param("OptimizationInput", "OptimizationInput", id="OptIn"), + pytest.param("OptimizationSpecification", "OptimizationSpecification", id="OptSpec"), + pytest.param("OptimizationProtocols", "OptimizationProtocols", id="OptPtcl"), + pytest.param("OptimizationResult", "OptimizationResult", id="OptRes"), + # pytest.param(None, "OptimizationProperties", id="OptProp"), + pytest.param("TorsionDriveInput", "TorsionDriveInput", id="TDIn"), + # pytest.param(None, "TorsionDriveSpecification", id="TDSpec"), + pytest.param("TDKeywords", "TDKeywords", id="TDKw"), # TODO TorsionDriveKeywords + # pytest.param(None, "TorsionDriveProtocols", id="TDPtcl"), + pytest.param("TorsionDriveResult", "TorsionDriveResult", id="TDRes"), + # pytest.param(None, "TorsionDriveProperties", id="TDProp"), + pytest.param("ManyBodyInput", None, id="MBIn", marks=using_qcmb), + pytest.param("ManyBodySpecification", None, id="MBSpec", marks=using_qcmb), + pytest.param("ManyBodyKeywords", None, id="MBKw", marks=using_qcmb), + pytest.param("ManyBodyProtocols", None, id="MBPtcl", marks=using_qcmb), + pytest.param("ManyBodyResult", None, id="MBRes", marks=using_qcmb), + pytest.param("ManyBodyResultProperties", None, id="MBProp", marks=using_qcmb), # TODO ManyBodyProperties +] +# fmt: on + + +@pytest.mark.parametrize("smodel1,smodel2", _model_classes_struct) +def test_model_survey_success(smodel1, smodel2, every_model_fixture, request, schema_versions): + anskey = request.node.callspec.id.replace("None", "v1") + # fmt: off + ans = { + "v1-Mol-A" : None, "v2-Mol-A" : None, + "v1-Mol-B" : None, "v2-Mol-B" : None, + "v1-BasisSet" : None, "v2-BasisSet" : None, + "v1-FailedOp" : False, "v2-FailedOp" : False, + "v1-AtIn" : None, "v2-AtIn" : None, + "v1-AtSpec" : None, "v2-AtSpec" : None, + "v1-AtPtcl" : None, "v2-AtPtcl" : None, + "v1-AtRes" : True, "v2-AtRes" : True, + "v1-AtProp" : None, "v2-AtProp" : None, + "v1-WfnProp" : None, "v2-WfnProp" : None, + "v1-OptIn" : None, "v2-OptIn" : None, + "v1-OptSpec" : None, "v2-OptSpec" : None, + "v1-OptPtcl" : None, "v2-OptPtcl" : None, + "v1-OptRes" : True, "v2-OptRes" : True, + "v1-OptProp" : None, "v2-OptProp" : None, # v1 DNE + "v1-TDIn" : None, "v2-TDIn" : None, + "v1-TDSpec" : None, "v2-TDSpec" : None, # v1 DNE + "v1-TDKw" : None, "v2-TDKw" : None, + 
"v1-TDPtcl" : None, "v2-TDPtcl" : None, # v1 DNE + "v1-TDRes" : True, "v2-TDRes" : True, + "v1-TDProp" : None, "v2-TDProp" : None, # v1 DNE + "v1-MBIn" : None, "v2-MBIn" : None, # v2 DNE + "v1-MBSpec" : None, "v2-MBSpec" : None, # v2 DNE + "v1-MBKw" : None, "v2-MBKw" : None, # v2 DNE + "v1-MBPtcl" : None, "v2-MBPtcl" : None, # v2 DNE + "v1-MBRes" : True, "v2-MBRes" : True, # v2 DNE + "v1-MBProp" : None, "v2-MBProp" : None, # v2 DNE + }[anskey] + # fmt: on + + fieldsattr = "model_fields" if "v2" in anskey else "__fields__" + smodel = smodel2 if "v2" in anskey else smodel1 + if smodel is None: + pytest.skip("model not available for this schema version") + if "ManyBody" in smodel: + import qcmanybody + + model = getattr(qcmanybody.models, smodel.split("-")[0]) + else: + model = getattr(schema_versions, smodel.split("-")[0]) + data = every_model_fixture[smodel] + + # check default success set + instance = model(**data) + fld = "success" + if ans is None: + cptd = getattr(instance, fieldsattr) + assert fld not in cptd, f"[a] field {fld} unexpectedly present: {cptd}" + # py38: assert fld not in (cptd := getattr(instance, fieldsattr)), f"[a] field {fld} unexpectedly present: {cptd}" + else: + cptd = getattr(instance, fld, "not found!") + assert cptd == ans, f"[a] field {fld} = {cptd} != {ans}" + # py38: assert (cptd := getattr(instance, fld, "not found!")) == ans, f"[a] field {fld} = {cptd} != {ans}" + + # check success override + if ans is not None: + data["success"] = not ans + if "v2" in anskey: + # v2 has enforced T/F + with pytest.raises(pydantic.ValidationError) as e: + instance = model(**data) + cptd = getattr(instance, fld, "not found!") + assert cptd == ans, f"[b] field {fld} = {cptd} != {ans}" + # py38: assert (cptd := getattr(instance, fld, "not found!")) == ans, f"[b] field {fld} = {cptd} != {ans}" + else: + # v1 can be reset to T/F + instance = model(**data) + cptd = getattr(instance, fld, "not found!") + assert cptd == (not ans), f"[b] field {fld} = {cptd} != {not ans}" + # py38: assert (cptd := getattr(instance, fld, "not found!")) == (not ans), f"[b] field {fld} = {cptd} != {not ans}" + + +@pytest.mark.parametrize("smodel1,smodel2", _model_classes_struct) +def test_model_survey_schema_version(smodel1, smodel2, every_model_fixture, request, schema_versions): + anskey = request.node.callspec.id.replace("None", "v1") + # fmt: off + ans = { + # v2: In/Res + Mol/BasisSet/FailedOp, yes! Kw/Ptcl, no. Prop/Spec uncertain. + "v1-Mol-A" : 2, "v2-Mol-A" : 2, # TODO 3 + "v1-Mol-B" : 2, "v2-Mol-B" : 2, # TODO 3 + "v1-BasisSet" : 1, "v2-BasisSet" : 2, # TODO change for v2? 
+ "v1-FailedOp" : None, "v2-FailedOp" : 2, + "v1-AtIn" : 1, "v2-AtIn" : 2, + "v1-AtSpec" : 1, "v2-AtSpec" : None, # WAS 1, # TODO 2 + "v1-AtPtcl" : None, "v2-AtPtcl" : None, + "v1-AtRes" : 1, "v2-AtRes" : 2, + "v1-AtProp" : None, "v2-AtProp" : None, # WAS 2, + "v1-WfnProp" : None, "v2-WfnProp" : None, # TODO 2 + "v1-OptIn" : 1, "v2-OptIn" : 2, + "v1-OptSpec" : 1, "v2-OptSpec" : None, # WAS 1, # TODO 2 + "v1-OptPtcl" : None, "v2-OptPtcl" : None, + "v1-OptRes" : 1, "v2-OptRes" : 2, + "v1-OptProp" : None, "v2-OptProp" : None, # WAS 2, # v1 DNE + "v1-TDIn" : 1, "v2-TDIn" : 2, + "v1-TDSpec" : None, "v2-TDSpec" : None, # v1 DNE + "v1-TDKw" : None, "v2-TDKw" : None, # TODO 2 + "v1-TDPtcl" : None, "v2-TDPtcl" : None, # v1 DNE + "v1-TDRes" : 1, "v2-TDRes" : 2, + "v1-TDProp" : None, "v2-TDProp" : None, # v1 DNE + "v1-MBIn" : 1, "v2-MBIn" : 2, # v2 DNE + "v1-MBSpec" : 1, "v2-MBSpec" : 2, # v2 DNE + "v1-MBKw" : 1, "v2-MBKw" : 2, # v2 DNE + "v1-MBPtcl" : None, "v2-MBPtcl" : None, # v2 DNE + "v1-MBRes" : 1, "v2-MBRes" : 2, # v2 DNE + "v1-MBProp" : 1, "v2-MBProp" : None, # v2 DNE + }[anskey] + # fmt: on + + fieldsattr = "model_fields" if "v2" in anskey else "__fields__" + smodel = smodel2 if "v2" in anskey else smodel1 + if smodel is None: + pytest.skip("model not available for this schema version") + if "ManyBody" in smodel: + import qcmanybody + + model = getattr(qcmanybody.models, smodel.split("-")[0]) + else: + model = getattr(schema_versions, smodel.split("-")[0]) + data = every_model_fixture[smodel] + + # check default version set + instance = model(**data) + fld = "schema_version" + if ans is None: + cptd = getattr(instance, fieldsattr) + assert fld not in cptd, f"[a] field {fld} unexpectedly present: {cptd}" + # py38: assert fld not in (cptd := getattr(instance, fieldsattr)), f"[a] field {fld} unexpectedly present: {cptd}" + else: + cptd = getattr(instance, fld, "not found!") + assert cptd == ans, f"[a] field {fld} = {cptd} != {ans}" + # py38: assert (cptd := getattr(instance, fld, "not found!")) == ans, f"[a] field {fld} = {cptd} != {ans}" + + # check version override + if ans is not None: + data["schema_version"] = 7 + if "Molecule-B" in smodel: + # TODO fix mol validated pathway when upgrade Mol + with pytest.raises(qcel.ValidationError) as e: + instance = model(**data) + else: + instance = model(**data) + # "v1" used to be changeable, but now the version is a stamp, not a signal + cptd = getattr(instance, fld, "not found!") + assert cptd == ans, f"[b] field {fld} = {cptd} != {ans}" + # py38: assert (cptd := getattr(instance, fld, "not found!")) == ans, f"[b] field {fld} = {cptd} != {ans}" + + +@pytest.mark.parametrize("smodel1,smodel2", _model_classes_struct) +def test_model_survey_extras(smodel1, smodel2, every_model_fixture, request, schema_versions): + anskey = request.node.callspec.id.replace("None", "v1") + # fmt: off + ans = { + # v2: Ptcl/Prop/Kw + BasisSet, no! others, yes. 
+ "v1-Mol-A" : {}, "v2-Mol-A" : {}, + "v1-Mol-B" : {}, "v2-Mol-B" : {}, + "v1-BasisSet" : None, "v2-BasisSet" : None, + "v1-FailedOp" : {}, "v2-FailedOp" : {}, + "v1-AtIn" : {}, "v2-AtIn" : {}, + "v1-AtSpec" : {}, "v2-AtSpec" : {}, + "v1-AtPtcl" : None, "v2-AtPtcl" : None, + "v1-AtRes" : {}, "v2-AtRes" : {}, + "v1-AtProp" : None, "v2-AtProp" : None, + "v1-WfnProp" : None, "v2-WfnProp" : None, + "v1-OptIn" : {}, "v2-OptIn" : {}, + "v1-OptSpec" : None, "v2-OptSpec" : {}, + "v1-OptPtcl" : None, "v2-OptPtcl" : None, + "v1-OptRes" : {}, "v2-OptRes" : {}, + "v1-OptProp" : None, "v2-OptProp" : None, # v1 DNE + "v1-TDIn" : {}, "v2-TDIn" : {}, + "v1-TDSpec" : None, "v2-TDSpec" : {}, # v1 DNE + "v1-TDKw" : None, "v2-TDKw" : None, + "v1-TDPtcl" : None, "v2-TDPtcl" : None, # v1 DNE + "v1-TDRes" : {}, "v2-TDRes" : {}, + "v1-TDProp" : None, "v2-TDProp" : None, # v1 DNE + "v1-MBIn" : {}, "v2-MBIn" : {}, # v2 DNE + "v1-MBSpec" : {}, "v2-MBSpec" : {}, # v2 DNE + "v1-MBKw" : None, "v2-MBKw" : None, # v2 DNE + "v1-MBPtcl" : None, "v2-MBPtcl" : None, # v2 DNE + "v1-MBRes" : {}, "v2-MBRes" : {}, # v2 DNE + "v1-MBProp" : None, "v2-MBProp" : None, # v2 DNE + }[anskey] + # fmt: on + + fieldsattr = "model_fields" if "v2" in anskey else "__fields__" + smodel = smodel2 if "v2" in anskey else smodel1 + if smodel is None: + pytest.skip("model not available for this schema version") + if "ManyBody" in smodel: + import qcmanybody + + model = getattr(qcmanybody.models, smodel.split("-")[0]) + else: + model = getattr(schema_versions, smodel.split("-")[0]) + data = every_model_fixture[smodel] + # check default extras dict + instance = model(**data) + fld = "extras" + if ans is None: + cptd = getattr(instance, fieldsattr) + assert fld not in cptd, f"[a] field {fld} unexpectedly present: {cptd}" + # py38: assert fld not in (cptd := getattr(instance, fieldsattr)), f"[a] field {fld} unexpectedly present: {cptd}" + else: + cptd = getattr(instance, fld, "not found!") + assert cptd == ans, f"[a] field {fld} = {cptd} != {ans}" + # py38: assert (cptd := getattr(instance, fld, "not found!")) == ans, f"[a] field {fld} = {cptd} != {ans}" + + +@pytest.mark.parametrize("smodel1,smodel2", _model_classes_struct) +def test_model_survey_dictable(smodel1, smodel2, every_model_fixture, request, schema_versions): + anskey = request.node.callspec.id.replace("None", "v1") + + fieldsattr = "model_fields" if "v2" in anskey else "__fields__" + smodel = smodel2 if "v2" in anskey else smodel1 + if smodel is None: + pytest.skip("model not available for this schema version") + if "ManyBody" in smodel: + import qcmanybody + + model = getattr(qcmanybody.models, smodel.split("-")[0]) + else: + model = getattr(schema_versions, smodel.split("-")[0]) + data = every_model_fixture[smodel] + + # check inheritance + instance = model(**data) + if "v2" in anskey: + assert isinstance( + instance, pydantic.BaseModel + ), f"type({instance.__class__.__name__}) = {type(instance)} ⊄ BaseModel (Pyd v2)" + assert isinstance( + instance, qcel.models.v2.basemodels.ProtoModel + ), f"type({instance.__class__.__name__}) = {type(instance)} ⊄ v2.ProtoModel" + else: + assert isinstance( + instance, pydantic.v1.BaseModel + ), f"type({instance.__class__.__name__}) = {type(instance)} ⊄ v1.BaseModel (Pyd v1)" + assert isinstance( + instance, qcel.models.v1.basemodels.ProtoModel + ), f"type({instance.__class__.__name__}) = {type(instance)} ⊄ v1.ProtoModel" + + # check dict-ability instance = model(**data) - ver_tests(qcsk_ver) with warnings.catch_warnings(): 
warnings.simplefilter("ignore") instance = model(**instance.dict()) assert instance - ver_tests(qcsk_ver) - instance2 = model(**data) - ver_tests(qcsk_ver) - instance2 = model(**instance2.model_dump()) - assert instance2 - ver_tests(qcsk_ver) + # check model_dump-ability + instance = model(**data) + instance = model(**instance.model_dump()) + assert instance + + +@pytest.mark.parametrize("smodel1,smodel2", _model_classes_struct) +def test_model_survey_convertable(smodel1, smodel2, every_model_fixture, request, schema_versions): + anskey = request.node.callspec.id.replace("None", "v1") + # fmt: off + ans = [ + # "v1-Mol-A" , "v2-Mol-A" , + # "v1-Mol-B" , "v2-Mol-B" , + # "v1-BasisSet" , "v2-BasisSet", + "v1-FailedOp" , "v2-FailedOp", + "v1-AtIn" , "v2-AtIn" , + # "v1-AtSpec" , "v2-AtSpec" , + # "v1-AtPtcl" , "v2-AtPtcl" , + "v1-AtRes" , "v2-AtRes" , + # "v1-AtProp" , "v2-AtProp" , + # "v1-WfnProp" , "v2-WfnProp" , + "v1-OptIn" , "v2-OptIn" , + # "v1-OptSpec" , "v2-OptSpec" , + # "v1-OptPtcl" , "v2-OptPtcl" , + "v1-OptRes" , "v2-OptRes" , + # "v1-OptProp" , "v2-OptProp" , + "v1-TDIn" , "v2-TDIn" , + # "v1-TDSpec" , "v2-TDSpec" , + # "v1-TDKw" , "v2-TDKw" , + # "v1-TDPtcl" , "v2-TDPtcl" , + "v1-TDRes" , "v2-TDRes" , + # "v1-TDProp" , "v2-TDProp" , + # "v1-MBIn" , "v2-MBIn" , + # "v1-MBSpec" , "v2-MBSpec" , + # "v1-MBKw" , "v2-MBKw" , + # "v1-MBPtcl" , "v2-MBPtcl" , + # "v1-MBRes" . "v2-MBRes" , + # "v1-MBProp" , "v2-MBProp" , + ] + # fmt: on + + smodel_fro = smodel2 if "v2" in anskey else smodel1 + smodel_to = smodel1 if "v2" in anskey else smodel2 + if smodel_fro is None or smodel_to is None: + pytest.skip("model not available for this schema version") + if anskey not in ans: + pytest.skip("model not yet convert_v()-able") + if "ManyBody" in smodel_fro: + import qcmanybody + + # TODO + model = getattr(qcmanybody.models, smodel_fro.split("-")[0]) + else: + model_fro = getattr(schema_versions, smodel_fro.split("-")[0]) + models_to = qcel.models.v1 if "v2" in anskey else qcel.models.v2 + model_to = getattr(models_to, smodel_to.split("-")[0]) + data = every_model_fixture[smodel_fro] + + # check converts and converts to expected class + instance_fro = model_fro(**data) + instance_to = instance_fro.convert_v(1 if "v2" in anskey else 2) + assert isinstance(instance_to, model_to), f"instance {model_fro} failed to convert to {model_to}" def test_result_model_deprecations(result_data_fixture, optimization_data_fixture, request): diff --git a/qcelemental/tests/test_utils.py b/qcelemental/tests/test_utils.py index 65613981..b8d5f2c4 100644 --- a/qcelemental/tests/test_utils.py +++ b/qcelemental/tests/test_utils.py @@ -313,7 +313,7 @@ def test_serialization(obj, encoding): @pytest.fixture -def atomic_result_data(): +def atomic_result_data(request): """Mock AtomicResult output which can be tested against for complex serialization methods""" data = { @@ -383,8 +383,16 @@ def atomic_result_data(): "stderr": None, "native_files": {}, "success": True, - "error": None, } + if "v2" in request.node.name: + data["input_data"] = { + "molecule": data["molecule"], + "driver": data.pop("driver"), + "model": data.pop("model"), + "keywords": data.pop("keywords"), + "protocols": data.pop("protocols"), + } + return data diff --git a/qcelemental/tests/test_zqcschema.py b/qcelemental/tests/test_zqcschema.py index d84324d6..49ef3140 100644 --- a/qcelemental/tests/test_zqcschema.py +++ b/qcelemental/tests/test_zqcschema.py @@ -17,7 +17,9 @@ def qcschema_models(): @pytest.mark.parametrize("fl", files, ids=ids) -def 
test_qcschema(fl, qcschema_models): +def test_qcschema(fl, qcschema_models, request): + if "v2" in request.node.name: + pytest.skip() # TODO v2 schema above import jsonschema model = fl.parent.stem
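For orientation, here is a minimal sketch of the reshaped ``v2.AtomicResult`` and the ``convert_v`` hooks described in the changelog above, using the water/UFF data from the test fixtures in this patch. The signatures follow the patch as written (the changelog itself notes ``external_input_data`` may not be the final solution), so treat this as illustrative rather than authoritative:

```python
# Illustrative sketch only; data mirrors the water/UFF fixtures used in the
# tests of this patch, and the convert_v round trip may still change.
import qcelemental as qcel

mol = qcel.models.v2.Molecule(
    symbols=["O", "H", "H"],
    geometry=[0, 0, 0, 0, 0, 2, 0, 2, 0],
)

# v2.AtomicResult no longer inherits from AtomicInput: the input travels whole
# in the new ``input_data`` field, ``molecule`` carries the result frame, and
# ``success`` must be True (failures go to FailedOperation instead).
atres_v2 = qcel.models.v2.AtomicResult(
    molecule=mol,
    input_data={"molecule": mol, "driver": "energy", "model": {"method": "UFF"}},
    return_result=5.0,
    properties={},
    provenance={"creator": "qcel"},
    success=True,
)

# Converting down to v1 flattens input_data back into the merged v1 layout.
atres_v1 = atres_v2.convert_v(1)
assert atres_v1.driver == "energy"

# Converting a v1 result up to v2 can inject the original (already-v2) input
# via the new keyword, rather than reconstructing it from the merged fields.
roundtrip_v2 = atres_v1.convert_v(2, external_input_data=atres_v2.input_data)
assert roundtrip_v2.input_data.driver == "energy"
```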
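Similarly, the pinned ``Literal`` schema versions with their ``_version_stamp`` validators mean a stray ``schema_version`` is overwritten rather than rejected, and ``FailedOperation`` now carries ``success=False`` and an empty-dict ``extras`` by default. A small sketch of that behavior, again assuming the models as patched here:

```python
# Illustrative sketch of the version-stamping behavior added in this patch.
import qcelemental as qcel

# v1.QCInputSpecification.schema_version is now Literal[1]; a "before"
# validator stamps it back to 1 instead of raising on other values.
spec = qcel.models.v1.QCInputSpecification(
    driver="gradient", model={"method": "UFF"}, schema_version=7
)
assert spec.schema_version == 1

# v2.FailedOperation gained schema_name/schema_version=2, success is pinned
# to False, and extras now defaults to {} rather than None.
fop = qcel.models.v2.FailedOperation(
    error={"error_type": "expected_testing_error", "error_message": "demo"},
    schema_version=7,
)
assert fop.schema_version == 2
assert fop.success is False
assert fop.extras == {}
```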