From f8b3187b4931e6deee32b9b95f202326738f0c6b Mon Sep 17 00:00:00 2001 From: syandroo Date: Tue, 2 Aug 2022 14:45:02 -0700 Subject: [PATCH 1/4] infer env params :) --- launch/client.py | 14 +++++++++++--- launch/utils.py | 44 +++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 54 insertions(+), 4 deletions(-) diff --git a/launch/client.py b/launch/client.py index 1bae2350..adbd1401 100644 --- a/launch/client.py +++ b/launch/client.py @@ -39,7 +39,7 @@ SyncEndpoint, ) from launch.request_validation import validate_task_request -from launch.utils import trim_kwargs +from launch.utils import infer_env_params, trim_kwargs DEFAULT_NETWORK_TIMEOUT_SEC = 120 @@ -198,9 +198,10 @@ def create_model_bundle_from_dirs( model_bundle_name: str, base_paths: List[str], requirements_path: str, - env_params: Dict[str, str], + env_params: Optional[Dict[str, str]], load_predict_fn_module_path: str, load_model_fn_module_path: str, + env_selector: Optional[str] = None, app_config: Optional[Union[Dict[str, Any], str]] = None, ) -> ModelBundle: """ @@ -275,6 +276,9 @@ def create_model_bundle_from_dirs( with open(requirements_path, "r", encoding="utf-8") as req_f: requirements = req_f.read().splitlines() + if env_params is None: + env_params = infer_env_params(env_selector) + tmpdir = tempfile.mkdtemp() try: zip_path = os.path.join(tmpdir, "bundle.zip") @@ -331,7 +335,8 @@ def create_model_bundle( # pylint: disable=too-many-statements self, model_bundle_name: str, - env_params: Dict[str, str], + env_params: Optional[Dict[str, str]], + env_selector: Optional[str] = None, *, load_predict_fn: Optional[ Callable[[LaunchModel_T], Callable[[Any], Any]] @@ -435,6 +440,9 @@ def create_model_bundle( # pylint: disable=too-many-statements ) # TODO should we try to catch when people intentionally pass both model and load_model_fn as None? 
+ if env_params is None: + env_params = infer_env_params(env_selector) + if requirements is None: # TODO explore: does globals() actually work as expected? Should we use globals_copy instead? requirements_inferred = find_packages_from_imports(globals()) diff --git a/launch/utils.py b/launch/utils.py index 08d4fb75..5ce92802 100644 --- a/launch/utils.py +++ b/launch/utils.py @@ -1,4 +1,4 @@ -from typing import Any, Dict +from typing import Any, Dict, Optional def trim_kwargs(kwargs_dict: Dict[Any, Any]): @@ -7,3 +7,45 @@ def trim_kwargs(kwargs_dict: Dict[Any, Any]): """ dict_copy = {k: v for k, v in kwargs_dict.items() if v is not None} return dict_copy + + +def infer_env_params(env_selector: Optional[str]): + """ + Returns an env_params dict from the env_selector. + + env_selector: str - Either "pytorch" or "tensorflow" + """ + if env_selector == "pytorch": + import torch + + try: + ver = torch.__version__.split("+") + torch_version = ver[0] + cuda_version = ver[1][2:] if len(ver) > 1 else "113" + if ( + len(cuda_version) < 3 + ): # we can only parse cuda versions in the double digits + raise ValueError( + "PyTorch version parsing does not support CUDA versions below 10.0" + ) + tag = f"{torch_version}-cuda{cuda_version[:2]}.{cuda_version[2:]}-cudnn8-runtime" + return { + "framework_type": "pytorch", + "pytorch_image_tag": tag, + } + except Exception as e: + raise ValueError( + "Failed to parse correct PyTorch version, try setting your own env_params." + ) + elif env_selector == "tensorflow": + import tensorflow as tf + + ver = tf.__version__ + return { + "framework_type": "tensorflow", + "tensorflow_version": ver, + } + else: + raise ValueError( + "Unsupported env_selector, please set to pytorch or tensorflow, or set your own env_params." 
+ ) From 7d6168c27a707d34b16a4c4daa4035bf072fa168 Mon Sep 17 00:00:00 2001 From: syandroo Date: Tue, 2 Aug 2022 14:53:41 -0700 Subject: [PATCH 2/4] show torch version --- launch/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/launch/utils.py b/launch/utils.py index 5ce92802..ad335b22 100644 --- a/launch/utils.py +++ b/launch/utils.py @@ -33,9 +33,9 @@ def infer_env_params(env_selector: Optional[str]): "framework_type": "pytorch", "pytorch_image_tag": tag, } - except Exception as e: + except: raise ValueError( - "Failed to parse correct PyTorch version, try setting your own env_params." + f"Failed to parse PyTorch version {torch.__version__}, try setting your own env_params." ) elif env_selector == "tensorflow": import tensorflow as tf From 54ec28e564574a38ed02e40eb0248bba71adeee5 Mon Sep 17 00:00:00 2001 From: syandroo Date: Tue, 2 Aug 2022 15:06:11 -0700 Subject: [PATCH 3/4] python version too --- launch/utils.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/launch/utils.py b/launch/utils.py index ad335b22..9275f51c 100644 --- a/launch/utils.py +++ b/launch/utils.py @@ -17,18 +17,24 @@ def infer_env_params(env_selector: Optional[str]): """ if env_selector == "pytorch": import torch + import sys try: ver = torch.__version__.split("+") torch_version = ver[0] cuda_version = ver[1][2:] if len(ver) > 1 else "113" + python_minor = sys.version_info.minor if ( len(cuda_version) < 3 ): # we can only parse cuda versions in the double digits raise ValueError( "PyTorch version parsing does not support CUDA versions below 10.0" ) - tag = f"{torch_version}-cuda{cuda_version[:2]}.{cuda_version[2:]}-cudnn8-runtime" + if sys.version_info.major < 3: + raise ValueError( + "PyTorch version parsing only supports Python3" + ) + tag = f"{torch_version}-cuda{cuda_version[:2]}.{cuda_version[2:]}-cudnn{python_minor}-runtime" return { "framework_type": "pytorch", "pytorch_image_tag": tag, From 002b621f72c1d036df484ce83a72f060670f1234 
Mon Sep 17 00:00:00 2001 From: syandroo Date: Tue, 2 Aug 2022 15:19:59 -0700 Subject: [PATCH 4/4] parse cudnn version --- launch/utils.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/launch/utils.py b/launch/utils.py index 9275f51c..7b596399 100644 --- a/launch/utils.py +++ b/launch/utils.py @@ -17,24 +17,25 @@ def infer_env_params(env_selector: Optional[str]): """ if env_selector == "pytorch": import torch - import sys try: ver = torch.__version__.split("+") torch_version = ver[0] cuda_version = ver[1][2:] if len(ver) > 1 else "113" - python_minor = sys.version_info.minor + cudnn_available = torch.backends.cudnn.is_available() + cudnn_version = ( + str(torch.backends.cudnn.version())[:1] + if cudnn_available + else "8" + ) + if ( len(cuda_version) < 3 ): # we can only parse cuda versions in the double digits raise ValueError( "PyTorch version parsing does not support CUDA versions below 10.0" ) - if sys.version_info.major < 3: - raise ValueError( - "PyTorch version parsing only supports Python3" - ) - tag = f"{torch_version}-cuda{cuda_version[:2]}.{cuda_version[2:]}-cudnn{python_minor}-runtime" + tag = f"{torch_version}-cuda{cuda_version[:2]}.{cuda_version[2:]}-cudnn{cudnn_version}-runtime" return { "framework_type": "pytorch", "pytorch_image_tag": tag,