[backend] adhoc use predefined model for multimodal inference (#1727)
* [backend] adhoc use predefined model for multimodal inference

* update schema

* update tests

* update model file
IJtLJZ8Rm4Yr authored Jun 5, 2023
1 parent 0878790 commit b44514f
Showing 7 changed files with 37 additions and 24 deletions.
39 changes: 25 additions & 14 deletions ymir/backend/src/ymir_app/app/api/api_v1/endpoints/inferences.py
@@ -1,5 +1,6 @@
+import os
 import json
-from typing import Any, Dict, Generator
+from typing import Any, Dict, Generator, Tuple

 from fastapi import APIRouter, Depends
 from fastapi.logger import logger
@@ -16,7 +17,7 @@
     ProjectNotFound,
 )
 from app.config import settings
-from app.utils.files import FailedToDownload, save_files
+from app.utils.files import FailedToDownload, save_file, save_files
 from app.utils.ymir_controller import ControllerClient

 router = APIRouter()
@@ -33,12 +34,18 @@ def call_inference(
     """
     Call Inference
     """
-    model_stage = crud.model_stage.get(db, id=inference_in.model_stage_id)
-    if not model_stage:
-        logger.error("Failed to find model stage id: %s", inference_in.model_stage_id)
-        raise ModelStageNotFound()
+    if inference_in.model_stage_id:
+        model_stage = crud.model_stage.get(db, id=inference_in.model_stage_id)
+        if not model_stage:
+            logger.error("Failed to find model stage id: %s", inference_in.model_stage_id)
+            raise ModelStageNotFound()
+        model_hash, model_stage_name = model_stage.model.hash, model_stage.name
+    else:
+        # FIXME
+        # adhoc use pre-defined multimodal model
+        model_hash, model_stage_name = adhoc_prepare_multimodal_model()

-    docker_image = crud.docker_image.get_inference_docker_image(db, url=inference_in.docker_image)
+    docker_image = crud.docker_image.get_inference_docker_image(db, id=inference_in.docker_image_id)
     if not docker_image:
         logger.error("Failed to find inference model")
         raise InvalidInferenceConfig()
@@ -58,8 +65,8 @@ def call_inference(
            current_user.id,
            project.id,
            project.object_type,
-           model_stage.model.hash,
-           model_stage.name,
+           model_hash,
+           model_stage_name,
            asset_dir,
            docker_image.url,
            json.dumps(inference_in.docker_image_config),
@@ -73,11 +80,7 @@ def call_inference(
     except KeyError:
         logger.exception("Invalid inference result format: %s", resp)
         raise InvalidInferenceResultFormat()
-    result = {
-        "model_stage_id": inference_in.model_stage_id,
-        "annotations": annotations,
-    }
-    return {"result": result}
+    return {"result": {"annotations": annotations}}


 def extract_inference_annotations(resp: Dict, *, filename_mapping: Dict) -> Generator:
@@ -87,3 +90,11 @@ def extract_inference_annotations(resp: Dict, *, filename_mapping: Dict) -> Generator:
             "image_url": filename_mapping[filename],
             "annotations": annotations,
         }
+
+
+def adhoc_prepare_multimodal_model() -> Tuple[str, str]:
+    model_hash, model_stage_name = settings.MULTIMODAL_MODEL_HASH, "default"
+    if settings.MODELS_PATH and not os.path.isfile(f"{settings.MODELS_PATH}/{model_hash}"):
+        save_file(f"http://web/{model_hash}", settings.MODELS_PATH, keep=True)
+        logger.info(f"downloaded pre-defined multimodal model {model_hash}")
+    return model_hash, model_stage_name
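
The new adhoc_prepare_multimodal_model helper lazily downloads the predefined model into MODELS_PATH the first time it is needed. As a minimal sketch (not part of this commit), the fallback branch could be exercised in isolation roughly as follows, assuming pytest's tmp_path fixture, that the settings object allows attribute mutation, and the module paths shown in the diff:

from unittest import mock

from app.api.api_v1.endpoints import inferences
from app.config import settings


def test_adhoc_prepare_multimodal_model_downloads_when_missing(tmp_path) -> None:
    # MODELS_PATH points at an empty directory, so the model file is missing
    # and the helper is expected to fetch it via save_file.
    with mock.patch.object(settings, "MODELS_PATH", str(tmp_path)), mock.patch.object(
        inferences, "save_file"
    ) as mocked_save_file:
        model_hash, stage_name = inferences.adhoc_prepare_multimodal_model()

    assert model_hash == settings.MULTIMODAL_MODEL_HASH
    assert stage_name == "default"
    mocked_save_file.assert_called_once_with(f"http://web/{model_hash}", str(tmp_path), keep=True)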
3 changes: 3 additions & 0 deletions ymir/backend/src/ymir_app/app/config.py
@@ -115,5 +115,8 @@ def get_openpai_enabled(cls, values: Dict[str, Any]) -> Dict:
     OFFICIAL_DOCKER_IMAGE_URL: str = "industryessentials/ymir-executor:ymir2.4.0-detection-instance-sementic-in-one"
     INIT_OFFICIAL_DOCKER_IMAGE: bool = True

+    # Ad Hoc multimodal model hash
+    MULTIMODAL_MODEL_HASH: str = "7c356ad5fcf5b71d7d080b278b6fcdbe814d4472"
+

 settings = Settings(_env_file=".env")  # type: ignore
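
Since Settings is constructed with _env_file=".env" and, assuming it is a pydantic BaseSettings, also reads process environment variables by field name, the ad hoc hash can presumably be overridden per deployment without code changes. An illustrative sketch with a placeholder hash:

import os

from app.config import Settings

# Placeholder value, not a real model hash; set before constructing Settings.
os.environ["MULTIMODAL_MODEL_HASH"] = "0000000000000000000000000000000000000000"

# Assuming Settings is a pydantic BaseSettings, environment variables take
# precedence over the .env file, so this instance picks up the override.
settings = Settings(_env_file=".env")
assert settings.MULTIMODAL_MODEL_HASH == "0000000000000000000000000000000000000000"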
4 changes: 2 additions & 2 deletions ymir/backend/src/ymir_app/app/crud/crud_image.py
@@ -46,10 +46,10 @@ def get_multi_with_filter(
         else:
             return query.all(), query.count()

-    def get_inference_docker_image(self, db: Session, url: str) -> Optional[DockerImage]:
+    def get_inference_docker_image(self, db: Session, id: int) -> Optional[DockerImage]:
         query = db.query(self.model).filter(not_(self.model.is_deleted))
         query = query.filter(DockerImage.configs.any(DockerImageConfig.type == int(DockerImageType.infer)))
-        return query.filter(self.model.url == url).first()  # type: ignore
+        return query.filter(self.model.id == id).first()  # type: ignore

     def get_official_docker_images(self, db: Session) -> List[DockerImage]:
         query = db.query(self.model).filter(not_(self.model.is_deleted))
5 changes: 2 additions & 3 deletions ymir/backend/src/ymir_app/app/schemas/inference.py
@@ -9,9 +9,9 @@


 class InferenceBase(BaseModel):
-    docker_image: str
+    docker_image_id: int
     project_id: int
-    model_stage_id: int
+    model_stage_id: Optional[int]
     image_urls: List[str]
     docker_image_config: Dict = Field(description="inference docker image runtime configuration")

@@ -57,7 +57,6 @@ class Annotation(BaseModel):


 class InferenceResult(BaseModel):
-    model_stage_id: int
     annotations: List[Annotation]

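
With model_stage_id now optional and the docker_image URL replaced by docker_image_id, a request that relies on the predefined multimodal model simply omits the stage id. A hypothetical payload for the inference endpoint (all values are placeholders):

payload = {
    "project_id": 1,
    "docker_image_id": 42,  # numeric id instead of the former "docker_image" URL
    # "model_stage_id" omitted: the backend falls back to settings.MULTIMODAL_MODEL_HASH
    "image_urls": ["https://example.com/sample.jpg"],
    "docker_image_config": {"mock_key": "mock_value"},  # runtime config passed to the docker image
}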
8 changes: 4 additions & 4 deletions ymir/backend/src/ymir_app/tests/api/test_inferences.py
@@ -12,7 +12,7 @@
 from app.config import settings
 from tests.utils.images import create_docker_image_and_configs
 from tests.utils.models import create_model_stage
-from tests.utils.utils import random_lower_string, random_url
+from tests.utils.utils import random_url
 from tests.utils.projects import create_project_record


@@ -24,7 +24,7 @@ def test_call_inference_missing_model(
         j = {
             "project_id": project_id,
             "model_stage_id": random.randint(1000, 2000),
-            "docker_image": random_lower_string(),
+            "docker_image_id": random.randint(1000, 2000),
             "image_urls": [random_url()],
             "docker_image_config": {"mock_docker_image_config": "mock_docker_image_config"},
         }
@@ -48,7 +48,7 @@ def test_call_inference_invalid_docker(
         j = {
             "project_id": project_id,
             "model_stage_id": model_stage.id,
-            "docker_image": random_lower_string(),
+            "docker_image_id": random.randint(1000, 2000),
             "image_urls": [random_url()],
             "docker_image_config": {"mock_docker_image_config": "mock_docker_image_config"},
         }
@@ -73,7 +73,7 @@ def test_call_inference_download_error(
         j = {
             "project_id": project_id,
             "model_stage_id": model_stage.id,
-            "docker_image": image.url,
+            "docker_image_id": image.id,
            "image_urls": [random_url()],
            "docker_image_config": {"mock_docker_image_config": "mock_docker_image_config"},
        }
2 changes: 1 addition & 1 deletion ymir/backend/src/ymir_app/tests/crud/test_image.py
@@ -58,6 +58,6 @@ def test_get_inference_images(self, db: Session) -> None:
         )
         crud.image_config.create(db, obj_in=image_config_in)

-        fetched_image = crud.docker_image.get_inference_docker_image(db, url=url)
+        fetched_image = crud.docker_image.get_inference_docker_image(db, id=created_image.id)
         assert fetched_image is not None
         assert created_image.hash == fetched_image.hash
Binary file not shown.
