From d239d39c48fd20c221c0e27e5fceb6b28ab5bece Mon Sep 17 00:00:00 2001
From: asd981256
Date: Thu, 31 Oct 2024 22:15:59 +0800
Subject: [PATCH] add ensemble example

Signed-off-by: asd981256
---
 .../inference_graph/ensemble_vote/AvgVote.py  |  42 +++++
 .../ensemble_vote/DummyClassifier1.py         |  37 ++++
 .../ensemble_vote/DummyClassifier2.py         |  37 ++++
 .../inference_graph/ensemble_vote/README.md   | 169 ++++++++++++++++++
 .../inference_graph/ensemble_vote/input.json  |   1 +
 mkdocs.yml                                    |   1 +
 6 files changed, 287 insertions(+)
 create mode 100644 docs/modelserving/inference_graph/ensemble_vote/AvgVote.py
 create mode 100644 docs/modelserving/inference_graph/ensemble_vote/DummyClassifier1.py
 create mode 100644 docs/modelserving/inference_graph/ensemble_vote/DummyClassifier2.py
 create mode 100644 docs/modelserving/inference_graph/ensemble_vote/README.md
 create mode 100644 docs/modelserving/inference_graph/ensemble_vote/input.json

diff --git a/docs/modelserving/inference_graph/ensemble_vote/AvgVote.py b/docs/modelserving/inference_graph/ensemble_vote/AvgVote.py
new file mode 100644
index 000000000..dc6bab513
--- /dev/null
+++ b/docs/modelserving/inference_graph/ensemble_vote/AvgVote.py
@@ -0,0 +1,42 @@
+import argparse
+from typing import Dict, Union
+import numpy as np
+from kserve import (
+    Model,
+    ModelServer,
+    model_server,
+    InferRequest,
+    InferOutput,
+    InferResponse,
+    logging,
+)
+from kserve.utils.utils import get_predict_response
+
+class AvgVote(Model):
+    def __init__(self, name: str):
+        super().__init__(name)
+        self.model = None
+        self.ready = False
+        self.load()
+
+    def load(self):
+        self.ready = True
+
+    def predict(self, payload: Union[Dict, InferRequest], headers: Dict[str, str] = None) -> Union[Dict, InferResponse]:
+        tmp = []
+        for isvcName, output in payload.items():
+            prediction = output['predictions']
+            tmp.append(prediction)
+
+        result = [sum(x)/len(tmp) for x in zip(*tmp)]  # assume the same number of labels from every classifier
+        return get_predict_response(payload, result, self.name)
+
+parser = argparse.ArgumentParser(parents=[model_server.parser])
+args, _ = parser.parse_known_args()
+
+if __name__ == "__main__":
+    if args.configure_logging:
+        logging.configure_logging(args.log_config_file)
+
+    model = AvgVote(args.model_name)
+    ModelServer().start([model])
diff --git a/docs/modelserving/inference_graph/ensemble_vote/DummyClassifier1.py b/docs/modelserving/inference_graph/ensemble_vote/DummyClassifier1.py
new file mode 100644
index 000000000..6f1c7efc5
--- /dev/null
+++ b/docs/modelserving/inference_graph/ensemble_vote/DummyClassifier1.py
@@ -0,0 +1,37 @@
+import argparse
+from typing import Dict, Union
+import numpy as np
+from kserve import (
+    Model,
+    ModelServer,
+    model_server,
+    InferRequest,
+    InferOutput,
+    InferResponse,
+    logging,
+)
+from kserve.utils.utils import get_predict_response
+
+
+class DummyClassifier1(Model):
+    def __init__(self, name: str):
+        super().__init__(name)
+        self.model = None
+        self.ready = False
+        self.load()
+
+    def load(self):
+        self.ready = True
+
+    def predict(self, payload: Union[Dict, InferRequest], headers: Dict[str, str] = None) -> Union[Dict, InferResponse]:
+        return {"predictions": [0.8, 0.2]}
+
+parser = argparse.ArgumentParser(parents=[model_server.parser])
+args, _ = parser.parse_known_args()
+
+if __name__ == "__main__":
+    if args.configure_logging:
+        logging.configure_logging(args.log_config_file)
+
+    model = DummyClassifier1(args.model_name)
+    ModelServer().start([model])
diff --git a/docs/modelserving/inference_graph/ensemble_vote/DummyClassifier2.py b/docs/modelserving/inference_graph/ensemble_vote/DummyClassifier2.py
new file mode 100644
index 000000000..4c835e951
--- /dev/null
+++ b/docs/modelserving/inference_graph/ensemble_vote/DummyClassifier2.py
@@ -0,0 +1,37 @@
+import argparse
+from typing import Dict, Union
+import numpy as np
+from kserve import (
+    Model,
+    ModelServer,
+    model_server,
+    InferRequest,
+    InferOutput,
+    InferResponse,
+    logging,
+)
+from kserve.utils.utils import get_predict_response
+
+
+class DummyClassifier2(Model):
+    def __init__(self, name: str):
+        super().__init__(name)
+        self.model = None
+        self.ready = False
+        self.load()
+
+    def load(self):
+        self.ready = True
+
+    def predict(self, payload: Union[Dict, InferRequest], headers: Dict[str, str] = None) -> Union[Dict, InferResponse]:
+        return {"predictions": [0.6, 0.4]}
+
+parser = argparse.ArgumentParser(parents=[model_server.parser])
+args, _ = parser.parse_known_args()
+
+if __name__ == "__main__":
+    if args.configure_logging:
+        logging.configure_logging(args.log_config_file)
+
+    model = DummyClassifier2(args.model_name)
+    ModelServer().start([model])
diff --git a/docs/modelserving/inference_graph/ensemble_vote/README.md b/docs/modelserving/inference_graph/ensemble_vote/README.md
new file mode 100644
index 000000000..b43bd7126
--- /dev/null
+++ b/docs/modelserving/inference_graph/ensemble_vote/README.md
@@ -0,0 +1,169 @@
+# Deploy Ensemble Learning with InferenceGraph
+This tutorial demonstrates how to deploy an ensemble learning model using `InferenceGraph`. It targets the case where the classifiers are too heavy, or otherwise unsuitable, to combine into a single custom model.
+
+## Deploy the individual InferenceServices
+
+### Build InferenceServices
+We focus on how the ensemble node gathers the classifiers' outputs and give an example of how to extract them with Python code. We therefore skip the classifier implementation and simply use [dummy classifier 1](DummyClassifier1.py) and [dummy classifier 2](DummyClassifier2.py), which return fixed results for demonstration.
+
+#### Ensemble Node outputs
+If `name` is set for a step in `steps`, the ensemble node uses it as the key for the corresponding `InferenceService` output. Otherwise, it uses the index of the `InferenceService` within `steps` instead.
+
+For example, an ensemble node deployed as follows
+```yaml
+  routerType: Ensemble
+  steps:
+  - serviceName: classifier-1
+    name: classifier-1
+  - serviceName: classifier-2
+```
+will produce a result similar to this:
+```json
+{"1":{"predictions":[0.6,0.4]},"classifier-1":{"predictions":[0.8,0.2]}}
+```
+#### Vote
+In this tutorial, we use the following [Python code](AvgVote.py) to build an image for average voting.
+```python
+import argparse
+from typing import Dict, Union
+import numpy as np
+from kserve import (
+    Model,
+    ModelServer,
+    model_server,
+    InferRequest,
+    InferOutput,
+    InferResponse,
+    logging,
+)
+from kserve.utils.utils import get_predict_response
+
+class AvgVote(Model):
+    def __init__(self, name: str):
+        super().__init__(name)
+        self.model = None
+        self.ready = False
+        self.load()
+
+    def load(self):
+        self.ready = True
+
+    def predict(self, payload: Union[Dict, InferRequest], headers: Dict[str, str] = None) -> Union[Dict, InferResponse]:
+        tmp = []
+        for isvcName, output in payload.items():
+            prediction = output['predictions']
+            tmp.append(prediction)
+
+        result = [sum(x)/len(tmp) for x in zip(*tmp)]  # assume the same number of labels from every classifier
+        return get_predict_response(payload, result, self.name)
+
+parser = argparse.ArgumentParser(parents=[model_server.parser])
+args, _ = parser.parse_known_args()
+
+if __name__ == "__main__":
+    if args.configure_logging:
+        logging.configure_logging(args.log_config_file)
+
+    model = AvgVote(args.model_name)
+    ModelServer().start([model])
+```
+
+#### Build Image
+We skip this step for now. Take a look at [custom_model buildpacks](../../v1beta1/custom/custom_model/#build-custom-serving-image-with-buildpacks), or use any other tool that can build the image for you.
+
+### Deploy InferenceServices
+```bash
+kubectl apply -f - <
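The per-label averaging performed by `AvgVote.predict` can be checked outside the cluster before building any image. The snippet below is a minimal, plain-Python sketch: it feeds a payload shaped like the example ensemble output above (the keys `1` and `classifier-1` are taken from that example, not from a live deployment) through the same zip-and-average logic, and it does not require KServe to be installed.

```python
# Minimal sketch of the averaging that AvgVote.predict performs.
# The payload shape mirrors the example ensemble node output shown earlier.
ensemble_output = {
    "1": {"predictions": [0.6, 0.4]},
    "classifier-1": {"predictions": [0.8, 0.2]},
}

# Collect every classifier's prediction list, then average score-by-score per label position.
predictions = [output["predictions"] for output in ensemble_output.values()]
averaged = [sum(scores) / len(predictions) for scores in zip(*predictions)]

print(averaged)  # approximately [0.7, 0.3]: (0.6 + 0.8) / 2 and (0.4 + 0.2) / 2
```

Since the two dummy classifiers always return those fixed scores, a deployed graph wired to them should produce the same averaged values, wrapped into a standard prediction response by `get_predict_response`.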