## Describe your changes

Add float16 configuration to Mistral.

## Checklist before requesting a review

- [ ] Add unit tests for this change.
- [ ] Make sure all tests can pass.
- [x] Update documents if necessary.
- [x] Lint and apply fixes to your code by running `lintrunner -a`
- [ ] Is this a user-facing change? If yes, give a description of this change to be included in the release notes.
- [ ] Is this PR including examples changes? If yes, please remember to update [example documentation](https://github.com/microsoft/Olive/blob/main/docs/source/examples.md) in a follow-up PR.

## (Optional) Issue link
Commit eb94a1d, 1 parent fed7c66. Showing 4 changed files with 205 additions and 2 deletions.
New file (+93 lines), the Mistral optimization example script:

```python
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------
import argparse
import json
import shutil
from pathlib import Path

import onnxruntime as ort
import torch
from transformers import AutoConfig, LlamaTokenizer

from olive.workflows import run as olive_run

# ruff: noqa: T201, T203


def optimize(model_name: str, optimized_model_dir: Path):
    ort.set_default_logger_severity(4)
    cur_dir = Path(__file__).resolve().parent

    # Optimize the model with Olive
    print(f"\nOptimizing {model_name}")

    olive_config = None
    with (cur_dir / "mistral_optimize.json").open() as fin:
        olive_config = json.load(fin)

    olive_config["input_model"]["config"]["model_path"] = model_name
    olive_run(olive_config)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--optimize", action="store_true", help="Runs the optimization step")
    parser.add_argument(
        "--model-id",
        dest="model_id",
        type=str,
        default="mistralai/Mistral-7B-v0.1",
        help="Model Id to load",
    )
    parser.add_argument("--inference", action="store_true", help="Runs the inference step")
    args = parser.parse_args()

    script_dir = Path(__file__).resolve().parent
    optimized_model_dir = script_dir / "models" / "convert-optimize-perf_tuning" / "mistral_gpu-cuda_model"

    if args.optimize:
        shutil.rmtree(optimized_model_dir, ignore_errors=True)

    if args.optimize or not optimized_model_dir.exists():
        optimize(args.model_id, optimized_model_dir)

    if args.inference:
        prompt = "Is it normal to have a dark ring around the iris of my eye?"

        tokenizer = LlamaTokenizer.from_pretrained(args.model_id)
        tokens = tokenizer(prompt, return_tensors="pt")
        # Release the tokenizer; only the encoded tokens are needed below.
        tokenizer = None

        config = AutoConfig.from_pretrained(args.model_id)
        # With GQA, the KV cache is shaped by num_key_value_heads
        # (8 for Mistral-7B), not num_attention_heads (32).
        num_heads = config.num_key_value_heads
        head_size = config.hidden_size // config.num_attention_heads
        past_seq_len = 0

        # Standard causal-LM position ids: cumulative sum of the attention
        # mask, with padded positions pinned to 1.
        position_ids = tokens.attention_mask.long().cumsum(-1) - 1
        position_ids.masked_fill_(tokens.attention_mask == 0, 1)

        onnx_inputs = {
            "input_ids": tokens.input_ids.numpy(),
            "attention_mask": tokens.attention_mask.numpy(),
            "position_ids": position_ids.numpy(),
        }
        # Empty (length-0) float16 KV-cache tensors for the first forward pass.
        for i in range(config.num_hidden_layers):
            onnx_inputs[f"past_key_values.{i}.key"] = torch.rand(
                1, num_heads, past_seq_len, head_size, dtype=torch.float16
            ).numpy()
            onnx_inputs[f"past_key_values.{i}.value"] = torch.rand(
                1, num_heads, past_seq_len, head_size, dtype=torch.float16
            ).numpy()

        model_path = optimized_model_dir / "model.onnx"

        session = ort.InferenceSession(str(model_path), providers=["CUDAExecutionProvider"])
        session.run(None, onnx_inputs)[0]

        print("Inference test completed successfully!")


if __name__ == "__main__":
    main()
```
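The inference step above only verifies that a single forward pass succeeds. As a minimal sketch, the returned logits could be greedily decoded into the next token; this assumes the first model output is the logits tensor of shape `[batch, seq_len, vocab_size]` (the usual layout for Optimum-exported causal LMs), reuses `session` and `onnx_inputs` from the script, and re-creates the tokenizer the script discarded:

```python
import numpy as np
from transformers import LlamaTokenizer

# Assumes `session` and `onnx_inputs` from the script above.
logits = session.run(None, onnx_inputs)[0]  # shape: [batch, seq_len, vocab_size]
next_token_id = int(np.argmax(logits[0, -1]))  # greedy choice at the last position

tokenizer = LlamaTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
print(tokenizer.decode([next_token_id]))
```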
New file (+98 lines), the Olive workflow configuration `mistral_optimize.json` referenced by the script above:
```json
{
    "input_model": {
        "type": "PyTorchModel",
        "config": {
            "hf_config": {
                "model_name": "mistralai/Mistral-7B-v0.1",
                "model_class": "MistralForCausalLM"
            }
        }
    },
    "evaluators": {
        "common_evaluator": {
            "metrics": [
                {
                    "name": "latency",
                    "type": "latency",
                    "sub_types": [
                        {
                            "name": "avg",
                            "priority": 1
                        }
                    ],
                    "user_config": {
                        "user_script": "user_script.py",
                        "dataloader_func": "create_dataloader",
                        "batch_size": 1,
                        "inference_settings": {
                            "onnx": {
                                "session_options": {
                                    "enable_profiling": false
                                }
                            }
                        }
                    }
                }
            ]
        }
    },
    "passes": {
        "convert": {
            "type": "OptimumConversion",
            "config": {
                "target_opset": 14,
                "extra_args": {
                    "legacy": false,
                    "no_post_process": false
                }
            }
        },
        "optimize": {
            "type": "OrtTransformersOptimization",
            "config": {
                "model_type": "gpt2",
                "use_gpu": true,
                "keep_io_types": false,
                "num_heads": 32,
                "hidden_size": 4096,
                "opt_level": 0,
                "optimization_options": {
                    "use_multi_head_attention": false
                },
                "save_as_external_data": true,
                "all_tensors_to_one_file": true,
                "float16": true,
                "use_gqa": true
            }
        },
        "perf_tuning": {
            "type": "OrtPerfTuning",
            "config": {
                "user_script": "user_script.py",
                "dataloader_func": "create_dataloader",
                "batch_size": 1,
                "enable_profiling": false
            }
        }
    },
    "pass_flows": [
        [
            "convert",
            "optimize",
            "perf_tuning"
        ]
    ],
    "engine": {
        "evaluate_input_model": false,
        "evaluator": "common_evaluator",
        "cache_dir": "cache",
        "output_name": "mistral",
        "output_dir": "models",
        "execution_providers": [
            "CUDAExecutionProvider"
        ],
        "clean_cache": false,
        "log_severity_level": 0,
        "log_to_file": true
    }
}
```
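Both the latency evaluator and the `perf_tuning` pass point at a `create_dataloader` function in `user_script.py`, which is one of the four changed files but is not shown in this diff. Below is a hypothetical sketch of what such a dataloader could look like, following Olive's `dataloader_func(data_dir, batch_size, ...)` convention and mirroring the input names built in the example script; all names, shapes, and defaults here are assumptions, not the committed file:

```python
# user_script.py -- hypothetical sketch, not the committed file.
import numpy as np
from transformers import AutoConfig

MODEL_ID = "mistralai/Mistral-7B-v0.1"


class RandomDataLoader:
    def __init__(self, batch_size, seq_len=8, past_seq_len=0):
        config = AutoConfig.from_pretrained(MODEL_ID)
        self.batch_size = batch_size
        self.seq_len = seq_len
        self.past_seq_len = past_seq_len
        self.num_kv_heads = config.num_key_value_heads  # 8 for Mistral-7B
        self.head_size = config.hidden_size // config.num_attention_heads
        self.num_layers = config.num_hidden_layers
        self.vocab_size = config.vocab_size

    def __iter__(self):
        # Random token ids plus empty float16 KV caches, matching the
        # fp16/GQA model produced by the "optimize" pass.
        inputs = {
            "input_ids": np.random.randint(
                0, self.vocab_size, (self.batch_size, self.seq_len), dtype=np.int64
            ),
            "attention_mask": np.ones((self.batch_size, self.seq_len), dtype=np.int64),
            "position_ids": np.arange(self.seq_len, dtype=np.int64)[None, :].repeat(
                self.batch_size, axis=0
            ),
        }
        kv_shape = (self.batch_size, self.num_kv_heads, self.past_seq_len, self.head_size)
        for i in range(self.num_layers):
            inputs[f"past_key_values.{i}.key"] = np.zeros(kv_shape, dtype=np.float16)
            inputs[f"past_key_values.{i}.value"] = np.zeros(kv_shape, dtype=np.float16)
        yield inputs, None  # the label is unused for latency measurement


def create_dataloader(data_dir, batch_size, *args, **kwargs):
    return RandomDataLoader(batch_size)
```

The design choice this commit makes is visible in the `optimize` pass: `"float16": true` converts the graph to fp16, and `"use_gqa": true` has the ONNX Runtime transformers optimizer emit grouped-query attention, which is why the KV-cache inputs are float16 tensors keyed by `num_key_value_heads` rather than the 32 attention heads.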