Update Examples for TF 3x API (#1901)
Signed-off-by: zehao-intel <zehao.huang@intel.com>
zehao-intel authored Jul 25, 2024
1 parent 6b30207 commit fb85779
Showing 117 changed files with 39,154 additions and 31 deletions.
18 changes: 18 additions & 0 deletions examples/.config/model_params_keras_3x.json
@@ -0,0 +1,18 @@
{
"keras": {
"resnetv2_50": {
"model_src_dir": "keras/image_recognition/resnet_v2_50/quantization/ptq",
"dataset_location": "/tf_dataset/dataset/imagenet",
"input_model": "/tf_dataset2/models/tensorflow/resnetv2_50_keras/saved_model",
"main_script": "main.py",
"batch_size": 32
},
"inception_v3": {
"model_src_dir": "keras/image_recognition/inception_v3/quantization/ptq",
"dataset_location": "/tf_dataset/dataset/imagenet",
"input_model": "/tf_dataset2/models/tensorflow/inception_v3_keras/saved_model",
"main_script": "main.py",
"batch_size": 32
}
}
}
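
Each entry in these `model_params_*` files maps a model name to its example source directory, dataset and model locations, the entry script, and the batch size used by the example runner. As a hypothetical illustration (the helper below is not part of this commit), a small script could read an entry and assemble the corresponding benchmark command:

```python
import json
import shlex

def build_command(config_path: str, model_name: str, mode: str = "performance") -> str:
    """Assemble a run_benchmark.sh invocation from a model_params entry (illustrative only)."""
    with open(config_path) as f:
        params = json.load(f)["keras"][model_name]
    args = [
        "bash", "run_benchmark.sh",
        f"--input_model={params['input_model']}",
        f"--dataset_location={params['dataset_location']}",
        f"--batch_size={params['batch_size']}",
        f"--mode={mode}",
    ]
    # The example scripts are expected to be run from the model's source directory.
    return f"cd {params['model_src_dir']} && " + " ".join(shlex.quote(a) for a in args)

print(build_command("examples/.config/model_params_keras_3x.json", "resnetv2_50"))
```
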
107 changes: 107 additions & 0 deletions examples/.config/model_params_tensorflow_3x.json
Expand Up @@ -8,6 +8,20 @@
"batch_size": 64,
"fp32_model_url": "https://storage.googleapis.com/intel-optimized-tensorflow/models/v2_7_0/fp32_bert_squad.pb"
},
"distilbert_base": {
"model_src_dir": "nlp/distilbert_base/quantization/ptq",
"dataset_location": "/tf_dataset2/datasets/sst2_validation_dataset",
"input_model": "/tf_dataset2/models/tensorflow/distilbert_base/fp32/distilbert_base_fp32.pb",
"main_script": "main.py",
"batch_size": 128
},
"distilbert_base_sq": {
"model_src_dir": "nlp/distilbert_base/quantization/ptq",
"dataset_location": "/tf_dataset2/datasets/sst2_validation_dataset",
"input_model": "/tf_dataset2/models/tensorflow/distilbert_base/fp32/distilbert_base_fp32.pb",
"main_script": "main.py",
"batch_size": 128
},
"opt_125m_sq": {
"model_src_dir": "nlp/large_language_models/quantization/ptq/smoothquant",
"dataset_location": "",
@@ -29,6 +43,42 @@
"main_script": "main.py",
"batch_size": 1
},
"transformer_lt": {
"model_src_dir": "nlp/transformer_lt/quantization/ptq",
"dataset_location": "/tf_dataset/tensorflow/transformer-lt-official-fp32-inference/transformer_lt_official_fp32_pretrained_model/data",
"input_model": "/tf_dataset/tensorflow/transformer-lt-official-fp32-inference/transformer_lt_official_fp32_pretrained_model/graph/fp32_graphdef.pb",
"main_script": "main.py",
"batch_size": 64
},
"inception_v3": {
"model_src_dir": "image_recognition/inception_v3/quantization/ptq",
"dataset_location": "/tf_dataset/dataset/imagenet",
"input_model": "/tf_dataset/pre-trained-models/inceptionv3/fp32/freezed_inceptionv3.pb",
"main_script": "main.py",
"batch_size": 32,
"fp32_model_url": "https://storage.googleapis.com/intel-optimized-tensorflow/models/v1_8/inceptionv3_fp32_pretrained_model.pb"
},
"mobilenetv2": {
"model_src_dir": "image_recognition/mobilenet_v2/quantization/ptq",
"dataset_location": "/tf_dataset/dataset/imagenet",
"input_model": "/tf_dataset/pre-train-model-slim/pbfile/frozen_pb/frozen_mobilenet_v2.pb",
"main_script": "main.py",
"batch_size": 32
},
"resnetv2_50": {
"model_src_dir": "image_recognition/resnet_v2_50/quantization/ptq",
"dataset_location": "/tf_dataset/dataset/imagenet",
"input_model": "/tf_dataset/pre-train-model-slim/pbfile/frozen_pb/frozen_resnet_v2_50.pb",
"main_script": "main.py",
"batch_size": 32
},
"vgg16": {
"model_src_dir": "image_recognition/vgg16/quantization/ptq",
"dataset_location": "/tf_dataset/dataset/imagenet",
"input_model": "/tf_dataset/pre-train-model-slim/pbfile/frozen_pb/frozen_vgg16.pb",
"main_script": "main.py",
"batch_size": 32
},
"ViT": {
"model_src_dir": "image_recognition/vision_transformer/quantization/ptq",
"dataset_location": "/tf_dataset/dataset/imagenet",
@@ -42,6 +92,63 @@
"input_model": "/tf_dataset/tensorflow/graphsage/graphsage_frozen_model.pb",
"main_script": "main.py",
"batch_size": 1000
},
"faster_rcnn_resnet50": {
"model_src_dir": "object_detection/faster_rcnn_resnet50/quantization/ptq",
"dataset_location": "/tf_dataset/tensorflow/coco_val.record",
"input_model": "/tf_dataset/pre-train-model-oob/object_detection/faster_rcnn_resnet50/frozen_inference_graph.pb",
"main_script": "main.py",
"batch_size": 10
},
"mask_rcnn_inception_v2": {
"model_src_dir": "object_detection/mask_rcnn_inception_v2/quantization/ptq",
"dataset_location": "/tf_dataset/tensorflow/coco_val.record",
"input_model": "/tf_dataset/pre-train-model-oob/object_detection/mask_rcnn_inception_v2/frozen_inference_graph.pb",
"main_script": "main.py",
"batch_size": 10
},
"mask_rcnn_inception_v2_ckpt": {
"model_src_dir": "object_detection/mask_rcnn_inception_v2/quantization/ptq",
"dataset_location": "/tf_dataset/tensorflow/coco_val.record",
"input_model": "/tf_dataset/pre-train-model-oob/object_detection/mask_rcnn_inception_v2",
"main_script": "main.py",
"batch_size": 10
},
"ssd_mobilenet_v1": {
"model_src_dir": "object_detection/ssd_mobilenet_v1/quantization/ptq",
"dataset_location": "/tf_dataset/tensorflow/coco_val.record",
"input_model": "/tf_dataset/pre-train-model-oob/object_detection/ssd_mobilenet_v1/frozen_inference_graph.pb",
"main_script": "main.py",
"batch_size": 10
},
"ssd_mobilenet_v1_ckpt": {
"model_src_dir": "object_detection/ssd_mobilenet_v1/quantization/ptq",
"dataset_location": "/tf_dataset/tensorflow/coco_val.record",
"input_model": "/tf_dataset/pre-train-model-oob/object_detection/ssd_mobilenet_v1",
"main_script": "main.py",
"batch_size": 10
},
"wide_deep_large_ds": {
"model_src_dir": "recommendation/wide_deep_large_ds/quantization/ptq",
"dataset_location": "/tf_dataset/tensorflow/wide_deep_large_ds/dataset",
"input_model": "/tf_dataset/tensorflow/wide_deep_large_ds/fp32_optimized_graph.pb",
"main_script": "main.py",
"batch_size": 256,
"fp32_model_url": "https://storage.googleapis.com/intel-optimized-tensorflow/models/v1_8/wide_deep_fp32_pretrained_model.pb"
},
"3dunet-mlperf": {
"model_src_dir": "semantic_image_segmentation/3dunet-mlperf/quantization/ptq",
"dataset_location": "/tf_dataset2/models/tensorflow/3dunet/build",
"input_model": "/tf_dataset2/models/tensorflow/3dunet/3dunet_dynamic_ndhwc.pb",
"main_script": "main.py",
"batch_size": 100
},
"style_transfer": {
"model_src_dir": "style_transfer/arbitrary_style_transfer/quantization/ptq",
"dataset_location": "style_images,content_images",
"input_model": "/tf_dataset/tensorflow/style_transfer/arbitrary_style_transfer/model.ckpt",
"main_script": "main.py",
"batch_size": 1
}
}
}
@@ -0,0 +1,75 @@
Step-by-Step
============

This document lists the steps to reproduce the inception_v3 model tuning and benchmark results with Neural Compressor.
This example can run on Intel CPUs and GPUs.

> **Note**:
> The model is supported in the validated TensorFlow [versions](/docs/source/installation_guide.md#validated-software-environment).
# Prerequisite

## 1. Environment

### Installation
Python 3.9 or a higher version is recommended.
```shell
pip install -r requirements.txt
```

### Install Intel Extension for TensorFlow
#### Quantizing the model on Intel GPU (Mandatory to install ITEX)
Intel Extension for TensorFlow (ITEX) must be installed to quantize the model on Intel GPUs.

```shell
pip install --upgrade intel-extension-for-tensorflow[xpu]
```
For more details, please follow the procedure in [install-gpu-drivers](https://github.com/intel/intel-extension-for-tensorflow/blob/main/docs/install/install_for_xpu.md#install-gpu-drivers).

#### Quantizing the model on Intel CPU (Optional to install ITEX)
Intel Extension for TensorFlow for Intel CPUs is currently experimental, so installing it is not mandatory for quantizing the model on Intel CPUs.

```shell
pip install --upgrade intel-extension-for-tensorflow[cpu]
```
> **Note**:
> The version compatibility of stock TensorFlow and ITEX can be checked [here](https://github.com/intel/intel-extension-for-tensorflow#compatibility-table). Please make sure you have installed compatible versions of TensorFlow and ITEX.
## 2. Prepare pre-trained model

Download pre-trained PB
```shell
wget https://storage.googleapis.com/intel-optimized-tensorflow/models/v1_6/inceptionv3_fp32_pretrained_model.pb
```
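
As an optional sanity check (not part of the example scripts), you can confirm the downloaded frozen graph parses correctly with stock TensorFlow before running the example:

```python
import tensorflow as tf

# Parse the frozen GraphDef and print a few node names to confirm the file is intact.
graph_def = tf.compat.v1.GraphDef()
with tf.io.gfile.GFile("inceptionv3_fp32_pretrained_model.pb", "rb") as f:
    graph_def.ParseFromString(f.read())
print(len(graph_def.node), "nodes; first nodes:", [n.name for n in graph_def.node[:5]])
```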

## 3. Prepare Dataset

The TensorFlow [models](https://github.com/tensorflow/models) repo provides [scripts and instructions](https://github.com/tensorflow/models/tree/master/research/slim#an-automated-script-for-processing-imagenet-data) to download, process, and convert the ImageNet dataset to the TF records format.
We also provide related scripts in the `examples/3.x_api/tensorflow/cv` directory. To download the raw images, the user must create an account with image-net.org. Once you have downloaded the raw data and preprocessed the validation images by moving them into the appropriate sub-directories based on their labels (synsets), you can use the commands below to convert them to the TF records format.

```shell
cd examples/3.x_api/tensorflow/cv
# convert validation subset
bash prepare_dataset.sh --output_dir=./inception_v3/quantization/ptq/data --raw_dir=/PATH/TO/img_raw/val/ --subset=validation
# convert train subset
bash prepare_dataset.sh --output_dir=./inception_v3/quantization/ptq/data --raw_dir=/PATH/TO/img_raw/train/ --subset=train
```
> **Note**:
> The raw ImageNet dataset consists of JPEG files and should be laid out in the following directory structure. Taking the validation set as an example:<br>
> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;/PATH/TO/img_raw/val/n01440764/ILSVRC2012_val_00000293.JPEG<br>
> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;/PATH/TO/img_raw/val/n01440764/ILSVRC2012_val_00000543.JPEG<br>
> where 'n01440764' is the unique synset label associated with these images.
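
Optionally, you can spot-check the converted records before running the example. The sketch below assumes the standard slim TFRecord feature keys (`image/encoded`, `image/class/label`); adjust them if your conversion script uses different names:

```python
import tensorflow as tf

# Decode one example from the converted validation records (optional spot-check).
files = tf.data.Dataset.list_files("./inception_v3/quantization/ptq/data/validation-*")
dataset = tf.data.TFRecordDataset(files)

feature_spec = {
    "image/encoded": tf.io.FixedLenFeature([], tf.string),
    "image/class/label": tf.io.FixedLenFeature([], tf.int64),
}
for raw_record in dataset.take(1):
    example = tf.io.parse_single_example(raw_record, feature_spec)
    image = tf.io.decode_jpeg(example["image/encoded"])
    print("label:", int(example["image/class/label"]), "image shape:", image.shape)
```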

# Run

## 1. Quantization

```shell
bash run_quant.sh --input_model=/PATH/TO/inceptionv3_fp32_pretrained_model.pb \
--output_model=./nc_inception_v3.pb --dataset_location=/path/to/ImageNet/
```
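
Under the hood, `run_quant.sh` invokes `main.py`, which performs post-training static quantization with the Neural Compressor TF 3x API. The minimal sketch below assumes the 3x API exposes `StaticQuantConfig` and `quantize_model` (check the installed version's documentation); the calibration dataloader is a placeholder that `main.py` replaces with an ImageNet dataloader:

```python
# Minimal sketch of the TF 3x quantization flow (assumed API; the real logic lives in main.py).
from neural_compressor.tensorflow import StaticQuantConfig, quantize_model

calib_dataloader = ...  # placeholder: main.py builds the ImageNet calibration dataloader

quant_config = StaticQuantConfig()
q_model = quantize_model(
    model="/PATH/TO/inceptionv3_fp32_pretrained_model.pb",
    quant_config=quant_config,
    calib_dataloader=calib_dataloader,
)
q_model.save("./nc_inception_v3.pb")
```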

## 2. Benchmark
```shell
bash run_benchmark.sh --input_model=./nc_inception_v3.pb --mode=accuracy --dataset_location=/path/to/ImageNet/ --batch_size=32
bash run_benchmark.sh --input_model=./nc_inception_v3.pb --mode=performance --dataset_location=/path/to/ImageNet/ --batch_size=1
```