Added PyTorch to TensorFlow model conversion
Anastasia committed Jul 9, 2020
1 parent 638d02d commit 8af8f6b
Showing 7 changed files with 1,220 additions and 0 deletions.
140 changes: 140 additions & 0 deletions PyTorch-to-TensorFlow-Model-Conversion/FullyConvolutionalResnet18.py
@@ -0,0 +1,140 @@
import cv2
import numpy as np
import tensorflow as tf
import torch
from albumentations import (
    Compose,
    Normalize,
)
from pytorch2keras.converter import pytorch_to_keras
from torch.autograd import Variable

from PyTorchFullyConvolutionalResnet18 import FullyConvolutionalResnet18


def converted_fully_convolutional_resnet18(
    input_tensor, pretrained_resnet=True,
):
    # define input tensor
    input_var = Variable(torch.FloatTensor(input_tensor))

    # get PyTorch ResNet18 model
    model_to_transfer = FullyConvolutionalResnet18(pretrained=pretrained_resnet)
    model_to_transfer.eval()

    # convert PyTorch model to Keras
    model = pytorch_to_keras(
        model_to_transfer,
        input_var,
        [input_var.shape[-3:]],
        change_ordering=True,
        verbose=False,
        name_policy="keep",
    )

    return model


if __name__ == "__main__":
    # read ImageNet class ids into a list of labels
    with open("imagenet_classes.txt") as f:
        labels = [line.strip() for line in f.readlines()]

    # read image
    original_image = cv2.imread("camel.jpg")

    # convert original image to RGB format
    image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)

    # transform input image:
    transform = Compose(
        [
            Normalize(
                # subtract mean
                mean=(0.485, 0.456, 0.406),
                # divide by standard deviation
                std=(0.229, 0.224, 0.225),
            ),
        ],
    )
    # apply image transformations, (725, 1920, 3)
    image = transform(image=image)["image"]

    # NHWC: (1, 725, 1920, 3)
    predict_image = tf.expand_dims(image, 0)
    # NCHW: (1, 3, 725, 1920)
    image = np.transpose(tf.expand_dims(image, 0).numpy(), [0, 3, 1, 2])

    # get transferred torch ResNet18 with pre-trained ImageNet weights
    model = converted_fully_convolutional_resnet18(
        input_tensor=image, pretrained_resnet=True,
    )

    # Perform inference.
    # Instead of a 1×1000 vector, we get a
    # 1×1000×n×m output (i.e. a probability map
    # of size n × m for each of the 1000 classes,
    # where n and m depend on the size of the image).
    preds = model.predict(predict_image)
    # NHWC: (1, 3, 8, 1000) back to NCHW: (1, 1000, 3, 8)
    preds = tf.transpose(preds, (0, 3, 1, 2))
    preds = tf.nn.softmax(preds, axis=1)
    print("Response map shape : ", preds.shape)

    # find the class with the maximum score in the n x m output map
    pred = tf.math.reduce_max(preds, axis=1)
    class_idx = tf.math.argmax(preds, axis=1)

    row_max = tf.math.reduce_max(pred, axis=1)
    row_idx = tf.math.argmax(pred, axis=1)

    col_idx = tf.math.argmax(row_max, axis=1)

    predicted_class = tf.gather_nd(
        class_idx, (0, tf.gather_nd(row_idx, (0, col_idx[0])), col_idx[0]),
    )

    # print the top predicted class
    print("Predicted Class : ", labels[predicted_class], predicted_class)

    # find the n × m score map for the predicted class
    score_map = tf.expand_dims(preds[0, predicted_class, :, :], 0).numpy()
    score_map = score_map[0]

    # resize score map to the original image size
    score_map = cv2.resize(
        score_map, (original_image.shape[1], original_image.shape[0]),
    )

    # binarize score map
    _, score_map_for_contours = cv2.threshold(
        score_map, 0.25, 1, type=cv2.THRESH_BINARY,
    )

    score_map_for_contours = score_map_for_contours.astype(np.uint8).copy()

    # find the contours of the binary blob
    contours, _ = cv2.findContours(
        score_map_for_contours, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_SIMPLE,
    )

    # find a bounding box around the object
    rect = cv2.boundingRect(contours[0])

    # apply the score map as a mask to the original image
    score_map = score_map - np.min(score_map[:])
    score_map = score_map / np.max(score_map[:])

    score_map = cv2.cvtColor(score_map, cv2.COLOR_GRAY2BGR)
    masked_image = (original_image * score_map).astype(np.uint8)

    # display bounding box
    cv2.rectangle(
        masked_image, rect[:2], (rect[0] + rect[2], rect[1] + rect[3]), (0, 0, 255), 2,
    )

    # display images
    cv2.imshow("Original Image", original_image)
    cv2.imshow("scaled_score_map", score_map)
    cv2.imshow("activations_and_bbox", masked_image)
    cv2.waitKey(0)
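
A quick way to sanity-check the conversion is to run the same NCHW input through the original PyTorch network and the converted Keras model and compare the raw outputs. A minimal sketch, assuming the `image`, `predict_image`, and `model` variables from the listing above are in scope; the tolerance may need loosening:

import numpy as np
import torch

from PyTorchFullyConvolutionalResnet18 import FullyConvolutionalResnet18

# response map from the original PyTorch network, NCHW: (1, 1000, 3, 8)
torch_model = FullyConvolutionalResnet18(pretrained=True).eval()
with torch.no_grad():
    torch_preds = torch_model(torch.from_numpy(image)).numpy()

# response map from the converted Keras network, NHWC: (1, 3, 8, 1000)
keras_preds = model.predict(predict_image)
keras_preds = np.transpose(keras_preds, (0, 3, 1, 2))  # back to NCHW

print("max abs difference:", np.abs(torch_preds - keras_preds).max())
print("outputs match:", np.allclose(torch_preds, keras_preds, atol=1e-4))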
60 changes: 60 additions & 0 deletions PyTorch-to-TensorFlow-Model-Conversion/PyTorchFullyConvolutionalResnet18.py
@@ -0,0 +1,60 @@
import torch
import torch.nn as nn
from torch.hub import load_state_dict_from_url
from torchvision import models


# Define the architecture by modifying resnet.
# Original code is here:
# https://github.com/pytorch/vision/blob/b2e95657cd5f389e3973212ba7ddbdcc751a7878/torchvision/models/resnet.py
class FullyConvolutionalResnet18(models.ResNet):
    def __init__(self, num_classes=1000, pretrained=False, **kwargs):

        # Start with the standard resnet18 defined here:
        # https://github.com/pytorch/vision/blob/b2e95657cd5f389e3973212ba7ddbdcc751a7878/torchvision/models/resnet.py
        super().__init__(
            block=models.resnet.BasicBlock,
            layers=[2, 2, 2, 2],
            num_classes=num_classes,
            **kwargs,
        )
        if pretrained:
            state_dict = load_state_dict_from_url(
                models.resnet.model_urls["resnet18"], progress=True,
            )
            self.load_state_dict(state_dict)

        # Replace AdaptiveAvgPool2d with standard AvgPool2d
        # https://github.com/pytorch/vision/blob/b2e95657cd5f389e3973212ba7ddbdcc751a7878/torchvision/models/resnet.py#L153-L154
        self.avgpool = nn.AvgPool2d((7, 7))

        # Add the final convolution layer.
        self.last_conv = torch.nn.Conv2d(
            in_channels=self.fc.in_features, out_channels=num_classes, kernel_size=1,
        )
        self.last_conv.weight.data.copy_(
            self.fc.weight.data.view(*self.fc.weight.data.shape, 1, 1),
        )
        self.last_conv.bias.data.copy_(self.fc.bias.data)

    # Reimplement the forward pass,
    # replacing the following code:
    # https://github.com/pytorch/vision/blob/b2e95657cd5f389e3973212ba7ddbdcc751a7878/torchvision/models/resnet.py#L197-L213
    def _forward_impl(self, x):
        # Standard forward for resnet18
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)

        # Note that there is no forward pass
        # through the original fully connected layer.
        # Instead, the features go through the final 1×1 convolution.
        x = self.last_conv(x)
        return x
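
On the standard 224×224 ImageNet input the spatial map after `avgpool` collapses to 1×1, so the fully convolutional network above should reproduce the logits of a stock torchvision ResNet18 exactly. A minimal sketch of that check, assuming the pretrained weights can be downloaded:

import torch
from torchvision import models

from PyTorchFullyConvolutionalResnet18 import FullyConvolutionalResnet18

fcn = FullyConvolutionalResnet18(pretrained=True).eval()
ref = models.resnet18(pretrained=True).eval()

x = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    fcn_logits = fcn(x)[:, :, 0, 0]  # (1, 1000, 1, 1) -> (1, 1000)
    ref_logits = ref(x)              # (1, 1000)

# the 1x1 convolution carries the fc weights, so the logits should match
print(torch.allclose(fcn_logits, ref_logits, atol=1e-5))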
12 changes: 12 additions & 0 deletions PyTorch-to-TensorFlow-Model-Conversion/README.md
@@ -0,0 +1,12 @@
This directory contains the code for **PyTorch to TensorFlow Model Conversion**. For more information, visit
[**PyTorch to TensorFlow Model Conversion**](https://www.learnopencv.com/pytorch-to-tensorflow-model-conversion/)

# AI Courses by OpenCV

Want to become an expert in AI? [AI Courses by OpenCV](https://opencv.org/courses/) is a great place to start.

<a href="https://opencv.org/courses/">
<p align="center">
<img src="https://www.learnopencv.com/wp-content/uploads/2020/04/AI-Courses-By-OpenCV-Github.png">
</p>
</a>
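
For TensorFlow-only deployment, the converted Keras model can be exported once and then reloaded without any PyTorch dependency. A minimal, illustrative sketch (the output path below is a placeholder, pretrained weights are downloaded on first use, and reloading may need adjustments depending on the layers `pytorch2keras` generates):

```python
# Illustrative only: convert the fully convolutional ResNet18 and save it
# in TensorFlow's SavedModel format.
import numpy as np
import tensorflow as tf

from FullyConvolutionalResnet18 import converted_fully_convolutional_resnet18

dummy_input = np.zeros((1, 3, 224, 224), dtype=np.float32)  # NCHW, as the converter expects
keras_model = converted_fully_convolutional_resnet18(input_tensor=dummy_input)

keras_model.save("fully_convolutional_resnet18")  # SavedModel directory (placeholder path)
restored = tf.keras.models.load_model("fully_convolutional_resnet18")
print(restored.predict(np.zeros((1, 224, 224, 3), dtype=np.float32)).shape)  # (1, 1, 1, 1000)
```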
Binary file added PyTorch-to-TensorFlow-Model-Conversion/camel.jpg
