Commit

adding other film models, improving dockerfiles, cleaning film args
styler00dollar committed Feb 11, 2022
1 parent 54b1543 commit f235b5b
Showing 6 changed files with 71 additions and 34 deletions.
55 changes: 36 additions & 19 deletions Colab-VSGAN.ipynb
@@ -216,6 +216,31 @@
"# relupload, since official download seems to be blocked sometimes due to traffic\n",
"!wget https://files.catbox.moe/v84ufq.data-00000-of-00001\n",
"!mv v84ufq.data-00000-of-00001 variables.data-00000-of-00001\n",
"# film l1\n",
"%cd /workspace\n",
"!mkdir film_l1\n",
"!gdown --id 1WPHyhqRmIhpsCCAuWmlE2j_VHPe6c4eR\n",
"!gdown --id 1HxAfoDIkJs9HUT6GmyHeiO4NDFtBhQKO\n",
"!mkdir variables\n",
"%cd variables\n",
"!gdown --id 1jvMCG321Ws0WswWjZPSiYudrUWYvP10l\n",
"#!gdown --id 19CNAIFt59brxAG_3ihKGYrjRozEQADun\n",
"# reupload\n",
"!wget https://files.catbox.moe/renvnu.data-00000-of-00001\n",
"!mv renvnu.data-00000-of-00001 variables.data-00000-of-00001\n",
"# film vgg\n",
"%cd /workspace\n",
"!mkdir film_vgg\n",
"%cd film_vgg\n",
"!gdown --id 11nvcQvf5n9JMrryvIPfypYEbRYz_1egM\n",
"!gdown --id 1-KW5CVihmeiiMki9fIqwUfsnrVJyLdyn\n",
"!mkdir variables\n",
"%cd variables\n",
"!gdown --id 1QsBtJFG9GLcjprjgnf6R-eQJq3k6mWnN\n",
"#!gdown --id 1Cke9KRsEW3s50jq3RULvYbUoEY8v--zN\n",
"# reupload\n",
"!wget https://files.catbox.moe/qt4iya.data-00000-of-00001\n",
"!mv qt4iya.data-00000-of-00001 variables.data-00000-of-00001\n",
"\n",
"# optional, rvp uses it to convert colorspace\n",
"!pip install kornia\n",
@@ -242,7 +267,11 @@
"!sudo apt install x264 -y\n",
"\n",
"# downgrading tensorflow due to IndexError: list index out of range \n",
"!pip install tensorflow==2.3.1 tensorflow-gpu==2.3.1"
"!pip install tensorflow==2.3.1 tensorflow-gpu==2.3.1\n",
"\n",
"%cd /workspace/tensorrt\n",
"!git clone https://github.com/styler00dollar/VSGAN-tensorrt-docker\n",
"%cd /workspace/tensorrt/VSGAN-tensorrt-docker"
]
},
{
@@ -283,19 +312,6 @@
"Dependencies are installed now."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "4OUgqwohoPSo"
},
"outputs": [],
"source": [
"%cd /workspace/tensorrt\n",
"!git clone https://github.com/styler00dollar/VSGAN-tensorrt-docker\n",
"%cd /workspace/tensorrt/VSGAN-tensorrt-docker"
]
},
{
"cell_type": "markdown",
"metadata": {
Expand Down Expand Up @@ -338,16 +354,16 @@
"core.std.LoadPlugin(path='/usr/lib/x86_64-linux-gnu/libffms2.so')\n",
"\n",
"# cfr video\n",
"clip = core.ffms2.Source(source='test.mkv')\n",
"clip = core.ffms2.Source(source='nichijou.mkv')\n",
"# vfr video (untested)\n",
"#clip = core.ffms2.Source(source='input.mkv', fpsnum = 24000, fpsden = 1001)\n",
"###############################################\n",
"# COLORSPACE\n",
"###############################################\n",
"# convert colorspace\n",
"clip = vs.core.resize.Bicubic(clip, format=vs.RGBS, matrix_in_s='709')\n",
"#clip = vs.core.resize.Bicubic(clip, format=vs.RGBS, matrix_in_s='709')\n",
"# convert colorspace + resizing\n",
"#clip = vs.core.resize.Bicubic(clip, width=256, height=256, format=vs.RGBS, matrix_in_s='709')\n",
"clip = vs.core.resize.Bicubic(clip, width=848, height=480, format=vs.RGBS, matrix_in_s='709')\n",
"\n",
"###############################################\n",
"\n",
@@ -360,7 +376,7 @@
"# sepconv\n",
"#clip = sepconv_model(clip)\n",
"# RIFE4\n",
"clip = RIFE(clip, multi = 2, scale = 1.0, fp16 = True, fastmode = False, ensemble = True)\n",
"#clip = RIFE(clip, multi = 2, scale = 1.0, fp16 = True, fastmode = False, ensemble = True)\n",
"# VFI example for jit models\n",
"#clip = video_model(clip, fp16=False, model_path=\"/workspace/rvpV1_105661_G.pt\")\n",
"# SwinIR\n",
@@ -382,7 +398,8 @@
"# scales: 2 | 3 | 4, kind_model: no_denoise | denoise3x | conservative, backend_inference: cuda | onnx\n",
"#clip = cugan_inference(clip, fp16 = True, scale = 2, kind_model = \"no_denoise\", backend_inference = \"cuda\")\n",
"# FILM\n",
"#clip = FILM_inference(clip)\n",
"# models: l1 | vgg | style\n",
"clip = FILM_inference(clip, model_choise = \"l1\")\n",
"\n",
"###############################################\n",
"# [NOT IN DOCKER] MODELS (NCNN)\n",
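
For reference, a minimal VapourSynth script wired up the way the notebook cell above suggests might look like the sketch below. This is an editor's sketch, not part of the commit: the "from src.film import FILM_inference" import path and the final YUV conversion back for encoding are assumptions based on the repository layout shown in this diff.

# Sketch: minimal inference script using the new model_choise argument (import path assumed).
import vapoursynth as vs
from src.film import FILM_inference  # assumption: FILM_inference is exposed from src/film.py

core = vs.core
core.std.LoadPlugin(path='/usr/lib/x86_64-linux-gnu/libffms2.so')

clip = core.ffms2.Source(source='test.mkv')
# FILM expects RGBS input, matching the colorspace conversion in the notebook
clip = vs.core.resize.Bicubic(clip, format=vs.RGBS, matrix_in_s='709')
# models: l1 | vgg | style
clip = FILM_inference(clip, model_choise="l1")
# convert back to a YUV format for encoding (assumed output step)
clip = vs.core.resize.Bicubic(clip, format=vs.YUV420P8, matrix_s='709')
clip.set_output()
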
15 changes: 10 additions & 5 deletions Dockerfile
@@ -59,11 +59,16 @@ RUN gdown --id 1uMSkSaear_f3BhIVoyPAeEYecpTNSV6W
RUN gdown --id 1CaQ15NiDQlFoOYGe4OJTsHbdxa8zQvME
# film style
RUN mkdir /workspace/film_style
RUN cd film_style && gdown --id 1nfi15im3LQvCx84ZRiNcfMuodDkRL_Ei && gdown --id 1dT85Z-HyYsiUgIQbOgYFjwWPOw8en1RC
RUN mkdir /workspace/film_style/variables
#RUN cd /workspace/film_style/variables && gdown --id 1ceC2kbJs3U1dMMrp4hNIpoHRFxO33SFC && gdown --id 1_oyM-LBAK9o7-bNWf1jG8VvBYeqpmSUr
# reupload, since the official download sometimes seems to be blocked due to traffic
RUN cd /workspace/film_style/variables && gdown --id 1ceC2kbJs3U1dMMrp4hNIpoHRFxO33SFC && wget https://files.catbox.moe/v84ufq.data-00000-of-00001 mv v84ufq.data-00000-of-00001 variables.data-00000-of-00001
RUN cd /workspace/film_style && gdown --id 1nfi15im3LQvCx84ZRiNcfMuodDkRL_Ei && gdown --id 1dT85Z-HyYsiUgIQbOgYFjwWPOw8en1RC
RUN mkdir /workspace/film_style/variables && cd /workspace/film_style/variables && gdown --id 1ceC2kbJs3U1dMMrp4hNIpoHRFxO33SFC && wget https://files.catbox.moe/v84ufq.data-00000-of-00001 && mv v84ufq.data-00000-of-00001 variables.data-00000-of-00001
# film l1
RUN mkdir /workspace/film_l1
RUN cd /workspace/film_l1 && gdown --id 1WPHyhqRmIhpsCCAuWmlE2j_VHPe6c4eR && gdown --id 1HxAfoDIkJs9HUT6GmyHeiO4NDFtBhQKO
RUN mkdir /workspace/film_l1/variables && cd /workspace/film_l1/variables && gdown --id 1jvMCG321Ws0WswWjZPSiYudrUWYvP10l && wget https://files.catbox.moe/renvnu.data-00000-of-00001 && mv renvnu.data-00000-of-00001 variables.data-00000-of-00001
# film vgg
RUN mkdir /workspace/film_vgg
RUN cd /workspace/film_vgg && gdown --id 11nvcQvf5n9JMrryvIPfypYEbRYz_1egM && gdown --id 1-KW5CVihmeiiMki9fIqwUfsnrVJyLdyn
RUN mkdir /workspace/film_vgg/variables && cd /workspace/film_vgg/variables && gdown --id 1QsBtJFG9GLcjprjgnf6R-eQJq3k6mWnN && wget https://files.catbox.moe/qt4iya.data-00000-of-00001 && mv qt4iya.data-00000-of-00001 variables.data-00000-of-00001

# optional, rvp uses it to convert colorspace
RUN pip install kornia
15 changes: 10 additions & 5 deletions Dockerfile_mpv
@@ -66,11 +66,16 @@ RUN gdown --id 1uMSkSaear_f3BhIVoyPAeEYecpTNSV6W
RUN gdown --id 1CaQ15NiDQlFoOYGe4OJTsHbdxa8zQvME
# film style
RUN mkdir /workspace/film_style
RUN cd film_style && gdown --id 1nfi15im3LQvCx84ZRiNcfMuodDkRL_Ei && gdown --id 1dT85Z-HyYsiUgIQbOgYFjwWPOw8en1RC
RUN mkdir /workspace/film_style/variables
#RUN cd /workspace/film_style/variables && gdown --id 1ceC2kbJs3U1dMMrp4hNIpoHRFxO33SFC && gdown --id 1_oyM-LBAK9o7-bNWf1jG8VvBYeqpmSUr
# reupload, since the official download sometimes seems to be blocked due to traffic
RUN cd /workspace/film_style/variables && gdown --id 1ceC2kbJs3U1dMMrp4hNIpoHRFxO33SFC && wget https://files.catbox.moe/v84ufq.data-00000-of-00001 mv v84ufq.data-00000-of-00001 variables.data-00000-of-00001
RUN cd /workspace/film_style && gdown --id 1nfi15im3LQvCx84ZRiNcfMuodDkRL_Ei && gdown --id 1dT85Z-HyYsiUgIQbOgYFjwWPOw8en1RC
RUN mkdir /workspace/film_style/variables && cd /workspace/film_style/variables && gdown --id 1ceC2kbJs3U1dMMrp4hNIpoHRFxO33SFC && wget https://files.catbox.moe/v84ufq.data-00000-of-00001 && mv v84ufq.data-00000-of-00001 variables.data-00000-of-00001
# film l1
RUN mkdir /workspace/film_l1
RUN cd /workspace/film_l1 && gdown --id 1WPHyhqRmIhpsCCAuWmlE2j_VHPe6c4eR && gdown --id 1HxAfoDIkJs9HUT6GmyHeiO4NDFtBhQKO
RUN mkdir /workspace/film_l1/variables && cd /workspace/film_l1/variables && gdown --id 1jvMCG321Ws0WswWjZPSiYudrUWYvP10l && wget https://files.catbox.moe/renvnu.data-00000-of-00001 && mv renvnu.data-00000-of-00001 variables.data-00000-of-00001
# film vgg
RUN mkdir /workspace/film_vgg
RUN cd /workspace/film_vgg && gdown --id 11nvcQvf5n9JMrryvIPfypYEbRYz_1egM && gdown --id 1-KW5CVihmeiiMki9fIqwUfsnrVJyLdyn
RUN mkdir /workspace/film_vgg/variables && cd /workspace/film_vgg/variables && gdown --id 1QsBtJFG9GLcjprjgnf6R-eQJq3k6mWnN && wget https://files.catbox.moe/qt4iya.data-00000-of-00001 && mv qt4iya.data-00000-of-00001 variables.data-00000-of-00001

# optional, rvp uses it to convert colorspace
RUN pip install kornia
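
Since both Dockerfiles and the notebook assemble each FILM SavedModel directory from several separate downloads, a quick completeness check before running inference can catch a failed or throttled download early. The snippet below is an editor's sketch, not part of the commit; it assumes the standard TensorFlow SavedModel layout (saved_model.pb plus variables.index and variables.data-00000-of-00001) under the paths used in this commit.

# Sketch: verify that the FILM SavedModel directories are complete after the downloads.
import os

FILM_DIRS = {
    "style": "/workspace/film_style",
    "l1": "/workspace/film_l1",
    "vgg": "/workspace/film_vgg",
}

for name, path in FILM_DIRS.items():
    expected = [
        os.path.join(path, "saved_model.pb"),
        os.path.join(path, "variables", "variables.index"),
        os.path.join(path, "variables", "variables.data-00000-of-00001"),
    ]
    missing = [p for p in expected if not os.path.isfile(p)]
    print(f"film_{name}: {'ok' if not missing else 'missing ' + ', '.join(missing)}")
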
3 changes: 2 additions & 1 deletion inference.py
@@ -65,7 +65,8 @@
# scales: 2 | 3 | 4, kind_model: no_denoise | denoise3x | conservative, backend_inference: cuda | onnx
#clip = cugan_inference(clip, fp16 = True, scale = 2, kind_model = "no_denoise", backend_inference = "cuda")
# FILM
#clip = FILM_inference(clip)
# models: l1 | vgg | style
clip = FILM_inference(clip, model_choise = "vgg")

###############################################
# [NOT IN DOCKER] MODELS (NCNN)
3 changes: 2 additions & 1 deletion inference_batch.py
@@ -69,7 +69,8 @@
# scales: 2 | 3 | 4, kind_model: no_denoise | denoise3x | conservative, backend_inference: cuda | onnx
#clip = cugan_inference(clip, fp16 = True, scale = 2, kind_model = "no_denoise", backend_inference = "cuda")
# FILM
#clip = FILM_inference(clip)
# models: l1 | vgg | style
clip = FILM_inference(clip, model_choise = "l1")

###############################################
# [NOT IN DOCKER] MODELS (NCNN)
14 changes: 11 additions & 3 deletions src/film.py
@@ -32,7 +32,7 @@ def _redirect_stdout(to):
# buffering and flags such as
# CLOEXEC may be different

def FILM_inference(clip: vs.VideoNode, fp16: bool = False, model_path: str = "/workspace/rvpV1_105661_G.pt") -> vs.VideoNode:
def FILM_inference(clip: vs.VideoNode, model_choise: str = "vgg") -> vs.VideoNode:
if not isinstance(clip, vs.VideoNode):
raise vs.Error('This is not a clip')

@@ -51,8 +51,13 @@ def FILM_inference(clip: vs.VideoNode, fp16: bool = False, model_path: str = "/w
sys.argv.append("(C++)")
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
model = tf.compat.v2.saved_model.load("/workspace/film_style/")

if model_choise == "style":
model = tf.compat.v2.saved_model.load("/workspace/film_style/")
elif model_choise == "l1":
model = tf.compat.v2.saved_model.load("/workspace/film_l1/")
elif model_choise == "vgg"
model = tf.compat.v2.saved_model.load("/workspace/film_vgg/")

batch_dt = np.full(shape=(1,), fill_value=0.5, dtype=np.float32)
batch_dt = np.expand_dims(batch_dt, axis=0)
batch_dt = tf.convert_to_tensor(batch_dt)
@@ -70,12 +75,15 @@ def execute(n: int, clip: vs.VideoNode) -> vs.VideoNode:

I0 = np.expand_dims(I0, 0)
I1 = np.expand_dims(I1, 0)

I0 = np.swapaxes(I0, 3, 1)
I0 = np.swapaxes(I0, 1, 2)
I1 = np.swapaxes(I1, 3, 1)
I1 = np.swapaxes(I1, 1, 2)

I0 = tf.convert_to_tensor(I0)
I1 = tf.convert_to_tensor(I1)

inputs = {'x0': I0, 'x1': I1, 'time': batch_dt}
middle = model(inputs, training=False)['image'].numpy()

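
One possible follow-up to the if/elif chain added above: a dict-based lookup that fails loudly when model_choise is misspelled, instead of leaving model unbound. This is an editor's sketch of an alternative, not code from the commit.

# Sketch: dict-based model selection with an explicit error for unknown choices.
import vapoursynth as vs
import tensorflow as tf

FILM_MODEL_PATHS = {
    "style": "/workspace/film_style/",
    "l1": "/workspace/film_l1/",
    "vgg": "/workspace/film_vgg/",
}

def load_film_model(model_choise: str = "vgg"):
    # reject anything outside the three shipped SavedModel directories
    if model_choise not in FILM_MODEL_PATHS:
        raise vs.Error(f"FILM: unknown model_choise '{model_choise}', expected one of {sorted(FILM_MODEL_PATHS)}")
    return tf.compat.v2.saved_model.load(FILM_MODEL_PATHS[model_choise])
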
