forked from malraharsh/style-transfer-web-app
-
Notifications
You must be signed in to change notification settings - Fork 31
/
input.py
73 lines (56 loc) · 2.62 KB
/
input.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
import numpy as np
import streamlit as st
from streamlit_webrtc import webrtc_streamer
from PIL import Image
import cv2
import imutils
from neural_style_transfer import get_model_from_path, style_transfer
from data import *
import av
from turn import get_ice_servers
from streamlit_session_memo import st_session_memo
def image_input(style_model_name):
    """Render the still-image style-transfer UI.

    Lets the user either upload a content image or pick one of the bundled
    sample images, runs the selected style model on it, and shows the
    original (sidebar) next to the stylized result (main area).

    Parameters
    ----------
    style_model_name : str
        Key into ``style_models_dict`` selecting which style model to load.
    """
    style_model_path = style_models_dict[style_model_name]
    model = get_model_from_path(style_model_path)

    if st.sidebar.checkbox('Upload'):
        content_file = st.sidebar.file_uploader("Choose a Content Image", type=["png", "jpg", "jpeg"])
    else:
        content_name = st.sidebar.selectbox("Choose the content images:", content_images_name)
        content_file = content_images_dict[content_name]

    if content_file is not None:
        content = Image.open(content_file)
        content = np.array(content)  # PIL image -> ndarray (RGB)
        content = cv2.cvtColor(content, cv2.COLOR_RGB2BGR)  # OpenCV expects BGR
    else:
        # Fix: the original message had a stray ')' inside the string
        # ("Upload Button)") — shown verbatim to the user.
        st.warning("Upload an Image OR Untick the Upload Button")
        st.stop()  # halt the script run until the user provides an image

    # Larger width = higher quality output but slower inference.
    WIDTH = st.sidebar.select_slider('QUALITY (May reduce the speed)', list(range(150, 501, 50)), value=200)
    content = imutils.resize(content, width=WIDTH)
    generated = style_transfer(content, model)
    st.sidebar.image(content, width=300, channels='BGR')
    # clamp=True: model output may fall slightly outside the valid range.
    st.image(generated, channels='BGR', clamp=True)
def webcam_input(style_model_name):
    """Render the live-webcam style-transfer UI via streamlit-webrtc.

    Each incoming video frame is downscaled, run through the selected style
    model, upscaled back to the original resolution, and returned to the
    browser.

    Parameters
    ----------
    style_model_name : str
        Key into ``style_models_dict`` selecting which style model to load.
    """
    st.header("Webcam Live Feed")
    WIDTH = st.sidebar.select_slider('QUALITY (May reduce the speed)', list(range(150, 501, 50)))
    width = WIDTH

    @st_session_memo
    def load_model(model_name, width):
        # `width` is not used when loading the model, but is necessary as a
        # cache key so the memo invalidates when the quality slider changes.
        return get_model_from_path(model_name)

    model = load_model(style_models_dict[style_model_name], width)

    def video_frame_callback(frame: av.VideoFrame) -> av.VideoFrame:
        image = frame.to_ndarray(format="bgr24")
        if model is None:
            # Bug fix: the original returned the bare ndarray here, but the
            # streamlit-webrtc callback contract requires an av.VideoFrame.
            # Pass the unmodified frame through instead.
            return frame

        orig_h, orig_w = image.shape[0:2]

        # cv2.resize used in a forked thread may cause memory leaks, so PIL
        # is used for resizing instead. (Renamed from `input`, which
        # shadowed the builtin.)
        scaled = np.asarray(Image.fromarray(image).resize((width, int(width * orig_h / orig_w))))
        transferred = style_transfer(scaled, model)
        # style_transfer output is scaled back to uint8 [0, 255] — assumes it
        # returns floats in [0, 1]; TODO confirm against neural_style_transfer.
        result = Image.fromarray((transferred * 255).astype(np.uint8))
        image = np.asarray(result.resize((orig_w, orig_h)))
        return av.VideoFrame.from_ndarray(image, format="bgr24")

    # The returned context object was unused, so the binding is dropped.
    webrtc_streamer(
        key="neural-style-transfer",
        video_frame_callback=video_frame_callback,
        rtc_configuration={"iceServers": get_ice_servers()},
        media_stream_constraints={"video": True, "audio": False},
    )