faceforensics_download.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Downloads FaceForensics++ and Deep Fake Detection public data release

Example usage:
    see -h or https://github.com/ondyari/FaceForensics
"""
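# Example invocations (sketches based on the argparse options defined below;
# <output_path> is a placeholder for your own output directory):
#   python faceforensics_download.py <output_path> -d Deepfakes -c c23 -t videos
#   python faceforensics_download.py <output_path> -d original -n 100 --server EU2
#   python faceforensics_download.py <output_path> -d Deepfakes -t models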
import argparse
import os
import urllib.request
import tempfile
import time
import sys
import json
from tqdm import tqdm
from os.path import join

# URLs and filenames
FILELIST_URL = 'misc/filelist.json'
DEEPFAKES_DETECTION_URL = 'misc/deepfake_detection_filenames.json'
DEEPFAKES_MODEL_NAMES = ['decoder_A.h5', 'decoder_B.h5', 'encoder.h5']

# Parameters
DATASETS = {
    'original_youtube_videos': 'misc/downloaded_youtube_videos.zip',
    'original_youtube_videos_info': 'misc/downloaded_youtube_videos_info.zip',
    'original': 'original_sequences/youtube',
    'DeepFakeDetection_original': 'original_sequences/actors',
    'Deepfakes': 'manipulated_sequences/Deepfakes',
    'DeepFakeDetection': 'manipulated_sequences/DeepFakeDetection',
    'Face2Face': 'manipulated_sequences/Face2Face',
    'FaceSwap': 'manipulated_sequences/FaceSwap',
    'NeuralTextures': 'manipulated_sequences/NeuralTextures'
}
ALL_DATASETS = ['original', 'DeepFakeDetection_original', 'Deepfakes',
                'DeepFakeDetection', 'Face2Face', 'FaceSwap',
                'NeuralTextures']
COMPRESSION = ['raw', 'c23', 'c40']
TYPE = ['videos', 'masks', 'models']
SERVERS = ['EU', 'EU2', 'CA']
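
# URL layout on the download servers, inferred from the path construction in
# this script (not an official spec):
#   <server_url>/v3/<dataset_path>/<compression>/<type>/<filename>
# e.g. for the EU server, Deepfakes c23 videos:
#   http://canis.vc.in.tum.de:8100/v3/manipulated_sequences/Deepfakes/c23/videos/<id1>_<id2>.mp4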


def parse_args():
    parser = argparse.ArgumentParser(
        description='Downloads FaceForensics v2 public data release.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument('output_path', type=str, help='Output directory.')
    parser.add_argument('-d', '--dataset', type=str, default='all',
                        help='Which dataset to download: pristine or '
                             'manipulated data, or the originally downloaded '
                             'youtube videos.',
                        choices=list(DATASETS.keys()) + ['all'])
    parser.add_argument('-c', '--compression', type=str, default='raw',
                        help='Which compression degree. All videos have been '
                             'encoded with H.264 at a varying compression '
                             'rate. Raw (c0) videos are losslessly '
                             'compressed.',
                        choices=COMPRESSION)
    parser.add_argument('-t', '--type', type=str, default='videos',
                        help='Which file type: videos, masks (only for the '
                             'manipulation methods), or models (only for '
                             'Deepfakes).',
                        choices=TYPE)
    parser.add_argument('-n', '--num_videos', type=int, default=None,
                        help='Number of videos to download if you do not '
                             'want to download the full dataset.')
    parser.add_argument('--server', type=str, default='EU',
                        help='Server to download the data from. If you '
                             'encounter a slow download speed, consider '
                             'changing the server.',
                        choices=SERVERS)
    args = parser.parse_args()

    # URLs
    server = args.server
    if server == 'EU':
        server_url = 'http://canis.vc.in.tum.de:8100/'
    elif server == 'EU2':
        server_url = 'http://kaldir.vc.in.tum.de/faceforensics/'
    elif server == 'CA':
        server_url = 'http://falas.cmpt.sfu.ca:8100/'
    else:
        raise Exception('Wrong server name. Choices: {}'.format(str(SERVERS)))
    args.tos_url = server_url + 'webpage/FaceForensics_TOS.pdf'
    args.base_url = server_url + 'v3/'
    args.deepfakes_model_url = server_url + 'v3/manipulated_sequences/' + \
        'Deepfakes/models/'
    return args


def download_files(filenames, base_url, output_path, report_progress=True):
    os.makedirs(output_path, exist_ok=True)
    if report_progress:
        filenames = tqdm(filenames)
    for filename in filenames:
        download_file(base_url + filename, join(output_path, filename))


def reporthook(count, block_size, total_size):
    global start_time
    if count == 0:
        start_time = time.time()
        return
    # Guard against a zero duration if the first data callback arrives
    # immediately after the download starts
    duration = max(time.time() - start_time, 1e-6)
    progress_size = int(count * block_size)
    speed = int(progress_size / (1024 * duration))
    percent = int(count * block_size * 100 / total_size)
    sys.stdout.write("\rProgress: %d%%, %d MB, %d KB/s, %d seconds passed" %
                     (percent, progress_size / (1024 * 1024), speed, duration))
    sys.stdout.flush()


def download_file(url, out_file, report_progress=False):
    out_dir = os.path.dirname(out_file)
    if not os.path.isfile(out_file):
        # Download to a temporary file in the target directory first and
        # rename afterwards, so an interrupted download never leaves a
        # partial file under the final name.
        fh, out_file_tmp = tempfile.mkstemp(dir=out_dir)
        os.close(fh)
        if report_progress:
            urllib.request.urlretrieve(url, out_file_tmp,
                                       reporthook=reporthook)
        else:
            urllib.request.urlretrieve(url, out_file_tmp)
        os.rename(out_file_tmp, out_file)
    else:
        tqdm.write('WARNING: skipping download of existing file ' + out_file)
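
# Example standalone use (the TOS PDF URL matches the one this script builds
# for the EU server; the output path is a hypothetical illustration):
#   download_file('http://canis.vc.in.tum.de:8100/webpage/FaceForensics_TOS.pdf',
#                 '/tmp/FaceForensics_TOS.pdf', report_progress=True)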


def main(args):
    # TOS
    print('By pressing Enter to continue you confirm that you have agreed '
          'to the FaceForensics terms of use as described at:')
    print(args.tos_url)
    print('***')
    print('Press Enter to continue, or CTRL-C to exit.')
    _ = input('')

    # Extract arguments
    c_datasets = [args.dataset] if args.dataset != 'all' else ALL_DATASETS
    c_type = args.type
    c_compression = args.compression
    num_videos = args.num_videos
    output_path = args.output_path
    os.makedirs(output_path, exist_ok=True)

    # Check for special dataset cases
    for dataset in c_datasets:
        dataset_path = DATASETS[dataset]
        # Special case: the original youtube videos are served as a single
        # zip file rather than as individual sequences
        if 'original_youtube_videos' in dataset:
            print('Downloading original youtube videos.')
            if 'info' not in dataset_path:
                print('Please be patient, this may take a while (~40gb)')
                suffix = ''
            else:
                suffix = 'info'
            download_file(args.base_url + '/' + dataset_path,
                          out_file=join(output_path,
                                        'downloaded_videos{}.zip'.format(
                                            suffix)),
                          report_progress=True)
            return

        # Else: regular datasets
        print('Downloading {} of dataset "{}"'.format(c_type, dataset_path))
        # Get file lists and video lengths list from the server
        if 'DeepFakeDetection' in dataset_path or 'actors' in dataset_path:
            filepaths = json.loads(urllib.request.urlopen(
                args.base_url + '/' + DEEPFAKES_DETECTION_URL
            ).read().decode("utf-8"))
            if 'actors' in dataset_path:
                filelist = filepaths['actors']
            else:
                filelist = filepaths['DeepFakesDetection']
        elif 'original' in dataset_path:
            # Load the file list from the server
            file_pairs = json.loads(urllib.request.urlopen(
                args.base_url + '/' + FILELIST_URL).read().decode("utf-8"))
            filelist = []
            for pair in file_pairs:
                filelist += pair
        else:
            # Load the file list from the server; each manipulated video is
            # named after the source/target pair it was generated from
            file_pairs = json.loads(urllib.request.urlopen(
                args.base_url + '/' + FILELIST_URL).read().decode("utf-8"))
            filelist = []
            for pair in file_pairs:
                filelist.append('_'.join(pair))
                if c_type != 'models':
                    filelist.append('_'.join(pair[::-1]))
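            # For example, the pair ['000', '003'] yields '000_003' and
            # '003_000' (both orderings of the pair)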

        # Maybe limit the number of videos for download
        if num_videos is not None and num_videos > 0:
            print('Downloading the first {} videos'.format(num_videos))
            filelist = filelist[:num_videos]

        # Server and local paths
        dataset_videos_url = args.base_url + '{}/{}/{}/'.format(
            dataset_path, c_compression, c_type)
        dataset_mask_url = args.base_url + '{}/{}/videos/'.format(
            dataset_path, 'masks')
        if c_type == 'videos':
            dataset_output_path = join(output_path, dataset_path,
                                       c_compression, c_type)
            print('Output path: {}'.format(dataset_output_path))
            filelist = [filename + '.mp4' for filename in filelist]
            download_files(filelist, dataset_videos_url, dataset_output_path)
        elif c_type == 'masks':
            dataset_output_path = join(output_path, dataset_path, c_type,
                                       'videos')
            print('Output path: {}'.format(dataset_output_path))
            if 'original' in dataset:
                if args.dataset != 'all':
                    print('Only videos available for original data. '
                          'Aborting.')
                    return
                else:
                    print('Only videos available for original data. '
                          'Skipping original.\n')
                    continue
            filelist = [filename + '.mp4' for filename in filelist]
            download_files(filelist, dataset_mask_url, dataset_output_path)
        # Else: models for Deepfakes
        else:
            if dataset != 'Deepfakes' and c_type == 'models':
                print('Models only available for Deepfakes. Aborting')
                return
            dataset_output_path = join(output_path, dataset_path, c_type)
            print('Output path: {}'.format(dataset_output_path))

            # Get Deepfakes models
            for folder in tqdm(filelist):
                folder_filelist = DEEPFAKES_MODEL_NAMES

                # Folder paths
                folder_base_url = args.deepfakes_model_url + folder + '/'
                folder_dataset_output_path = join(dataset_output_path,
                                                  folder)
                download_files(folder_filelist, folder_base_url,
                               folder_dataset_output_path,
                               report_progress=False)  # already done
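

# Resulting on-disk layout, derived from the join() calls above (shown for a
# hypothetical run with `-d Deepfakes -c c23 -t videos`):
#   <output_path>/manipulated_sequences/Deepfakes/c23/videos/<id1>_<id2>.mp4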


if __name__ == "__main__":
    args = parse_args()
    main(args)