#!/usr/bin/env python3
#
# This script converts standard web content files (html, css, etc.) into a C++
# header file that is included in the program body. The files are gzip-compressed
# and stored in Flash with the PROGMEM keyword to save RAM.
#
# With thanks to https://github.com/mitchjs for removal of dependencies on external gzip/sed/xxd
#
# Copyright (c) 2023 David Kerr, https://github.com/dkerr64
#
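# For illustration only (actual symbol names, lengths and CRC tags depend on the
# files found in src/www), the generated webcontent.h contains entries shaped like:
#
#   const unsigned char src_www_build_index_html_gz[] PROGMEM = {0x1F,0x8B, /* ... gzip bytes ... */};
#   const unsigned int src_www_build_index_html_gz_len = 1234;
#   const char type_html[] PROGMEM = "text/html";
#   const std::unordered_map<std::string, std::tuple<const unsigned char *,
#       const unsigned int, const char *, std::string>> webcontent = {
#       { "/index.html", {src_www_build_index_html_gz, src_www_build_index_html_gz_len, type_html, "AbCd12"} }
#   };
#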
import os
import shutil
import base64
import zlib
import gzip
sourcepath = "src/www"
targetpath = sourcepath + "/build"
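# os.walk() yields (dirpath, dirnames, filenames) tuples; take just the list of
# regular files in sourcepath, or an empty list if the directory does not exist.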
filenames = next(os.walk(sourcepath), (None, None, []))[2]
print("Compressing and converting files from " + sourcepath + " into " + targetpath)
# Start by deleting the target directory, then creating an empty one.
try:
    shutil.rmtree(targetpath)
except FileNotFoundError:
    pass
os.mkdir(targetpath)
# Calculate a CRC32 for each file and base64-encode it; this changes whenever the
# file contents change. We use it to control browser caching.
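# For example (token is hypothetical), a reference written as "app.js?v=CRC-32" in an
# HTML or JS source file is rewritten further below to something like "app.js?v=AbCd12":
# the 4 CRC bytes, base64url-encoded with the "=" padding stripped, give a 6-character tag.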
file_crc = {}
for file in filenames:
    # skip hidden files
    if file[0] == ".":
        continue
    # skip status.json
    if file == "status.json":
        continue
    with open(sourcepath + "/" + file, "rb") as f:
        # read contents of the file
        data = f.read()
        crc32 = (
            base64.urlsafe_b64encode(zlib.crc32(data).to_bytes(4, byteorder="big"))
            .decode()
            .replace("=", "")
        )
        f.close()
    file_crc[file] = crc32
    print("CRC: " + crc32 + " (" + file + ")")
# Open webcontent file and write warning header...
wf = open(targetpath + "/webcontent.h", "w")
wf.write("/**************************************\n")
wf.write(" * Autogenerated DO NOT EDIT\n")
wf.write(" **************************************/\n")
wf.write("#include <unordered_map>\n")
wf.write("#include <string>\n")
wf.flush()
varnames = []
# now loop through each file...
for file in filenames:
    # skip hidden files
    if file[0] == ".":
        continue
    # skip status.json
    if file == "status.json":
        continue
    # create gzip file name
    gzfile = targetpath + "/" + file + ".gz"
    # create variable names
    varnames.append(("/" + file, gzfile.replace(".", "_").replace("/", "_").replace("-", "_"), file_crc[file]))
    # get file type
    t = file.rpartition(".")[-1]
    # if file matches, add true crc to ?v=CRC-32 marker and create the gzip
    if (t == "html") or (t == "htm") or (t == "js"):
        with open(sourcepath + "/" + file, 'rb') as f_in, gzip.open(gzfile, 'wb') as f_out:
            # read contents of the file
            data = f_in.read()
            # loop through each file that could be referenced
            for f_name, crc32 in file_crc.items():
                # Replace the target string with real crc
                data = data.replace(bytes(f_name + "?v=CRC-32", 'utf-8'), bytes(f_name + "?v=" + crc32, 'utf-8'))
            f_out.write(data)
            f_out.close()
    else:
        with open(sourcepath + "/" + file, 'rb') as f_in, gzip.open(gzfile, 'wb') as f_out:
            f_out.writelines(f_in)
            f_out.close()
    # create the 'c' code
    # const unsigned char src_www_build_apple_touch_icon_png_gz[] PROGMEM = {
    # const unsigned int src_www_build_apple_touch_icon_png_gz_len = 2721;
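    # Write the gzip bytes as a comma-separated hex array, 12 bytes per output line,
    # counting total bytes so the matching _len constant can be emitted afterwards.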
wf.write("const unsigned char %s[] PROGMEM = {\n" % gzfile.replace(".", "_").replace("/", "_").replace("-", "_") )
count = 0
with open(gzfile, 'rb') as f:
bytes_read = f.read(12)
while bytes_read:
count = count + len(bytes_read)
wf.write(' ')
for b in bytes_read:
wf.write('0x%02X,' % b)
wf.write('\n')
bytes_read = f.read(12)
wf.write('};\n')
wf.write("const unsigned int %s_len = %d;\n\n" % (gzfile.replace(".", "_").replace("/", "_").replace("-", "_"), count) )
wf.flush()
# Add possible MIME types to the file...
wf.write(
"""
const char type_svg[] PROGMEM = "image/svg+xml";
const char type_bmp[] PROGMEM = "image/bmp";
const char type_gif[] PROGMEM = "image/gif";
const char type_jpeg[] PROGMEM = "image/jpeg";
const char type_jpg[] PROGMEM = "image/jpeg";
const char type_png[] PROGMEM = "image/png";
const char type_tiff[] PROGMEM = "image/tiff";
const char type_tif[] PROGMEM = "image/tiff";
const char type_txt[] PROGMEM = "text/plain";
const char type_[] PROGMEM = "text/plain";
const char type_htm[] PROGMEM = "text/html";
const char type_html[] PROGMEM = "text/html";
const char type_css[] PROGMEM = "text/css";
const char type_js[] PROGMEM = "text/javascript";
const char type_mjs[] PROGMEM = "text/javascript";
const char type_json[] PROGMEM = "application/json";
// Must be at least one more than max string above...
#define MAX_MIME_TYPE_LEN 20
"""
)
# Use an unordered_map so we can lookup the data, length and type based on filename...
wf.write(
"const std::unordered_map<std::string, std::tuple<const unsigned char *, const unsigned int, const char *, std::string>> webcontent = {"
)
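# Each entry maps the request path to (gzip data, length, MIME type, CRC tag).
# The MIME type symbol is chosen by file extension ("type_" + extension); a file
# with no extension falls back to the bare type_ symbol, i.e. text/plain.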
n = 0
for file, var, crc32 in varnames:
t = ""
if file.find(".") > 0:
t = file.rpartition(".")[-1]
# Need comma at end of every line except last one...
if n > 0:
wf.write(",")
wf.write('\n { "' + file + '", {' + var + ", " + var + "_len, type_" + t + ', "' + crc32 + '"' + "} }")
n = n + 1
# All done, close the file...
wf.write("\n};\n")
wf.close()
print("processed " + str(len(varnames)) + " files")