-
Notifications
You must be signed in to change notification settings - Fork 0
/
entrypoint.sh
executable file
·274 lines (229 loc) · 9.88 KB
/
entrypoint.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
#!/bin/sh
# vim:set tabstop=8 shiftwidth=4 expandtab:
# kate: space-indent on; indent-width 4;
#
# Copyright (c) 2022-2023 Jakob Meng, <jakobmeng@web.de>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# NOTE: declare local variables first and initialize them later because return code of "local ..." is always 0
# Abort on any unhandled error and on use of unset variables.
set -eu

# Environment knobs; give defaults to any the caller left unset or empty.
DEBUG=${DEBUG:-no}
DEBUG_SHELL=${DEBUG_SHELL:-no}
SSH_AUTH_SOCK=${SSH_AUTH_SOCK:-}

# Trace every command when debugging has been requested.
case "$DEBUG" in
    yes|true) set -x ;;
esac
# Print an error message on stderr; arguments are joined by spaces.
error() {
    printf 'ERROR: %s\n' "$*" >&2
}
# Print a warning message on stderr; arguments are joined by spaces.
warn() {
    printf 'WARNING: %s\n' "$*" >&2
}
# The script changes system state (groups, daemons, firewall rules),
# so refuse to continue unless running as root.
if [ "$(id -u)" != 0 ]; then
    error "Please run as root"
    exit 125
fi
# Kill process group including child processes such as libvirtd
# Double quotes are deliberate (hence the SC2064 suppression): $$ must
# expand NOW, when the trap is installed, so the handler signals this
# script's own process group whenever INT/TERM arrives later. The handler
# first resets the traps to avoid re-entering itself.
# shellcheck disable=SC2064
trap "trap - INT TERM && kill -- -$$" INT TERM
# Main body runs in a subshell so its exit status can be examined by the
# handler at the bottom of the script.
(
set -eu
if [ "$DEBUG" = "yes" ] || [ "$DEBUG" = "true" ]; then
    set -x
fi
# Ensure user cloudy has full access to mounted directories.
# Use ':' as the owner/group separator; the '.' form is a deprecated GNU
# extension that newer coreutils releases reject.
chown -v cloudy:cloudy /home/cloudy/.local/share/libvirt/images/ /home/cloudy/.ssh/
# Ansible requires SSH keys to be able to connect to virtual machines
# Generate one key pair per type for user cloudy, skipping types whose key
# file already exists; the trailing 'exit' aborts the inner shell on the
# first ssh-keygen failure.
sudo -u cloudy --set-home sh -c '
for t in ecdsa ed25519 rsa; do
[ -e "$HOME/.ssh/id_$t" ] || ssh-keygen -t "$t" -N "" -f "$HOME/.ssh/id_$t" || exit;
done'
# Ansible uses SSH agent forwarding for accessing nested virtual machines
if [ -z "$SSH_AUTH_SOCK" ]; then
# Using SSH_AUTH_SOCK instead of SSH_AGENT_PID
# because ssh-agent is running on the container host
# 'eval' imports the SSH_AUTH_SOCK/SSH_AGENT_PID assignments that
# ssh-agent prints; ssh-add then loads cloudy's default keys (generated
# above) into the agent.
eval "$(sudo -u cloudy --set-home ssh-agent)"
sudo -u cloudy --set-home --preserve-env=SSH_AUTH_SOCK ssh-add
fi
# The mounted project directory must provide an Ansible configuration.
cd /home/cloudy/project/
if [ ! -e "ansible.cfg" ]; then
error "ansible.cfg not found"
exit 123
fi
# Install the jm1.cloudy collection unless a system-wide copy is present
# (the user install lands under /home/cloudy/.ansible).
[ -e "/usr/share/ansible/collections/ansible_collections/jm1/cloudy" ] \
|| sudo -u cloudy ansible-galaxy collection install jm1.cloudy
# Locate setup.yml, site.yml and requirements.yml. Candidate directories are
# probed in ascending order of priority, so a later match overrides an
# earlier one. Initialize the result variables first: under 'set -u' a
# variable that no candidate directory provides would otherwise trigger an
# "unbound variable" error in the checks below instead of the intended
# diagnostics and exit codes.
playbook_setup=
playbook_site=
requirements=
# Sorted in ascending order of priority
for dir in \
    "/usr/share/ansible/collections/ansible_collections/jm1/cloudy" \
    "/home/cloudy/.ansible/collections/ansible_collections/jm1/cloudy" \
    "/home/cloudy/project";
do
    [ -e "$dir/playbooks/setup.yml" ] && playbook_setup="$dir/playbooks/setup.yml"
    [ -e "$dir/playbooks/site.yml" ] && playbook_site="$dir/playbooks/site.yml"
    [ -e "$dir/requirements.yml" ] && requirements="$dir/requirements.yml"
done
if [ -z "$playbook_setup" ]; then
    error "Ansible playbook setup.yml not found"
    exit 122
fi
if [ -z "$playbook_site" ]; then
    error "Ansible playbook site.yml not found"
    exit 121
fi
if [ -z "$requirements" ]; then
    error "Ansible Galaxy's requirements.yml not found"
    exit 120
fi
# Pick the Python interpreter that can import ansible; fall back to
# python2 when python3 lacks the module.
if ! python3 -c "import ansible"; then
py=python2
else
py=python3
fi
# Require Ansible >= 2.9. The backslash-newline joins the two quoted
# strings into a single -c program; do not insert anything between them.
if ! "$py" -c \
"import ansible; import sys; from packaging import version;"\
"sys.exit(0 if version.parse(ansible.release.__version__) >= version.parse('2.9') else 1);"
then
error "Ansible 2.9 or newer is required"
exit 119
fi
# Use older Ansible collections for compatibility with older Ansible releases
# Each branch tests the running Ansible release with the interpreter chosen
# above and pins collection versions accordingly.
if "$py" -c \
"import ansible; import sys; from packaging import version;"\
"sys.exit(0 if version.parse(ansible.release.__version__) < version.parse('2.11') else 1);"
then
# Ansible < 2.11
sudo -u cloudy --set-home ansible-galaxy collection install 'ansible.utils:<3.0.0' 'community.general:<5.0.0' \
'kubernetes.core:<3.0.0'
elif "$py" -c \
"import ansible; import sys; from packaging import version;"\
"sys.exit(0 if version.parse(ansible.release.__version__) < version.parse('2.13') else 1);"
then
# Ansible 2.11 or 2.12
sudo -u cloudy --set-home ansible-galaxy collection install 'ansible.utils:<3.0.0' 'community.general:<8.0.0' \
'kubernetes.core:<3.0.0'
elif "$py" -c \
"import ansible; import sys; from packaging import version;"\
"sys.exit(0 if version.parse(ansible.release.__version__) < version.parse('2.14') else 1);"
then
# Ansible 2.13
sudo -u cloudy --set-home ansible-galaxy collection install 'ansible.utils:<3.0.0' 'kubernetes.core:<3.0.0'
fi
# Install the collections and roles listed in the discovered requirements.yml.
sudo -u cloudy --set-home ansible-galaxy collection install --requirements-file "$requirements"
sudo -u cloudy --set-home ansible-galaxy role install --role-file "$requirements"
# Prepare the local libvirt host (limit lvrt-lcl-system).
sudo -u cloudy --set-home \
ansible-playbook "$playbook_setup" \
--limit lvrt-lcl-system
# First pass of the site playbook; skip tasks that need KVM nesting or a
# running libvirt daemon (pools, images, networks) — those run later.
sudo -u cloudy --set-home \
ansible-playbook "$playbook_site" \
--limit lvrt-lcl-system \
--skip-tags "jm1.kvm_nested_virtualization" \
--skip-tags "jm1.cloudy.libvirt_pools" \
--skip-tags "jm1.cloudy.libvirt_images" \
--skip-tags "jm1.cloudy.libvirt_networks"
# Align the container's gid for group kvm with the gid the host put on
# /dev/kvm, else user cloudy will not have access to /dev/kvm.
if [ -e /dev/kvm ]; then
    host_kvm_gid="$(stat -c '%g' /dev/kvm)"
    if [ "$(getent group kvm | cut -d: -f3)" != "$host_kvm_gid" ]; then
        groupmod --gid "$host_kvm_gid" kvm
    fi
fi
# Start libvirt system daemon
(
set -eu
# Exit subshell if libvirtd is already running
{ pgrep --uid "$(id -u)" --exact libvirtd && exit; } || true
# Ref.: /lib/systemd/system/libvirtd.service
# Load daemon options from the distribution's environment file, if any
# (Debian-style /etc/default, Red-Hat-style /etc/sysconfig).
# shellcheck disable=SC1091
[ -e /etc/default/libvirtd ] && . /etc/default/libvirtd
# shellcheck disable=SC1091
[ -e /etc/sysconfig/libvirtd ] && . /etc/sysconfig/libvirtd
# LIBVIRTD_ARGS is deliberately unquoted (SC2086) so that multiple
# options split into separate arguments.
# shellcheck disable=SC2086
/usr/sbin/libvirtd --daemon ${LIBVIRTD_ARGS:-}
)
# Disable libvirt tls transport, enable unauthenticated libvirt tcp transport and bind
# to all network interfaces for connectivity from container host and virtual machines.
# Only done once: an existing per-user configuration is left untouched.
if [ ! -e /home/cloudy/.config/libvirt/libvirtd.conf ]; then
    sudo -u cloudy mkdir -p /home/cloudy/.config/libvirt/
    cp -av /etc/libvirt/libvirtd.conf /home/cloudy/.config/libvirt/libvirtd.conf
    # Use ':' as chown's owner/group separator; the '.' form is a deprecated
    # GNU extension that newer coreutils releases reject.
    chown cloudy:cloudy /home/cloudy/.config/libvirt/libvirtd.conf
    # Rewrite the copied config in place: tcp on, tls off, listen on all
    # interfaces without authentication, and comment out unix_sock_* options.
    sed -i \
        -e 's/^[#]*listen_tls = .*/listen_tls = 0/g' \
        -e 's/^[#]*listen_tcp = .*/listen_tcp = 1/g' \
        -e 's/^[#]*listen_addr = .*/listen_addr = "0.0.0.0"/g' \
        -e 's/^[#]*auth_tcp = .*/auth_tcp = "none"/g' \
        -e 's/^unix_sock_/#unix_sock_/g' \
        /home/cloudy/.config/libvirt/libvirtd.conf
fi
# Second pass of the site playbook for the system host: unlike the first
# pass, the libvirt pool/image/network tags are no longer skipped because
# the libvirt system daemon is now running.
sudo -u cloudy --set-home \
ansible-playbook "$playbook_site" \
--limit lvrt-lcl-system \
--skip-tags "jm1.kvm_nested_virtualization"
# Enable masquerading for internet connectivity from libvirt domains on networks route-0-dhcp and route-1-no-dhcp
# Prefer nftables when available, fall back to iptables otherwise. The
# here-doc delimiters are quoted, so their bodies are passed verbatim
# with no shell expansion.
if command -v nft >/dev/null; then
nft --file - << '________EOF'
table ip nat {
chain POSTROUTING {
type nat hook postrouting priority srcnat; policy accept;
meta l4proto tcp ip saddr 192.168.157.0/24 ip daddr != 192.168.157.0/24 masquerade to :1024-65535
meta l4proto udp ip saddr 192.168.157.0/24 ip daddr != 192.168.157.0/24 masquerade to :1024-65535
ip saddr 192.168.157.0/24 ip daddr != 192.168.157.0/24 masquerade
meta l4proto tcp ip saddr 192.168.158.0/24 ip daddr != 192.168.158.0/24 masquerade to :1024-65535
meta l4proto udp ip saddr 192.168.158.0/24 ip daddr != 192.168.158.0/24 masquerade to :1024-65535
ip saddr 192.168.158.0/24 ip daddr != 192.168.158.0/24 masquerade
}
}
________EOF
else
# Equivalent rules for the legacy iptables nat table.
iptables-restore << '________EOF'
*nat
:POSTROUTING ACCEPT [0:0]
-A POSTROUTING -s 192.168.157.0/24 ! -d 192.168.157.0/24 -p tcp -j MASQUERADE --to-ports 1024-65535
-A POSTROUTING -s 192.168.157.0/24 ! -d 192.168.157.0/24 -p udp -j MASQUERADE --to-ports 1024-65535
-A POSTROUTING -s 192.168.157.0/24 ! -d 192.168.157.0/24 -j MASQUERADE
-A POSTROUTING -s 192.168.158.0/24 ! -d 192.168.158.0/24 -p tcp -j MASQUERADE --to-ports 1024-65535
-A POSTROUTING -s 192.168.158.0/24 ! -d 192.168.158.0/24 -p udp -j MASQUERADE --to-ports 1024-65535
-A POSTROUTING -s 192.168.158.0/24 ! -d 192.168.158.0/24 -j MASQUERADE
COMMIT
________EOF
fi
# Add user cloudy to group kvm if not yet a member; kill any running
# session libvirtd first so the daemon restarted below picks up the new
# group membership.
if ! groups cloudy | grep -q -w kvm; then
sudo -u cloudy killall libvirtd || true
usermod --append --groups kvm cloudy
fi
# Start libvirt session daemon
pgrep --uid "$(id -u cloudy)" --exact libvirtd || sudo -u cloudy --set-home /usr/sbin/libvirtd --daemon --listen
# Run the site playbook for the session host, now against cloudy's
# per-user libvirt daemon.
sudo -u cloudy --set-home \
ansible-playbook "$playbook_site" \
--limit lvrt-lcl-session \
--skip-tags "jm1.kvm_nested_virtualization"
# Without arguments, drop into an interactive login shell as cloudy;
# otherwise run the given command and then wait for all libvirt domains
# to shut down before tearing the container down.
if [ $# -eq 0 ]; then
sudo -u cloudy --set-home --preserve-env=SSH_AUTH_SOCK bash --login
else
sudo -u cloudy --set-home --preserve-env=SSH_AUTH_SOCK env -- "$@"
# Wait until libvirt domains have been shutdown
while [ -n "$(sudo -u cloudy --set-home virsh list --name)" ]; do
warn "Waiting for libvirt domains $(sudo -u cloudy --set-home virsh list --name | xargs echo) to stop."
sleep 60
done
fi
# Stop libvirt daemons
killall libvirtd
)
# Failure handler: testing $? instead of writing '( ... ) || handler' is
# deliberate — a subshell used as a condition would run with its internal
# 'set -e' suppressed, hence the SC2181 suppression below.
# NOTE(review): with 'set -e' active at top level, a failing subshell
# appears to terminate the script before this check runs, which would make
# the DEBUG_SHELL branch unreachable — confirm, and consider wrapping the
# subshell in 'set +e' / 'set -e' with its status captured in a variable.
# shellcheck disable=SC2181
# else 'set -e' in subshell is ignored
if [ $? -ne 0 ]; then
if [ "$DEBUG_SHELL" = "yes" ] || [ "$DEBUG_SHELL" = "true" ]; then
bash
exit
fi
exit 255
fi