# inventory-example.yml — example Ansible inventory (369 lines in the original file)
---
all:
  vars:
  hosts:
    #
    # ----
    ## Terraform node is required; it is the IP of the machine
    ## that performs the terraform tasks. The default value of
    ## 127.0.0.1 indicates that the machine that performs terraform
    ## tasks is the same that launches the ansible playbook.
    ##
    ## When using 127.0.0.1 for the terraform_node, it is advisable to also
    ## set ansible_connection to local, so it does not use ssh for localhost.
    ##
    ## In case the terraform machine is NOT the local machine, put here the
    ## IP/hostname of the terraform node.
    #
    # ----
    terraform_node:
      # this should correspond to the ip/host of the machine where terraform
      # is installed. If it's the same machine that is launching the ansible
      # run, you should set it to 127.0.0.1 and ansible_connection: local.
      ansible_host: remote_terraform_node.test.com
      # it is better to explicitly set the ansible_connection variable.
      # if set to `local` the role will assume that terraform is on the same
      # node that is launching the ansible run.
      #
      # supported values: local, ssh, paramiko
      ansible_connection: local  # ssh
    #
    # ----
    ## if the terraform_node is NOT local or the TARGET KVM MACHINE is not local,
    ## we can assume that you need a bastion/jumphost to reach the VM's internal
    ## network and perform early provisioning (such as network setup).
    ##
    ## if terraform is NOT local, we can assume that we have 2 types of situations:
    ## - terraform is on the same node where the kvm hypervisor is
    ## - terraform is targeting another different kvm-hypervisor node
    ##
    ## if terraform IS LOCAL, we are assuming that ansible and terraform are
    ## on the same node but the target hypervisor is remote.
    ##
    ## we then need to specify the ansible_host_* variables to
    ## let both terraform and later ansible access the final hypervisor and VMs.
    hypervisor_1:
      ansible_host: remote_kvm_machine.foo.bar
      ansible_user: root
      ansible_port: 22
      ansible_ssh_pass: password
  children:
    deploy:
      vars:
        # ----
        ## Hypervisor dependent
        #
        # ----
        ## Source image:
        # in this example, we setup the default image to a centos8 image,
        # this can be overwritten on a per-group or per-host level.
        disk_source: "~/VirtualMachines/centos8-terraform.qcow2"
        pool_name: default
        # ----
        #
        ## These are the ssh infos that are necessary to access the
        ## basic VM source image.
        ssh_password: root
        ssh_port: 22
        ssh_user: root
        # ----
        ##
        # Default ssh public key location.
        ssh_public_key_file: '~/.ssh/id_rsa.pub'
        # ----
        ##
        # Default folders where to save HCL files and the final state
        base_deploy_path: "{{ lookup('env','HOME') }}"
        state_save_file: "{{ ansible_inventory_sources[0] }}-state.tar.gz"
      children:
        master_nodes:
          hosts:
            # Here the list of VMs that we want to create and provision.
            host-vm-1:
              ## Host specific resources
              #
              ## It is possible to specify a per-VM disk source.
              ## This is useful for mixed environments with
              ## multiple distributions.
              # disk_source: "~/VirtualMachines/centos8-terraform.qcow2"
              os_family: RedHat
              cpu: 4
              memory: 1024
              # Whether to autostart the VM with the hypervisor or not.
              # Defaults to false
              vm_autostart: true
              ## Hypervisor
              # this variable is required and decides on which hypervisor
              # the VM should be deployed, this value should be the inventory_hostname
              # of the target hypervisor declared at the start of the inventory
              hypervisor: hypervisor_1
              # This is the network resource where you want
              # to assign the default NAT network, libvirt
              # creates this "default" when installing, being the
              # 192.168.122.0/24 subnet.
              # ----
              ## Mount points are optional, the folders are
              ## exposed to the guest via 9p fs, make sure
              ## the guest supports it.
              ##
              ## It can be 1 value or more, depending on your needs.
              ##
              #
              # mount_points:
              #   - {src: /tmp, dest: mnt}
              #   - {src: /home, dest: home}
              # ----
              # ----
              ## ansible_host is declared here as a means for ansible
              ## to continue the VM provisioning after the terraform apply.
              #
              ansible_host: 172.16.0.156
              # ----
              ## Network declaration:
              ##
              ## Declare each device you want to add inside the network_interfaces
              ## dictionary.
              ## Remember that THE ORDER OF DECLARATION is IMPORTANT, the NAT device
              ## should always be present (unless you can control your DHCP leases
              ## for the external devices) and that should be the FIRST device.
              ## It is important because it's the way the role has to communicate
              ## with the VM **BEFORE** setting up all the userspace networks.
              ##
              ## Other interfaces are of 2 types: macvtap, bridge
              ##
              ## the default_route should be assigned to ONLY ONE interface to
              ## function properly.
              ##
              ## if not set it's equal to false.
              ##
              ## Structure:
              ##   name_of_network:
              ##     type: ...           # mandatory
              ##     ip: ...             # mandatory
              ##     gw: ...             # mandatory
              ##     dns:                # mandatory
              ##       - dns1
              ##       - dns2
              ##       ...
              ##     mac_address: ...    # optional
              ##     default_route: ...  # at least one true mandatory, false is optional.
              # ----
              network_interfaces:
                # Nat interface, it should always be the first one you declare.
                # it does not necessarily have to be your default_route or main
                # ansible_host, but it's important to declare it so ansible has
                # a way to communicate with the VM and setup all the remaining
                # networks.
                iface_1:
                  name: nat
                  type: nat
                  ip: 192.168.122.155
                  gw: 192.168.122.1
                  dns:
                    - 192.168.122.1
                iface_2:
                  name: ens1
                  type: macvtap
                  ip: 192.168.1.124
                  gw: 192.168.1.1
                  dns:
                    - 1.1.1.1
                    - 8.8.8.8
                  default_route: true
                iface_3:
                  name: EXT_BRIDGE
                  type: bridge
                  ip: 172.16.0.156
                  gw: 172.16.0.1
                  dns:
                    - 172.16.0.1
              # ----
              ## Default ssh public key location.
              ## If you need a specific ssh key for a specific host
              ## you can declare it here, to be host specific instead
              ## of global.
              #
              # ssh_public_key_file: '~/.ssh/id_rsa.pub'
              # ----
              # ----
              ## this parameter is optional. Specifying this parameter
              ## will change the VM password during the initialization of it.
              ##
              ## This will then be used in the provisioning section if no ssh-public-key
              ## is found.
              ##
              ## It is advisable to use ansible-vault for this section.
              #
              # set_new_password: test_psk
              # ----
              # ----
              ## It is possible to specify custom commands that should be executed
              ## on a VM **BEFORE** ansible provisioning starts. This can be useful
              ## for cases where python3 is not installed by default for example, or
              ## to setup static IPs for bridge/macvtap interfaces.
              #
              ## It supports using jinja2 syntax and variables.
              #
              terraform_custom_provisioners:
                - "echo {{ ansible_host }} > /tmp/test_provisioning_success.log"
              # ----
              ## It is possible to specify custom commands that should be executed
              ## on a VM **AFTER** ansible provisioning has finished. This can be useful
              ## for cases where specific-node post-deploy scripts or playbooks are
              ## present like in an `ansible-pull`.
              #
              ## It supports using jinja2 syntax and variables.
              #
              terrible_custom_provisioners:
                - "echo {{ ansible_host }} > /tmp/post_privisioning.log"
                # NOTE(review): '>' folds the lines below into a single line;
                # verify the resulting one-line shell command is valid (a ';'
                # before 'done' may be required) — TODO confirm against the role.
                - >
                  for i in /*; do
                  echo $i >> /tmp/test_prov_multiline.log
                  done
              # ----
              ## This section explains how you can add some additional disks to the VMs.
              ## Suppose you want to create a VM that needs a large amount of storage
              ## space, and a separated disk just to store the configurations. Doing
              ## this is quite simple.
              ## The main variable you need is data_disks. Then you have to specify the
              ## disks and the related properties for each of them.
              ## If data_disks is mentioned in your inventory, the following variables
              ## are required:
              #
              ## size: required. Specify the disk size expressed in GB. (e.g. size: 1 means 1GB)
              ## pool: required. Specify the pool where you want to store the additional disks.
              ## format: required. Specify the filesystem format you want to apply to the disk.
              ##   Supported formats are: ext4, ext3, ext2, xfs for Linux VMs and ufs for FreeBSD VMs.
              ## mount_point: required. Specify the mount point of the disk.
              data_disks:
                disk-0:
                  size: 1
                  pool: default
                  format: ext4
                  encryption: true
                  mount_point: /mnt/config
                disk-1:
                  size: 1
                  pool: default
                  format: ext3
                  encryption: false
                  mount_point: /mnt/config-1
                disk-2:
                  size: 5
                  pool: default
                  format: swap
                  encryption: false
                  mount_point: none
          vars:
            ## It is possible to specify a per-GROUP disk source.
            ## This is useful for mixed environments with
            ## multiple distributions.
            # disk_source: "~/VirtualMachines/centos8-terraform.qcow2"
            # ----
            ## Default ssh public key location.
            ## If you need a specific ssh key for a specific group of hosts
            ## you can declare it here, to be GROUP-SPECIFIC instead
            ## of global.
            #
            # ssh_public_key_file: '~/.ssh/id_rsa.pub'
            # ---
            # ----
            ## this parameter is optional. Specifying this parameter
            ## will change the password during the initialization of the VM
            ## GROUP-SPECIFIC.
            ##
            ## This will then be used in the provisioning section if no ssh-public-key
            ## is found.
            ##
            ## It is advisable to use ansible-vault for this section.
            #
            # set_new_password: test_psk
            # ----
            ## Here we can provide a GROUP-SPECIFIC default
            ## for the resources (cpu, ram, network...)
            ## so if all nodes in a group are the same size
            ## it is possible to deduplicate a lot of settings.
            # cpu: 1
            # memory: 1024
            # mount_points:
            #   - {src: /tmp, dest: mnt}
            #   - {src: /home, dest: home}
        slave_nodes:
          hosts:
            host-vm-2:
              os_family: RedHat
              hypervisor: hypervisor_1
              vm_autostart: false
              ansible_host: 192.168.122.200
              network_interfaces:
                iface_1:
                  name: NAT
                  type: nat
                  ip: 192.168.122.200
                  gw: 192.168.122.1
                  dns:
                    - 192.168.122.1
                  mac_address: "AA:BB:CC:12:25:69"
                  default_route: true
              cpu: 2
              memory: 1024
              data_disks:
                additional_disk_1:
                  size: 20
                  pool: default
                  format: ext4
                  mount_point: /mnt/data
                  encryption: false
            host-vm-3:
              os_family: Suse
              disk_source: "~/VirtualMachines/opensuse15.2-terraform.qcow2"
              ansible_host: 192.168.122.201
              hypervisor: hypervisor_1
              network_interfaces:
                iface_1:
                  name: NAT
                  type: nat
                  ip: 192.168.122.201
                  gw: 192.168.122.1
                  dns:
                    - 192.168.122.1
                  mac_address: "AA:BB:CC:11:25:69"
                  default_route: true
              cpu: 2
              memory: 1024
          vars:
            # ...
# Additional hypervisors follow the same pattern, e.g.:
#
# hypervisor_2:
#   vars:
#   children:
#     master_nodes:
#     slave_nodes:
#
# hypervisor_2
#   vars:
#     ...
#   children:
#     host:
#       terraform_node:
#         ansible_host: 127.0.0.1
#     group_1:
#       hosts:
#         ...
#       vars:
#         ...
#     group_2:
#       hosts:
#         ...
#       vars:
#         ...
# hypervisor_3
#   ...