-
Notifications
You must be signed in to change notification settings - Fork 241
/
utils.py
4278 lines (3497 loc) · 143 KB
/
utils.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
# Copyright 2015-2017 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import copy
import datetime
import difflib
import errno
import fcntl
import getpass
import glob
import hashlib
import io
import json
import logging
import math
import os
import pwd
import queue
import re
import shlex
import signal
import socket
import ssl
import sys
import tempfile
import threading
import time
import warnings
from collections import OrderedDict
from enum import Enum
from fnmatch import fnmatch
from functools import lru_cache
from functools import wraps
from subprocess import PIPE
from subprocess import Popen
from subprocess import STDOUT
from types import FrameType
from typing import Any
from typing import Callable
from typing import cast
from typing import Collection
from typing import ContextManager
from typing import Dict
from typing import FrozenSet
from typing import IO
from typing import Iterable
from typing import Iterator
from typing import List
from typing import Literal
from typing import Mapping
from typing import NamedTuple
from typing import Optional
from typing import Sequence
from typing import Set
from typing import TextIO
from typing import Tuple
from typing import Type
from typing import TypeVar
from typing import Union
import choice
import dateutil.tz
import ldap3
import requests_cache
import service_configuration_lib
import yaml
from docker import APIClient
from docker.utils import kwargs_from_env
from kazoo.client import KazooClient
from mypy_extensions import TypedDict
from service_configuration_lib import read_extra_service_information
from service_configuration_lib import read_service_configuration
import paasta_tools.cli.fsm
# DO NOT CHANGE SPACER, UNLESS YOU'RE PREPARED TO CHANGE ALL INSTANCES
# OF IT IN OTHER LIBRARIES (i.e. service_configuration_lib).
# It's used to compose a job's full ID from its name and instance
SPACER = "."
INFRA_ZK_PATH = "/nail/etc/zookeeper_discovery/infrastructure/"
# Overridable via the PAASTA_SYSTEM_CONFIG_DIR environment variable.
PATH_TO_SYSTEM_PAASTA_CONFIG_DIR = os.environ.get(
    "PAASTA_SYSTEM_CONFIG_DIR", "/etc/paasta/"
)
DEFAULT_SOA_DIR = service_configuration_lib.DEFAULT_SOA_DIR
DEFAULT_VAULT_TOKEN_FILE = "/root/.vault_token"
AUTO_SOACONFIG_SUBDIR = "autotuned_defaults"
DEFAULT_DOCKERCFG_LOCATION = "file:///root/.dockercfg"
# Pipeline steps that build/verify but do not deploy anywhere.
DEPLOY_PIPELINE_NON_DEPLOY_STEPS = (
    "itest",
    "itest-and-push-to-registry",
    "security-check",
    "push-to-registry",
)
# Default values for _log
ANY_CLUSTER = "N/A"
ANY_INSTANCE = "N/A"
DEFAULT_LOGLEVEL = "event"
# Matches ANSI color/erase escape sequences so they can be stripped from logs.
no_escape = re.compile(r"\x1B\[[0-9;]*[mK]")
# instead of the convention of using underscores in this scribe channel name,
# the audit log uses dashes to prevent collisions with a service that might be
# named 'audit_log'
AUDIT_LOG_STREAM = "stream_paasta-audit-log"
DEFAULT_SYNAPSE_HAPROXY_URL_FORMAT = (
    "http://{host:s}:{port:d}/;csv;norefresh;scope={scope:s}"
)
# Docker CFS scheduling period in microseconds (docker's own default).
DEFAULT_CPU_PERIOD = 100000
# Extra cpus a container may burst above its request by default.
DEFAULT_CPU_BURST_ADD = 1
DEFAULT_SOA_CONFIGS_GIT_URL = "sysgit.yelpcorp.com"
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
# All instance types paasta knows how to operate on.
INSTANCE_TYPES = (
    "paasta_native",
    "adhoc",
    "kubernetes",
    "eks",
    "tron",
    "flink",
    "flinkeks",
    "cassandracluster",
    "kafkacluster",
    "monkrelays",
    "nrtsearchservice",
    "nrtsearchserviceeks",
)
# Instance types that are "plain" paasta workloads on Kubernetes.
PAASTA_K8S_INSTANCE_TYPES = {
    "kubernetes",
    "eks",
}
# Default Kubernetes namespace per instance type; see InstanceConfig.get_namespace.
INSTANCE_TYPE_TO_K8S_NAMESPACE = {
    "marathon": "paasta",
    "adhoc": "paasta",
    "tron": "tron",
    "flink": "paasta-flinks",
    "flinkeks": "paasta-flinks",
    "cassandracluster": "paasta-cassandraclusters",
    "kafkacluster": "paasta-kafkaclusters",
    "nrtsearchservice": "paasta-nrtsearchservices",
    "nrtsearchserviceeks": "paasta-nrtsearchservices",
}
SHARED_SECRETS_K8S_NAMESPACES = {"paasta-spark", "paasta-cassandraclusters"}
# Linux capabilities dropped from every container by default; see
# InstanceConfig.get_cap_drop.
CAPS_DROP = [
    "SETPCAP",
    "MKNOD",
    "AUDIT_WRITE",
    "CHOWN",
    "NET_RAW",
    "DAC_OVERRIDE",
    "FOWNER",
    "FSETID",
    "KILL",
    "SETGID",
    "SETUID",
    "NET_BIND_SERVICE",
    "SYS_CHROOT",
    "SETFCAP",
]
class RollbackTypes(Enum):
    """How a deployment rollback was initiated, for labeling rollback events."""

    AUTOMATIC_SLO_ROLLBACK = "automatic_slo_rollback"
    AUTOMATIC_METRIC_ROLLBACK = "automatic_metric_rollback"
    USER_INITIATED_ROLLBACK = "user_initiated_rollback"
class TimeCacheEntry(TypedDict):
    """A single time_cache entry: a cached value plus when it was computed."""

    data: Any  # the wrapped function's return value
    fetch_time: float  # time.time() at which `data` was computed
_CacheRetT = TypeVar("_CacheRetT")
class time_cache:
def __init__(self, ttl: float = 0) -> None:
self.configs: Dict[Tuple, TimeCacheEntry] = {}
self.ttl = ttl
def __call__(self, f: Callable[..., _CacheRetT]) -> Callable[..., _CacheRetT]:
def cache(*args: Any, **kwargs: Any) -> _CacheRetT:
if "ttl" in kwargs:
ttl = kwargs["ttl"]
del kwargs["ttl"]
else:
ttl = self.ttl
key = args
for item in kwargs.items():
key += item
if (
(not ttl)
or (key not in self.configs)
or (time.time() - self.configs[key]["fetch_time"] > ttl)
):
self.configs[key] = {
"data": f(*args, **kwargs),
"fetch_time": time.time(),
}
return self.configs[key]["data"]
return cache
_SortDictsT = TypeVar("_SortDictsT", bound=Mapping)


def sort_dicts(dcts: Iterable[_SortDictsT]) -> List[_SortDictsT]:
    """Return the given mappings as a list, ordered by their sorted (key, value) pairs."""
    return sorted(dcts, key=lambda dct: tuple(sorted(dct.items())))
class InvalidInstanceConfig(Exception):
    """Raised when a service instance's configuration is invalid (e.g. both cmd and args set)."""

    pass
# "Safe" forms used internally: a blacklist is (location_type, location) pairs;
# a whitelist is a single (location_type, [allowed values]) tuple or None.
DeployBlacklist = List[Tuple[str, str]]
DeployWhitelist = Optional[Tuple[str, List[str]]]
# The actual config files will have lists, since tuples are not expressible in base YAML, so we define different types
# here to represent that. The getter functions will convert to the safe versions above.
UnsafeDeployBlacklist = Optional[Sequence[Sequence[str]]]
UnsafeDeployWhitelist = Optional[Sequence[Union[str, Sequence[str]]]]
Constraint = Sequence[str]
# e.g. ['GROUP_BY', 'habitat', 2]. Tron doesn't like that so we'll convert to Constraint later.
UnstringifiedConstraint = Sequence[Union[str, int, float]]
SecurityConfigDict = Dict  # Todo: define me.
class VolumeWithMode(TypedDict):
    """Base for volume configs that carry a mount mode."""

    mode: str


class DockerVolume(VolumeWithMode):
    """A host directory bind-mounted into the container."""

    hostPath: str
    containerPath: str


class AwsEbsVolume(VolumeWithMode):
    """An AWS EBS volume attached into the container."""

    volume_id: str
    fs_type: str
    partition: int
    container_path: str


class PersistentVolume(VolumeWithMode):
    """A persistent volume mounted at container_path."""

    size: int  # capacity; units not stated here — see callers
    container_path: str
    storage_class_name: str


class SecretVolumeItem(TypedDict, total=False):
    """Maps one key of a secret to a path/mode inside a SecretVolume."""

    key: str
    path: str
    mode: Union[str, int]


class SecretVolume(TypedDict, total=False):
    """A secret mounted into the container as files."""

    secret_name: str
    container_path: str
    default_mode: Union[str, int]
    items: List[SecretVolumeItem]


class ProjectedSAVolume(TypedDict, total=False):
    """A projected service-account token volume."""

    container_path: str
    audience: str
    expiration_seconds: int


class TronSecretVolume(SecretVolume, total=False):
    """SecretVolume variant for Tron that also names the volume itself."""

    secret_volume_name: str
class MonitoringDict(TypedDict, total=False):
    """Per-instance monitoring/alerting overrides (all keys optional)."""

    alert_after: Union[str, float]
    check_every: str
    check_oom_events: bool
    component: str
    description: str
    notification_email: Union[str, bool]
    page: bool
    priority: str
    project: str
    realert_every: float
    runbook: str
    slack_channels: Union[str, List[str]]
    tags: List[str]
    team: str  # read by InstanceConfig.get_team
    ticket: bool
    tip: str
class InstanceConfigDict(TypedDict, total=False):
    """The raw per-instance soa-configs dictionary (all keys optional).

    Accessors on InstanceConfig apply the defaults documented there.
    """

    deploy_group: str
    # Resource requests (mem/disk in MB/MiB per the getters' docstrings).
    mem: float
    cpus: float
    disk: float
    cmd: str
    namespace: str
    args: List[str]
    # CFS scheduling knobs for the docker --cpu-period/--cpu-quota flags.
    cfs_period_us: float
    cpu_burst_add: float
    cap_add: List
    env: Dict[str, str]
    monitoring: MonitoringDict
    deploy_blacklist: UnsafeDeployBlacklist
    deploy_whitelist: UnsafeDeployWhitelist
    pool: str
    persistent_volumes: List[PersistentVolume]
    role: str
    extra_volumes: List[DockerVolume]
    aws_ebs_volumes: List[AwsEbsVolume]
    secret_volumes: List[SecretVolume]
    projected_sa_volumes: List[ProjectedSAVolume]
    security: SecurityConfigDict
    dependencies_reference: str
    dependencies: Dict[str, Dict]
    constraints: List[UnstringifiedConstraint]
    extra_constraints: List[UnstringifiedConstraint]
    net: str
    extra_docker_args: Dict[str, str]
    gpus: int
    branch: str
    iam_role: str
    iam_role_provider: str
    service: str
    uses_bulkdata: bool
class BranchDictV1(TypedDict, total=False):
    """Legacy (v1) per-branch deployment info from deployments.json."""

    docker_image: str
    desired_state: str
    force_bounce: Optional[str]


class BranchDictV2(TypedDict):
    """Per-branch deployment info from deployments.json (v2 layout)."""

    git_sha: str
    docker_image: str
    image_version: Optional[str]
    desired_state: str
    force_bounce: Optional[str]


class DockerParameter(TypedDict):
    """One --<key>=<value> flag to be passed to `docker run`."""

    key: str
    value: str
# Functional TypedDict syntax because "ephemeral-storage" is not a valid
# Python identifier. Mirrors a Kubernetes container resource request block.
KubeContainerResourceRequest = TypedDict(
    "KubeContainerResourceRequest",
    {
        "cpu": float,
        "memory": str,
        "ephemeral-storage": str,
    },
    total=False,
)
def safe_deploy_blacklist(input: "UnsafeDeployBlacklist") -> "DeployBlacklist":
    """Convert a raw YAML deploy_blacklist (list of 2-item lists) to tuples.

    The declared input type is Optional, but the original implementation
    raised TypeError when given None; None is now treated as an empty
    blacklist. (Annotations are quoted to keep them lazy.)

    :param input: raw blacklist from soa-configs, or None
    :returns: a list of (location_type, location) tuples
    """
    if input is None:
        return []
    return [(location_type, location) for location_type, location in input]
def safe_deploy_whitelist(input: UnsafeDeployWhitelist) -> DeployWhitelist:
    """Convert a raw YAML deploy_whitelist into a (location_type, values) tuple.

    Returns None when the input cannot be unpacked into two parts
    (notably when it is None).
    """
    try:
        location_type, allowed_values = input
    except TypeError:
        return None
    return cast(str, location_type), cast(List[str], allowed_values)
# For mypy typing
# Bound TypeVar so classmethod-style constructors can return the subclass type.
InstanceConfig_T = TypeVar("InstanceConfig_T", bound="InstanceConfig")
class InstanceConfig:
    """Base class wrapping one service instance's soa-configs dictionary.

    Subclasses exist per instance type and may override getters; this class
    provides config access with defaults plus basic validation helpers.
    """

    # Set by subclasses to their config-file prefix (e.g. "kubernetes");
    # get_instance_type() reads it via getattr with a None fallback.
    config_filename_prefix: str

    def __init__(
        self,
        cluster: str,
        instance: str,
        service: str,
        config_dict: InstanceConfigDict,
        branch_dict: Optional[BranchDictV2],
        soa_dir: str = DEFAULT_SOA_DIR,
    ) -> None:
        """Store identity fields and interpolate placeholder values.

        :param cluster: paasta cluster name
        :param instance: instance name within the service
        :param service: service name
        :param config_dict: raw instance configuration from soa-configs
        :param branch_dict: deployment info from deployments.json, or None
        :param soa_dir: root of the soa-configs checkout
        """
        self.config_dict = config_dict
        self.branch_dict = branch_dict
        self.cluster = cluster
        self.instance = instance
        self.service = service
        self.soa_dir = soa_dir
        self._job_id = compose_job_id(service, instance)
        # Allow "{cluster}"/"{instance}"/"{service}" placeholders in selected
        # string-valued config keys (currently only deploy_group). Mutates
        # config_dict in place.
        config_interpolation_keys = ("deploy_group",)
        interpolation_facts = self.__get_interpolation_facts()
        for key in config_interpolation_keys:
            if (
                key in self.config_dict
                and self.config_dict[key] is not None  # type: ignore
            ):
                self.config_dict[key] = self.config_dict[key].format(  # type: ignore
                    **interpolation_facts
                )
def __repr__(self) -> str:
return "{!s}({!r}, {!r}, {!r}, {!r}, {!r}, {!r})".format(
self.__class__.__name__,
self.service,
self.instance,
self.cluster,
self.config_dict,
self.branch_dict,
self.soa_dir,
)
def __get_interpolation_facts(self) -> Dict[str, str]:
return {
"cluster": self.cluster,
"instance": self.instance,
"service": self.service,
}
def get_cluster(self) -> str:
    """The paasta cluster this instance runs in."""
    return self.cluster

def get_namespace(self) -> str:
    """Get namespace from config, default to the value from INSTANCE_TYPE_TO_K8S_NAMESPACE for this instance type, 'paasta' if that isn't defined."""
    return self.config_dict.get(
        "namespace",
        INSTANCE_TYPE_TO_K8S_NAMESPACE.get(self.get_instance_type(), "paasta"),
    )

def get_instance(self) -> str:
    """The instance name within the service."""
    return self.instance

def get_service(self) -> str:
    """The service name."""
    return self.service

@property
def job_id(self) -> str:
    """The composed service.instance job id, computed once in __init__."""
    return self._job_id

def get_docker_registry(
    self, system_paasta_config: Optional["SystemPaastaConfig"] = None
) -> str:
    """The docker registry URI to use for this service's images."""
    return get_service_docker_registry(
        self.service, self.soa_dir, system_config=system_paasta_config
    )

def get_branch(self) -> str:
    """The paasta branch name derived from this instance's cluster and instance."""
    return get_paasta_branch(
        cluster=self.get_cluster(), instance=self.get_instance()
    )
def get_deploy_group(self) -> str:
return self.config_dict.get("deploy_group", self.get_branch())
def get_team(self) -> str:
return self.config_dict.get("monitoring", {}).get("team", None)
def get_mem(self) -> float:
"""Gets the memory required from the service's configuration.
Defaults to 4096 (4G) if no value specified in the config.
:returns: The amount of memory specified by the config, 4096 if not specified"""
mem = self.config_dict.get("mem", 4096)
return mem
def get_mem_swap(self) -> str:
"""Gets the memory-swap value. This value is passed to the docker
container to ensure that the total memory limit (memory + swap) is the
same value as the 'mem' key in soa-configs. Note - this value *has* to
be >= to the mem key, so we always round up to the closest MB and add
additional 64MB for the docker executor (See PAASTA-12450).
"""
mem = self.get_mem()
mem_swap = int(math.ceil(mem + 64))
return "%sm" % mem_swap
def get_cpus(self) -> float:
"""Gets the number of cpus required from the service's configuration.
Defaults to 1 cpu if no value specified in the config.
:returns: The number of cpus specified in the config, 1 if not specified"""
cpus = self.config_dict.get("cpus", 1)
return cpus
def get_cpu_burst_add(self) -> float:
    """Returns the number of additional cpus a container is allowed to use.
    Defaults to DEFAULT_CPU_BURST_ADD"""
    return self.config_dict.get("cpu_burst_add", DEFAULT_CPU_BURST_ADD)

def get_cpu_period(self) -> float:
    """The --cpu-period option to be passed to docker
    Comes from the cfs_period_us configuration option
    :returns: The number to be passed to the --cpu-period docker flag"""
    return self.config_dict.get("cfs_period_us", DEFAULT_CPU_PERIOD)

def get_cpu_quota(self) -> float:
    """Gets the --cpu-quota option to be passed to docker
    Calculation: (cpus + cpus_burst_add) * cfs_period_us
    :returns: The number to be passed to the --cpu-quota docker flag"""
    # Burst headroom is baked into the quota rather than throttled separately.
    cpu_burst_add = self.get_cpu_burst_add()
    return (self.get_cpus() + cpu_burst_add) * self.get_cpu_period()

def get_extra_docker_args(self) -> Dict[str, str]:
    """Arbitrary extra `docker run` flags configured for this instance."""
    return self.config_dict.get("extra_docker_args", {})
def get_cap_add(self) -> Iterable[DockerParameter]:
"""Get the --cap-add options to be passed to docker
Generated from the cap_add configuration option, which is a list of
capabilities.
Example configuration: {'cap_add': ['IPC_LOCK', 'SYS_PTRACE']}
:returns: A generator of cap_add options to be passed as --cap-add flags"""
for value in self.config_dict.get("cap_add", []):
yield {"key": "cap-add", "value": f"{value}"}
def get_cap_drop(self) -> Iterable[DockerParameter]:
    """Generates --cap-drop options to be passed to docker by default, which
    makes them not able to perform special privilege escalation stuff
    https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities
    """
    for cap in CAPS_DROP:
        yield {"key": "cap-drop", "value": cap}

def get_cap_args(self) -> Iterable[DockerParameter]:
    """Generate all --cap-add/--cap-drop parameters, ensuring not to have overlapping settings"""
    cap_adds = list(self.get_cap_add())
    # Warning only: capabilities are still emitted even in this case.
    if cap_adds and is_using_unprivileged_containers():
        log.warning(
            "Unprivileged containerizer detected, adding capabilities will not work properly"
        )
    yield from cap_adds
    # Don't drop a capability that was explicitly added above.
    added_caps = [cap["value"] for cap in cap_adds]
    for cap in self.get_cap_drop():
        if cap["value"] not in added_caps:
            yield cap
def format_docker_parameters(
    self,
    with_labels: bool = True,
    system_paasta_config: Optional["SystemPaastaConfig"] = None,
) -> List[DockerParameter]:
    """Formats extra flags for running docker. Will be added in the format
    `["--%s=%s" % (e['key'], e['value']) for e in list]` to the `docker run` command
    Note: values must be strings

    :param with_labels: Whether to build docker parameters with or without labels
    :returns: A list of parameters to be added to docker run"""
    parameters: List[DockerParameter] = [
        {"key": "memory-swap", "value": self.get_mem_swap()},
        {"key": "cpu-period", "value": "%s" % int(self.get_cpu_period())},
        {"key": "cpu-quota", "value": "%s" % int(self.get_cpu_quota())},
    ]
    if self.use_docker_disk_quota(system_paasta_config=system_paasta_config):
        # get_disk() is in MiB; docker's storage-opt size is in bytes.
        parameters.append(
            {
                "key": "storage-opt",
                "value": f"size={int(self.get_disk() * 1024 * 1024)}",
            }
        )
    if with_labels:
        parameters.extend(
            [
                {"key": "label", "value": "paasta_service=%s" % self.service},
                {"key": "label", "value": "paasta_instance=%s" % self.instance},
            ]
        )
    # Originally guarded by `if extra_docker_args:` and used extend([item])
    # per element; iterating an empty dict is already a no-op and append is
    # the single-element operation.
    for key, value in self.get_extra_docker_args().items():
        parameters.append({"key": key, "value": value})
    parameters.extend(self.get_docker_init())
    parameters.extend(self.get_cap_args())
    return parameters
def use_docker_disk_quota(
    self, system_paasta_config: Optional["SystemPaastaConfig"] = None
) -> bool:
    """Whether to enforce disk limits via docker's storage-opt.

    Loads the system paasta config when one isn't supplied.
    """
    if system_paasta_config is None:
        system_paasta_config = load_system_paasta_config()
    return system_paasta_config.get_enforce_disk_quota()

def get_docker_init(self) -> Iterable[DockerParameter]:
    """Always run containers with docker's --init process reaper enabled."""
    return [{"key": "init", "value": "true"}]
def get_disk(self, default: float = 1024) -> float:
"""Gets the amount of disk space in MiB required from the service's configuration.
Defaults to 1024 (1GiB) if no value is specified in the config.
:returns: The amount of disk space specified by the config, 1024 MiB if not specified
"""
disk = self.config_dict.get("disk", default)
return disk
def get_gpus(self) -> Optional[int]:
"""Gets the number of gpus required from the service's configuration.
Default to None if no value is specified in the config.
:returns: The number of gpus specified by the config, 0 if not specified"""
gpus = self.config_dict.get("gpus", None)
return gpus
def get_container_type(self) -> Optional[str]:
"""Get Mesos containerizer type.
Default to DOCKER if gpus are not used.
:returns: Mesos containerizer type, DOCKER or MESOS"""
if self.get_gpus() is not None:
container_type = "MESOS"
else:
container_type = "DOCKER"
return container_type
def get_cmd(self) -> Optional[Union[str, List[str]]]:
"""Get the docker cmd specified in the service's configuration.
Defaults to None if not specified in the config.
:returns: A string specified in the config, None if not specified"""
return self.config_dict.get("cmd", None)
def get_instance_type(self) -> Optional[str]:
    """The instance type, from the subclass's config_filename_prefix; None on the base class."""
    return getattr(self, "config_filename_prefix", None)
def get_env_dictionary(
    self, system_paasta_config: Optional["SystemPaastaConfig"] = None
) -> Dict[str, str]:
    """A dictionary of key/value pairs that represent environment variables
    to be injected to the container environment"""
    env = {
        "PAASTA_SERVICE": self.service,
        "PAASTA_INSTANCE": self.instance,
        "PAASTA_CLUSTER": self.cluster,
        "PAASTA_DEPLOY_GROUP": self.get_deploy_group(),
        "PAASTA_DOCKER_IMAGE": self.get_docker_image(),
        "PAASTA_RESOURCE_CPUS": str(self.get_cpus()),
        "PAASTA_RESOURCE_MEM": str(self.get_mem()),
        "PAASTA_RESOURCE_DISK": str(self.get_disk()),
    }
    if self.get_gpus() is not None:
        env["PAASTA_RESOURCE_GPUS"] = str(self.get_gpus())
    try:
        env["PAASTA_GIT_SHA"] = get_git_sha_from_dockerurl(
            self.get_docker_url(system_paasta_config=system_paasta_config)
        )
    except Exception:
        # Best-effort: any failure (e.g. no docker_image yet) just omits
        # PAASTA_GIT_SHA rather than breaking env construction.
        pass
    image_version = self.get_image_version()
    if image_version is not None:
        env["PAASTA_IMAGE_VERSION"] = image_version
    team = self.get_team()
    if team:
        env["PAASTA_MONITORING_TEAM"] = team
    instance_type = self.get_instance_type()
    if instance_type:
        env["PAASTA_INSTANCE_TYPE"] = instance_type
    # User-provided env wins over the generated values; everything is
    # coerced to str at the end.
    user_env = self.config_dict.get("env", {})
    env.update(user_env)
    return {str(k): str(v) for (k, v) in env.items()}

def get_env(
    self, system_paasta_config: Optional["SystemPaastaConfig"] = None
) -> Dict[str, str]:
    """Basic get_env that simply returns the basic env, other classes
    might need to override this getter for more implementation-specific
    env getting"""
    return self.get_env_dictionary(system_paasta_config=system_paasta_config)
def get_args(self) -> Optional[List[str]]:
"""Get the docker args specified in the service's configuration.
If not specified in the config and if cmd is not specified, defaults to an empty array.
If not specified in the config but cmd is specified, defaults to null.
If specified in the config and if cmd is also specified, throws an exception. Only one may be specified.
:param service_config: The service instance's configuration dictionary
:returns: An array of args specified in the config,
``[]`` if not specified and if cmd is not specified,
otherwise None if not specified but cmd is specified"""
if self.get_cmd() is None:
return self.config_dict.get("args", [])
else:
args = self.config_dict.get("args", None)
if args is None:
return args
else:
# TODO validation stuff like this should be moved into a check_*
raise InvalidInstanceConfig(
"Instance configuration can specify cmd or args, but not both."
)
def get_monitoring(self) -> MonitoringDict:
    """Get monitoring overrides defined for the given instance (empty dict when unset)."""
    return self.config_dict.get("monitoring", {})
def get_deploy_constraints(
    self,
    blacklist: DeployBlacklist,
    whitelist: DeployWhitelist,
    system_deploy_blacklist: DeployBlacklist,
    system_deploy_whitelist: DeployWhitelist,
) -> List[Constraint]:
    """Return the combination of deploy_blacklist and deploy_whitelist
    as a list of constraints.
    """
    return (
        deploy_blacklist_to_constraints(blacklist)
        + deploy_whitelist_to_constraints(whitelist)
        + deploy_blacklist_to_constraints(system_deploy_blacklist)
        + deploy_whitelist_to_constraints(system_deploy_whitelist)
    )

def get_deploy_blacklist(self) -> DeployBlacklist:
    """The deploy blacklist is a list of lists, where the lists indicate
    which locations the service should not be deployed"""
    return safe_deploy_blacklist(self.config_dict.get("deploy_blacklist", []))

def get_deploy_whitelist(self) -> DeployWhitelist:
    """The deploy whitelist is a tuple of (location_type, [allowed value, allowed value, ...]).

    To have tasks scheduled on it, a host must be covered by the deploy whitelist (if present) and not excluded by
    the deploy blacklist."""
    return safe_deploy_whitelist(self.config_dict.get("deploy_whitelist"))
def get_docker_image(self) -> str:
"""Get the docker image name (with tag) for a given service branch from
a generated deployments.json file."""
if self.branch_dict is not None:
return self.branch_dict["docker_image"]
else:
return ""
def get_image_version(self) -> Optional[str]:
"""Get additional information identifying the Docker image from a
generated deployments.json file."""
if self.branch_dict is not None and "image_version" in self.branch_dict:
return self.branch_dict["image_version"]
else:
return None
def get_docker_url(
    self, system_paasta_config: Optional["SystemPaastaConfig"] = None
) -> str:
    """Compose the docker url.

    :returns: '<registry_uri>/<docker_image>'
    :raises NoDockerImageError: when deployments.json provides no docker_image
    """
    registry_uri = self.get_docker_registry(
        system_paasta_config=system_paasta_config
    )
    docker_image = self.get_docker_image()
    if not docker_image:
        raise NoDockerImageError(
            "Docker url not available because there is no docker_image"
        )
    docker_url = f"{registry_uri}/{docker_image}"
    return docker_url
def get_desired_state(self) -> str:
"""Get the desired state (either 'start' or 'stop') for a given service
branch from a generated deployments.json file."""
if self.branch_dict is not None:
return self.branch_dict["desired_state"]
else:
return "start"
def get_force_bounce(self) -> Optional[str]:
"""Get the force_bounce token for a given service branch from a generated
deployments.json file. This is a token that, when changed, indicates that
the instance should be recreated and bounced, even if no other
parameters have changed. This may be None or a string, generally a
timestamp.
"""
if self.branch_dict is not None:
return self.branch_dict["force_bounce"]
else:
return None
def check_cpus(self) -> Tuple[bool, str]:
cpus = self.get_cpus()
if cpus is not None:
if not isinstance(cpus, (float, int)):
return (
False,
'The specified cpus value "%s" is not a valid float or int.' % cpus,
)
return True, ""
def check_mem(self) -> Tuple[bool, str]:
mem = self.get_mem()
if mem is not None:
if not isinstance(mem, (float, int)):
return (
False,
'The specified mem value "%s" is not a valid float or int.' % mem,
)
return True, ""
def check_disk(self) -> Tuple[bool, str]:
disk = self.get_disk()
if disk is not None:
if not isinstance(disk, (float, int)):
return (
False,
'The specified disk value "%s" is not a valid float or int.' % disk,
)
return True, ""
def check_security(self) -> Tuple[bool, str]:
security = self.config_dict.get("security")
if security is None:
return True, ""
outbound_firewall = security.get("outbound_firewall")
if outbound_firewall is None:
return True, ""
if outbound_firewall is not None and outbound_firewall not in (
"block",
"monitor",
):
return (
False,
'Unrecognized outbound_firewall value "%s"' % outbound_firewall,
)
unknown_keys = set(security.keys()) - {
"outbound_firewall",
}
if unknown_keys:
return (
False,
'Unrecognized items in security dict of service config: "%s"'
% ",".join(unknown_keys),
)
return True, ""
def check_dependencies_reference(self) -> Tuple[bool, str]:
dependencies_reference = self.config_dict.get("dependencies_reference")
if dependencies_reference is None:
return True, ""
dependencies = self.config_dict.get("dependencies")
if dependencies is None:
return (
False,
'dependencies_reference "%s" declared but no dependencies found'
% dependencies_reference,
)
if dependencies_reference not in dependencies:
return (
False,
'dependencies_reference "%s" not found in dependencies dictionary'
% dependencies_reference,
)
return True, ""
def check(self, param: str) -> Tuple[bool, str]:
check_methods = {
"cpus": self.check_cpus,
"mem": self.check_mem,
"security": self.check_security,
"dependencies_reference": self.check_dependencies_reference,
"deploy_group": self.check_deploy_group,
}
check_method = check_methods.get(param)
if check_method is not None:
return check_method()
else:
return (
False,
'Your service config specifies "%s", an unsupported parameter.' % param,
)
def validate(
self,
params: Optional[List[str]] = None,
) -> List[str]:
if params is None:
params = [
"cpus",
"mem",
"security",
"dependencies_reference",
"deploy_group",
]
error_msgs = []
for param in params:
check_passed, check_msg = self.check(param)
if not check_passed:
error_msgs.append(check_msg)
return error_msgs
def check_deploy_group(self) -> Tuple[bool, str]:
    """Validate that this instance's deploy_group is deployed to by deploy.yaml.

    :returns: (True, "") on success, else (False, error message)
    """
    deploy_group = self.get_deploy_group()
    if deploy_group is not None:
        pipeline_deploy_groups = get_pipeline_deploy_groups(
            service=self.service, soa_dir=self.soa_dir
        )
        if deploy_group not in pipeline_deploy_groups:
            return (
                False,
                f"{self.service}.{self.instance} uses deploy_group {deploy_group}, but {deploy_group} is not deployed to in deploy.yaml",
            )  # noqa: E501
    return True, ""
def get_extra_volumes(self) -> List[DockerVolume]:
    """Extra volumes are a specially formatted list of dictionaries that should
    be bind mounted in a container The format of the dictionaries should
    conform to the `Mesos container volumes spec
    <https://mesosphere.github.io/marathon/docs/native-docker.html>`_"""
    return self.config_dict.get("extra_volumes", [])

def get_aws_ebs_volumes(self) -> List[AwsEbsVolume]:
    """Configured AWS EBS volume attachments (empty list when unset)."""
    return self.config_dict.get("aws_ebs_volumes", [])

def get_secret_volumes(self) -> List[SecretVolume]:
    """Configured secret volumes (empty list when unset)."""
    return self.config_dict.get("secret_volumes", [])

def get_projected_sa_volumes(self) -> List[ProjectedSAVolume]:
    """Configured projected service-account token volumes (empty list when unset)."""
    return self.config_dict.get("projected_sa_volumes", [])

def get_iam_role(self) -> str:
    """IAM role for this instance; empty string when unset."""
    return self.config_dict.get("iam_role", "")

def get_iam_role_provider(self) -> str:
    """Provider of the IAM role; defaults to "aws"."""
    return self.config_dict.get("iam_role_provider", "aws")

def get_role(self) -> Optional[str]:
    """Which mesos role of nodes this job should run on."""
    return self.config_dict.get("role")
def get_pool(self) -> str:
"""Which pool of nodes this job should run on. This can be used to mitigate noisy neighbors, by putting
particularly noisy or noise-sensitive jobs into different pools.
This is implemented with an attribute "pool" on each mesos slave and by adding a constraint or node selector.
Eventually this may be implemented with Mesos roles, once a framework can register under multiple roles.
:returns: the "pool" attribute in your config dict, or the string "default" if not specified.
"""
return self.config_dict.get("pool", "default")
def get_pool_constraints(self) -> List[Constraint]:
pool = self.get_pool()
return [["pool", "LIKE", pool]]
def get_constraints(self) -> Optional[List[Constraint]]:
    """Raw scheduling constraints from the config, stringified; None when unset."""
    return stringify_constraints(self.config_dict.get("constraints", None))

def get_extra_constraints(self) -> List[Constraint]:
    """Additional scheduling constraints from the config (empty list when unset)."""
    return stringify_constraints(self.config_dict.get("extra_constraints", []))
def get_net(self) -> str:
"""
:returns: the docker networking mode the container should be started with.
"""
return self.config_dict.get("net", "bridge")