
[sonic-utilities] Update submodule #5828

Closed
8 changes: 5 additions & 3 deletions build_debian.sh
@@ -413,13 +413,15 @@ done < files/image_config/sysctl/sysctl-net.conf

sudo augtool --autosave "$sysctl_net_cmd_string" -r $FILESYSTEM_ROOT

## docker Python API package is needed by Ansible docker module
sudo https_proxy=$https_proxy LANG=C chroot $FILESYSTEM_ROOT pip install 'docker==4.1.0'
# docker Python API package is needed by Ansible docker module as well as some SONiC applications
sudo https_proxy=$https_proxy LANG=C chroot $FILESYSTEM_ROOT pip2 install 'docker==4.1.0'
sudo https_proxy=$https_proxy LANG=C chroot $FILESYSTEM_ROOT pip3 install 'docker==4.3.1'

## Note: keep pip installed for maintenance purposes

## Get gcc and python dev pkgs
sudo LANG=C DEBIAN_FRONTEND=noninteractive chroot $FILESYSTEM_ROOT apt-get -y install gcc libpython2.7-dev
sudo https_proxy=$https_proxy LANG=C chroot $FILESYSTEM_ROOT pip install 'netifaces==0.10.7'
sudo https_proxy=$https_proxy LANG=C chroot $FILESYSTEM_ROOT pip2 install 'netifaces==0.10.7'

## Create /var/run/redis folder for docker-database to mount
sudo mkdir -p $FILESYSTEM_ROOT/var/run/redis
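Note (not part of the diff): the docker Python package is installed for both interpreters because Ansible's docker modules and several SONiC Python applications talk to the Docker daemon through it. A minimal sketch of the kind of call those consumers make; the container name "swss" is only an example:

# Sketch only: assumes the docker package (4.x) and a running Docker daemon.
from docker import APIClient

client = APIClient(base_url='unix://var/run/docker.sock')
state = client.inspect_container('swss')['State']   # 'swss' is a hypothetical container name
print(state['Status'])                               # e.g. "running"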
10 changes: 8 additions & 2 deletions dockers/docker-orchagent/enable_counters.py
@@ -1,13 +1,16 @@
#!/usr/bin/env python
#!/usr/bin/env python3

import swsssdk
import time

import swsssdk


def enable_counter_group(db, name):
info = {}
info['FLEX_COUNTER_STATUS'] = 'enable'
db.mod_entry("FLEX_COUNTER_TABLE", name, info)


def enable_counters():
db = swsssdk.ConfigDBConnector()
db.connect()
@@ -20,10 +23,12 @@ def enable_counters():
enable_counter_group(db, 'BUFFER_POOL_WATERMARK')
enable_counter_group(db, 'PORT_BUFFER_DROP')


def get_uptime():
with open('/proc/uptime') as fp:
return float(fp.read().split(' ')[0])


def main():
# If the switch was just started (uptime less than 5 minutes),
# wait for 3 minutes and enable counters
@@ -35,5 +40,6 @@ def main():
time.sleep(60)
enable_counters()


if __name__ == '__main__':
main()
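Note (not part of the diff): enable_counter_group() writes a FLEX_COUNTER_STATUS field into CONFIG_DB, which the flex counter logic in swss then acts on. A sketch of reading one of those entries back with the same swsssdk API, assuming a running SONiC CONFIG_DB:

# Sketch only: read back a counter group written by enable_counter_group().
import swsssdk

db = swsssdk.ConfigDBConnector()
db.connect()
print(db.get_entry('FLEX_COUNTER_TABLE', 'PORT'))
# expected to include {'FLEX_COUNTER_STATUS': 'enable'} once the script has run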
2 changes: 2 additions & 0 deletions files/build_templates/sonic_debian_extension.j2
@@ -339,7 +339,9 @@ sudo chmod og-rw $FILESYSTEM_ROOT_ETC_SONIC/core_analyzer.rc.json

sudo LANG=C DEBIAN_FRONTEND=noninteractive chroot $FILESYSTEM_ROOT apt-get -y install libffi-dev libssl-dev
sudo https_proxy=$https_proxy LANG=C chroot $FILESYSTEM_ROOT pip2 install azure-storage==0.36.0
sudo https_proxy=$https_proxy LANG=C chroot $FILESYSTEM_ROOT pip3 install azure-storage==0.36.0
sudo https_proxy=$https_proxy LANG=C chroot $FILESYSTEM_ROOT pip2 install watchdog==0.10.2
sudo https_proxy=$https_proxy LANG=C chroot $FILESYSTEM_ROOT pip3 install watchdog==0.10.3
sudo https_proxy=$https_proxy LANG=C chroot $FILESYSTEM_ROOT pip2 install futures==3.3.0

{% if include_kubernetes == "y" %}
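Note (not part of the diff): the watchdog package added here for Python 3 is the file-system notification library used by core_uploader.py below. A minimal, self-contained sketch of the Observer/handler pattern it provides; the path and sleep interval are illustrative:

# Sketch only: print a line whenever a file appears under /var/core/.
import time

from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer


class CoreHandler(FileSystemEventHandler):
    def on_created(self, event):
        print("new core file: " + event.src_path)


observer = Observer()
observer.schedule(CoreHandler(), "/var/core/", recursive=False)
observer.start()
try:
    while True:
        time.sleep(5)
finally:
    observer.stop()
    observer.join()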
75 changes: 28 additions & 47 deletions files/image_config/corefile_uploader/core_uploader.py
@@ -1,25 +1,19 @@
#!/usr/bin/env python
#!/usr/bin/env python3

import json
import os
import time
import tarfile
import socket
import tarfile
import time

import yaml
import json
import syslog
from azure.storage.file import FileService
from sonic_py_common.logger import Logger
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
from azure.storage.file import FileService

global CORE_FILE_PATH, RC_FILE
global hostname, sonicversion, asicname, acctname, acctkey, sharename, cwd
global INIT_CWD
global log_level
global this_file

this_file = os.path.basename(__file__)
SYSLOG_IDENTIFIER = os.path.basename(__file__)

global cfg
cfg = ""

CORE_FILE_PATH = "/var/core/"
@@ -42,55 +36,40 @@
MAX_RETRIES = 5
UPLOAD_PREFIX = "UPLOADED_"

log_level = syslog.LOG_DEBUG

def log_msg(lvl, fname, m):
if (lvl <= log_level):
syslog.syslog(lvl, "{}: {}".format(fname, m))

if log_level == syslog.LOG_DEBUG:
print("{}: {}".format(fname, m))

def log_err(m):
log_msg(syslog.LOG_ERR, this_file, m)

def log_info(m):
log_msg(syslog.LOG_INFO, this_file, m)

def log_warn(m):
log_msg(syslog.LOG_WARNING, this_file, m)

def log_debug(m):
log_msg(syslog.LOG_DEBUG, this_file, m)
# Global logger instance
logger = Logger(SYSLOG_IDENTIFIER)
logger.set_min_log_priority_info()


def make_new_dir(p):
os.system("rm -rf " + p)
os.system("mkdir -p " + p)


def parse_a_json(data, prefix, val):
for i in data:
if type(data[i]) == dict:
parse_a_json(data[i], prefix + (i,), val)
else:
val[prefix + (i,)] = data[i]


class config:
parsed_data = {}
cfg_data = {}

def __init__(self):
while not os.path.exists(RC_FILE):
# Wait here until service restart
log_err("Unable to retrieve Azure storage credentials")
logger.log_error("Unable to retrieve Azure storage credentials")
time.sleep (HOURS_4)

with open(RC_FILE, 'r') as f:
self.parsed_data = json.load(f)
parse_a_json(self.parsed_data, (), self.cfg_data)

def get_data(self, k):
return self.cfg_data[k] if self.cfg_data.has_key(k) else ""
return self.cfg_data[k] if k in self.cfg_data else ""

def get_dict(self):
return self.parsed_data
@@ -123,15 +102,17 @@ def run(self):
time.sleep(POLL_SLEEP)
except:
self.observer.stop()
log_err("Error in watcher")
logger.log_error("Error in watcher")

self.observer.join()


def set_env(lst):
for k in lst:
if lst[k]:
os.environ[k] = lst[k]
log_debug("set env {} = {}".format(k, lst[k]))
logger.log_debug("set env {} = {}".format(k, lst[k]))


class Handler(FileSystemEventHandler):

@@ -155,7 +136,7 @@ def init():
if not acctname or not acctkey or not sharename:
while True:
# Wait here until service restart
log_err("Unable to retrieve Azure storage credentials")
logger.log_error("Unable to retrieve Azure storage credentials")
time.sleep (HOURS_4)

with open("/etc/sonic/sonic_version.yml", 'r') as stream:
@@ -182,7 +163,7 @@ def on_any_event(event):

elif event.event_type == 'created':
# Take any action here when a file is first created.
log_debug("Received create event - " + event.src_path)
logger.log_debug("Received create event - " + event.src_path)
Handler.wait_for_file_write_complete(event.src_path)
Handler.handle_file(event.src_path)

@@ -205,7 +186,7 @@ def wait_for_file_write_complete(path):
raise Exception("Dump file creation is too slow: " + path)
# Give up as something is terribly wrong with this file.

log_debug("File write complete - " + path)
logger.log_debug("File write complete - " + path)


@staticmethod
@@ -227,11 +208,11 @@ def handle_file(path):
tar.add(metafiles[e])
tar.add(path)
tar.close()
log_debug("Tar file for upload created: " + tarf_name)
logger.log_debug("Tar file for upload created: " + tarf_name)

Handler.upload_file(tarf_name, tarf_name, path)

log_debug("File uploaded - " + path)
logger.log_debug("File uploaded - " + path)
os.chdir(INIT_CWD)

@staticmethod
@@ -250,16 +231,16 @@ def upload_file(fname, fpath, coref):
e.append(l[len(e)])
svc.create_directory(sharename, "/".join(e))

log_debug("Remote dir created: " + "/".join(e))
logger.log_debug("Remote dir created: " + "/".join(e))

svc.create_file_from_path(sharename, "/".join(l), fname, fpath)
log_debug("Remote file created: name{} path{}".format(fname, fpath))
logger.log_debug("Remote file created: name{} path{}".format(fname, fpath))
newcoref = os.path.dirname(coref) + "/" + UPLOAD_PREFIX + os.path.basename(coref)
os.rename(coref, newcoref)
break

except Exception as ex:
log_err("core uploader failed: Failed during upload (" + coref + ") err: ("+ str(ex) +") retry:" + str(i))
logger.log_error("core uploader failed: Failed during upload (" + coref + ") err: ("+ str(ex) +") retry:" + str(i))
if not os.path.exists(fpath):
break
i += 1
@@ -281,5 +262,5 @@ def scan():
Handler.scan()
w.run()
except Exception as e:
log_err("core uploader failed: " + str(e) + " Exiting ...")
logger.log_error("core uploader failed: " + str(e) + " Exiting ...")

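Note (not part of the diff): the hand-rolled syslog wrappers (log_err, log_info, log_warn, log_debug) are replaced above by sonic_py_common's Logger, which provides per-level methods and a minimum-priority filter. The pattern, condensed; the identifier string is illustrative:

# Sketch only: the Logger usage adopted in core_uploader.py.
from sonic_py_common.logger import Logger

logger = Logger("core_uploader")    # syslog identifier
logger.set_min_log_priority_info()  # DEBUG messages are filtered out

logger.log_info("uploader started")
logger.log_error("upload failed, will retry")
logger.log_debug("dropped unless the minimum priority is lowered")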
5 changes: 2 additions & 3 deletions files/image_config/misc/docker-wait-any
@@ -1,4 +1,4 @@
#!/usr/bin/env python
#!/usr/bin/env python3

"""
docker-wait-any
@@ -23,7 +23,7 @@
cases where we need the dependent container to be warm-restarted without
affecting other services (eg: warm restart of teamd service)

NOTE: This script is written against docker Python package 4.1.0. Newer
NOTE: This script is written against docker Python package 4.3.1. Newer
versions of docker may have a different API.
"""
import argparse
@@ -68,7 +68,6 @@ def main():
docker_client = APIClient(base_url='unix://var/run/docker.sock')

parser = argparse.ArgumentParser(description='Wait for dependent docker services',
version='1.0.0',
formatter_class=argparse.RawTextHelpFormatter,
epilog="""
Examples:
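Note (not part of the diff): the version='1.0.0' keyword is dropped because Python 3's argparse removed it from the ArgumentParser constructor (Python 2.7 only kept it as a deprecated alias). If the flag is still wanted, the Python 3 spelling would be:

# Sketch only: re-adding --version under Python 3 after dropping the
# deprecated ArgumentParser(version=...) keyword.
import argparse

parser = argparse.ArgumentParser(description='Wait for dependent docker services')
parser.add_argument('--version', action='version', version='1.0.0')
args = parser.parse_args()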
20 changes: 12 additions & 8 deletions src/sonic-device-data/tests/config_checker
@@ -1,20 +1,23 @@
#!/usr/bin/env python
#!/usr/bin/env python3

import glob
import re
import sys
import glob

permitted_properties = []


def usage():
print "Usage: " + sys.argv[0] + " <config_file>"
print("Usage: " + sys.argv[0] + " <config_file>")
sys.exit(1)

def check_property(p):

def check_property(p):
if p in permitted_properties:
return True
return False


def check_file(file_name):
try:
file_ok = True
@@ -58,14 +61,14 @@ def check_file(file_name):

if not check_property(p):
file_ok = False
print("[line %d] Error: %s is not permitted" % (lineno, p))
print("[line {}] Error: {} is not permitted".format(lineno, p))
if file_ok:
print "Result: " + file_name + " PASSED the config check!"
print("Result: " + file_name + " PASSED the config check!")
else:
print "Result: " + file_name + " FAILED the config check!"
print("Result: " + file_name + " FAILED the config check!")
return file_ok
except IOError:
print "Error: Cannot open file " + file_name
print("Error: Cannot open file " + file_name)
return False


@@ -93,5 +96,6 @@ def main(argv):
if not all_good:
sys.exit(-1)


if __name__ == "__main__":
main(sys.argv[1:])
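Note (not part of the diff): the print changes in this file are forced by the interpreter switch: print is a built-in function under Python 3, so the Python 2 statement form is a syntax error, and the touched lines were moved from %-formatting to str.format at the same time. A tiny illustration with made-up values:

# Sketch only; file_name, lineno and p are placeholder values.
file_name = "example.config.bcm"
lineno, p = 7, "some_unlisted_property"
# Python 2 statement form (a SyntaxError under python3):
#   print "Result: " + file_name + " FAILED the config check!"
print("Result: " + file_name + " FAILED the config check!")
print("[line {}] Error: {} is not permitted".format(lineno, p))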